diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 621ac49ffeb..00000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,123 +0,0 @@
-version: 2
-
-jobs:
- # the first half of the jobs are in this test
- short-tests-0:
- # TODO: Create a small custom docker image with all the dependencies we need
- # preinstalled to reduce installation time.
- docker:
- - image: fbopensource/zstd-circleci-primary:0.0.1
- steps:
- - checkout
- - run:
- name: Test
- command: |
- ./tests/test-license.py
- cc -v
- CFLAGS="-O0 -Werror -pedantic" make allmost; make clean
- make c99build; make clean
- make c11build; make clean
- make -j regressiontest; make clean
- make shortest; make clean
- make cxxtest; make clean
- # the second half of the jobs are in this test
- short-tests-1:
- docker:
- - image: fbopensource/zstd-circleci-primary:0.0.1
- steps:
- - checkout
- - run:
- name: Test
- command: |
- make gnu90build; make clean
- make gnu99build; make clean
- make ppc64build V=1; make clean
- make ppcbuild V=1; make clean
- make armbuild V=1; make clean
- make aarch64build V=1; make clean
- make -C tests test-legacy test-longmatch; make clean
- make -C lib libzstd-nomt; make clean
- # This step should only be run in a cron job
- regression-test:
- docker:
- - image: fbopensource/zstd-circleci-primary:0.0.1
- environment:
- CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
- steps:
- - checkout
- # Restore the cached resources.
- - restore_cache:
- # We try our best to bust the cache when the data changes by hashing
- # data.c. If that doesn't work, simply update the version number here
- # and below. If we fail to bust the cache, the regression testing will
- # still work, since it has its own stamp, but will need to redownload
- # everything.
- keys:
- - regression-cache-{{ checksum "tests/regression/data.c" }}-v0
- - run:
- name: Regression Test
- command: |
- make -C programs zstd
- make -C tests/regression test
- mkdir -p $CIRCLE_ARTIFACTS
- ./tests/regression/test \
- --cache tests/regression/cache \
- --output $CIRCLE_ARTIFACTS/results.csv \
- --zstd programs/zstd
- echo "NOTE: The new results.csv is uploaded as an artifact to this job"
- echo " If this fails, go to the Artifacts pane in CircleCI, "
- echo " download /tmp/circleci-artifacts/results.csv, and if they "
- echo " are still good, copy it into the repo and commit it."
- echo "> diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv"
- diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv
- # Only save the cache on success (default), since if the failure happened
- # before we stamp the data cache, we will have a bad cache for this key.
- - save_cache:
- key: regression-cache-{{ checksum "tests/regression/data.c" }}-v0
- paths:
- - tests/regression/cache
- - store_artifacts:
- path: /tmp/circleci-artifacts
-
-
-workflows:
- version: 2
- commit:
- jobs:
- # Run the tests in parallel
- - short-tests-0
- - short-tests-1
- - regression-test
-
- nightly:
- triggers:
- - schedule:
- cron: "0 0 * * *"
- filters:
- branches:
- only:
- - release
- - dev
- - master
- jobs:
- # Run daily regression tests
- - regression-test
-
-
-
- # Longer tests
- #- make -C tests test-zstd-nolegacy && make clean
- #- pyenv global 3.4.4; make -C tests versionsTest && make clean
- #- make zlibwrapper && make clean
- #- gcc -v; make -C tests test32 MOREFLAGS="-I/usr/include/x86_64-linux-gnu" && make clean
- #- make uasan && make clean
- #- make asan32 && make clean
- #- make -C tests test32 CC=clang MOREFLAGS="-g -fsanitize=address -I/usr/include/x86_64-linux-gnu"
- # Valgrind tests
- #- CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make clean
- #- make -C tests valgrindTest && make clean
- # ARM, AArch64, PowerPC, PowerPC64 tests
- #- make ppctest && make clean
- #- make ppc64test && make clean
- #- make armtest && make clean
- #- make aarch64test && make clean
diff --git a/.circleci/images/primary/Dockerfile b/.circleci/images/primary/Dockerfile
deleted file mode 100644
index 4b77032b6dd..00000000000
--- a/.circleci/images/primary/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM circleci/buildpack-deps@sha256:f6f10c11b7b8ccfd4f4a5b830c3256803604ce61292b60cb22e26b12f62b0e8c
-
-RUN sudo dpkg --add-architecture i386
-RUN sudo apt-get -y -qq update
-RUN sudo apt-get -y install \
- gcc-multilib-powerpc-linux-gnu gcc-arm-linux-gnueabi \
- libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross \
- libc6-dev-ppc64-powerpc-cross zstd gzip coreutils \
- libcurl4-openssl-dev
diff --git a/.cirrus.yml b/.cirrus.yml
index bf3f0c415d2..745024bc22f 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -1,10 +1,9 @@
task:
- name: FreeBSD (shortest)
+ name: FreeBSD (make check)
freebsd_instance:
matrix:
- image_family: freebsd-14-0
- image_family: freebsd-13-2
+ image_family: freebsd-14-2
install_script: pkg install -y gmake coreutils
script: |
MOREFLAGS="-Werror" gmake -j all
- gmake shortest
+ gmake check
diff --git a/.github/workflows/android-ndk-build.yml b/.github/workflows/android-ndk-build.yml
new file mode 100644
index 00000000000..175add4ff9c
--- /dev/null
+++ b/.github/workflows/android-ndk-build.yml
@@ -0,0 +1,39 @@
+name: Android NDK Build
+
+on:
+ pull_request:
+ branches: [ dev, release, actionsTest ]
+ push:
+ branches: [ actionsTest, '*ndk*' ]
+
+permissions: read-all
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
+
+ - name: Set up JDK 17
+ uses: actions/setup-java@3a4f6e1af504cf6a31855fa899c6aa5355ba6c12 # v4.7.0
+ with:
+ java-version: '17'
+ distribution: 'temurin'
+
+ - name: Setup Android SDK
+ uses: android-actions/setup-android@9fc6c4e9069bf8d3d10b2204b1fb8f6ef7065407 # v3.2.2
+
+ - name: Install Android NDK
+ run: |
+ sdkmanager --install "ndk;27.0.12077973"
+ echo "ANDROID_NDK_HOME=$ANDROID_SDK_ROOT/ndk/27.0.12077973" >> $GITHUB_ENV
+
+ - name: Build with NDK
+ run: |
+ export PATH=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
+ make CC=aarch64-linux-android21-clang \
+ AR=llvm-ar \
+ RANLIB=llvm-ranlib \
+ STRIP=llvm-strip
+
diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
index 25d8c52f9ef..659072861f2 100644
--- a/.github/workflows/commit.yml
+++ b/.github/workflows/commit.yml
@@ -3,7 +3,12 @@ on:
push:
branches:
- dev
+ pull_request:
+ branches:
+ - dev
+
permissions: read-all
+
jobs:
short-tests-0:
runs-on: ubuntu-latest
@@ -25,8 +30,9 @@ jobs:
make c99build; make clean
make c11build; make clean
make -j regressiontest; make clean
- make shortest; make clean
+ make check; make clean
make cxxtest; make clean
+
short-tests-1:
runs-on: ubuntu-latest
services:
@@ -38,17 +44,26 @@ jobs:
- name: Install Dependencies
run: |
sudo apt-get update
- sudo apt-get install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi gcc-aarch64-linux-gnu libc6-dev-ppc64-powerpc-cross libcurl4-gnutls-dev lib64gcc-11-dev-powerpc-cross
- - name: Test
- run: |-
- make gnu90build; make clean
- make gnu99build; make clean
- make ppc64build V=1; make clean
- make ppcbuild V=1; make clean
- make armbuild V=1; make clean
- make aarch64build V=1; make clean
- make -C tests test-legacy test-longmatch; make clean
- make -C lib libzstd-nomt; make clean
+ sudo apt-get install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi gcc-aarch64-linux-gnu libc6-dev-ppc64-powerpc-cross libcurl4-gnutls-dev lib64gcc-13-dev-powerpc-cross
+ - name: gnu90 build
+ run: make gnu90build && make clean
+ - name: gnu99 build
+ run: make gnu99build && make clean
+ - name: ppc64 build
+ run: make ppc64build V=1 && make clean
+ - name: ppc build
+ run: make ppcbuild V=1 && make clean
+ - name: arm build
+ run: make armbuild V=1 && make clean
+ - name: aarch64 build
+ run: make aarch64build V=1 && make clean
+ - name: test-legacy
+ run: make -C tests test-legacy V=1 && make clean
+ - name: test-longmatch
+ run: make -C tests test-longmatch V=1 && make clean
+ - name: libzstd-nomt build
+ run: make -C lib libzstd-nomt V=1 && make clean
+
regression-test:
runs-on: ubuntu-latest
services:
diff --git a/.github/workflows/dev-long-tests.yml b/.github/workflows/dev-long-tests.yml
index eb8f40a9a44..899a57b754b 100644
--- a/.github/workflows/dev-long-tests.yml
+++ b/.github/workflows/dev-long-tests.yml
@@ -31,12 +31,12 @@ jobs:
run: make test
# lasts ~26mn
- make-test-osx:
+ make-test-macos:
runs-on: macos-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
- - name: OS-X test
- run: make test # make -c lib all doesn't work because of the fact that it's not a tty
+ - name: make test on macos
+ run: make test
# lasts ~24mn
make-test-32bit:
@@ -46,11 +46,12 @@ jobs:
READFROMBLOCKDEVICE: 1
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
- - name: make test
+ - name: make test # note: `make -j test success` seems to require a clean state
run: |
sudo apt-get -qqq update
make libc6install
- CFLAGS="-m32" make test
+ make clean
+ CFLAGS="-m32 -O2" make -j test V=1
no-intrinsics-fuzztest:
runs-on: ubuntu-latest
@@ -60,22 +61,22 @@ jobs:
run: MOREFLAGS="-DZSTD_NO_INTRINSICS" make -C tests fuzztest
tsan-zstreamtest:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
- name: thread sanitizer zstreamtest
run: CC=clang ZSTREAM_TESTTIME=-T3mn make tsan-test-zstream
- ubsan-zstreamtest:
- runs-on: ubuntu-20.04
+ uasan-zstreamtest:
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
- - name: undefined behavior sanitizer zstreamtest
+ - name: ub + address sanitizer on zstreamtest
run: CC=clang make uasan-test-zstream
# lasts ~15mn
tsan-fuzztest:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
- name: thread sanitizer fuzztest
@@ -94,7 +95,7 @@ jobs:
# lasts ~23mn
gcc-8-asan-ubsan-testzstd:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
- name: gcc-8 + ASan + UBSan + Test Zstd
@@ -106,14 +107,14 @@ jobs:
CC=gcc-8 make -j uasan-test-zstd cross.ini <
Return runtime library version, like "1.4.5". Requires v1.3.0+.
`compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
- `dstCapacity` is an upper bound of originalSize to regenerate.
- If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
- @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
- or an errorCode if it fails (which can be tested using ZSTD_isError()).
+ `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ Multiple compressed frames can be decompressed at once with this method.
+ The result will be the concatenation of all decompressed frames, back to back.
+ `dstCapacity` is an upper bound of originalSize to regenerate.
+ First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().
+ If maximum upper bound isn't known, prefer using streaming mode to decompress data.
+ @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ or an errorCode if it fails (which can be tested using ZSTD_isError()).
`src` should point to the start of a ZSTD encoded frame.
- `srcSize` must be at least as large as the frame header.
- hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- @return : - decompressed size of `src` frame content, if known
- - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
- note 1 : a 0 return value means the frame is valid but "empty".
- note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
- When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- In which case, it's necessary to use streaming mode to decompress data.
- Optionally, application can rely on some implicit limit,
- as ZSTD_decompress() only needs an upper bound of decompressed size.
- (For example, data could be necessarily cut into blocks <= 16 KB).
- note 3 : decompressed size is always present when compression is completed using single-pass functions,
- such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
- note 4 : decompressed size can be very large (64-bits value),
- potentially larger than what local system can handle as a single memory segment.
- In which case, it's necessary to use streaming mode to decompress data.
- note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- Always ensure return value fits within application's authorized limits.
- Each application can set its own limits.
- note 6 : This function replaces ZSTD_getDecompressedSize()
+ `src` should point to the start of a ZSTD encoded frame.
+ `srcSize` must be at least as large as the frame header.
+ hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ @return : - decompressed size of `src` frame content, if known
+ - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ note 1 : a 0 return value means the frame is valid but "empty".
+ note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode).
+ When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ In which case, it's necessary to use streaming mode to decompress data.
+ Optionally, application can rely on some implicit limit,
+ as ZSTD_decompress() only needs an upper bound of decompressed size.
+ (For example, data could be necessarily cut into blocks <= 16 KB).
+ note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ note 4 : decompressed size can be very large (64-bits value),
+ potentially larger than what local system can handle as a single memory segment.
+ In which case, it's necessary to use streaming mode to decompress data.
+ note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ Always ensure return value fits within application's authorized limits.
+ Each application can set its own limits.
+ note 6 : This function replaces ZSTD_getDecompressedSize()
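For reference, a minimal single-pass decode sketch matching the notes above, assuming a trusted input whose frame declares its content size (function and variable names are illustrative):

    #include <zstd.h>
    #include <stdlib.h>

    /* Decode one frame whose content size is declared in its header.
     * Returns the decoded buffer (caller frees) or NULL. */
    static void* decode_frame(const void* src, size_t srcSize, size_t* outSize)
    {
        unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
        if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN)
            return NULL;   /* not a frame, or no declared size: prefer streaming mode */
        {   void* const dst = malloc((size_t)rSize + 1);  /* +1 keeps rSize==0 allocatable */
            size_t const dSize = dst ? ZSTD_decompress(dst, (size_t)rSize, src, srcSize) : 0;
            if (dst == NULL || ZSTD_isError(dSize)) { free(dst); return NULL; }
            *outSize = dSize;
            return dst;
        }
    }

Per note 5 above, a real caller should also cap rSize against an application-defined limit before allocating.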
" << version << "
\n";
+ ostream << "Note: the content of this file has been automatically generated by parsing \"zstd.h\" \n";
ostream << "
\nContents
\n\n";
for (size_t i=0; i
zstd 1.5.6 Manual
+zstd 1.5.7 Manual
+Note: the content of this file has been automatically generated by parsing "zstd.h"
Contents
zstd 1.5.6 Manual
-Simple API
+Simple Core API
size_t ZSTD_compress( void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -88,38 +89,42 @@ zstd 1.5.6 Manual
size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
-
+Decompression helper functions
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
-
ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
@@ -140,50 +145,54 @@ zstd 1.5.6 Manual
or an error code if input is invalid
#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+maximum compressed size in worst case single-pass scenario.
+ When invoking `ZSTD_compress()`, or any other one-pass compression function,
+ it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
+ as it eliminates one potential failure scenario,
+ aka not enough room in dst buffer to write the compressed frame.
+ Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE .
+ In which case, ZSTD_compressBound() will return an error code
+ which can be tested using ZSTD_isError().
+
+ ZSTD_COMPRESSBOUND() :
+ same as ZSTD_compressBound(), but as a macro.
+ It can be used to produce constants, which can be useful for static allocation,
+ for example to size a static array on stack.
+ Will produce constant value 0 if srcSize is too large.
+
+
#include "zstd_errors.h"/* list of errors */
/* ZSTD_isError() :
 * Most ZSTD_* functions returning a size_t value can be tested for error,
 * using ZSTD_isError().
 * @return 1 if error, 0 otherwise */
-unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
-const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
-int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
-int ZSTD_maxCLevel(void); /*!< maximum compression level available */
-int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
+unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */
+const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */
+int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
+int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
When compressing many times,
- it is recommended to allocate a context just once,
+ it is recommended to allocate a compression context just once,
and reuse it for each successive compression operation.
- This will make workload friendlier for system's memory.
+ This will make the workload easier for system's memory.
Note : re-using context is just a speed / resource optimization.
It doesn't change the compression ratio, which remains identical.
- Note 2 : In multi-threaded environments,
- use one different context per thread for parallel execution.
+ Note 2: For parallel execution in multi-threaded environments,
+ use one different context per thread.
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTD_CCtx* ZSTD_createCCtx(void);
-size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);/* accept NULL pointer */
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
@@ -194,7 +203,7 @@ Compression context
When compressing many times,
this function compresses at the requested compression level,
__ignoring any other advanced parameter__ .
If any advanced parameter was set using the advanced API,
- they will all be reset. Only `compressionLevel` remains.
+ they will all be reset. Only @compressionLevel remains.
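A sketch of the context-reuse pattern recommended above, for a caller that compresses many independent buffers back to back (the helper name is made up):

    #include <zstd.h>
    #include <stddef.h>

    /* One context, reused across iterations: the allocation cost is paid only once. */
    size_t compress_many(void* dst, size_t dstCapacity,
                         const void* const srcs[], const size_t srcSizes[], size_t nbSrcs,
                         int level)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t total = 0;
        size_t n;
        if (cctx == NULL) return 0;
        for (n = 0; n < nbSrcs; n++) {
            size_t const cSize = ZSTD_compressCCtx(cctx,
                                    (char*)dst + total, dstCapacity - total,
                                    srcs[n], srcSizes[n], level);
            if (ZSTD_isError(cSize)) { total = cSize; break; }   /* propagate the error code */
            total += cSize;
        }
        ZSTD_freeCCtx(cctx);   /* accepts NULL */
        return total;
    }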
@@ -298,7 +307,7 @@ Decompression context
When decompressing many times,
* Special: value 0 means "use default strategy". */
ZSTD_c_targetCBlockSize=130, /* v1.5.6+
- * Attempts to fit compressed block size into approximatively targetCBlockSize.
+ * Attempts to fit compressed block size into approximately targetCBlockSize.
* Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
* Note that it's not a guarantee, just a convergence target (default:0).
* No target when targetCBlockSize == 0.
@@ -394,7 +403,8 @@ Decompression context
When decompressing many times,
* ZSTD_c_stableOutBuffer
* ZSTD_c_blockDelimiters
* ZSTD_c_validateSequences
- * ZSTD_c_useBlockSplitter
+ * ZSTD_c_blockSplitterLevel
+ * ZSTD_c_splitAfterSequences
* ZSTD_c_useRowMatchFinder
* ZSTD_c_prefetchCDictTables
* ZSTD_c_enableSeqProducerFallback
@@ -421,7 +431,8 @@ Decompression context
When decompressing many times,
ZSTD_c_experimentalParam16=1013,
ZSTD_c_experimentalParam17=1014,
ZSTD_c_experimentalParam18=1015,
- ZSTD_c_experimentalParam19=1016
+ ZSTD_c_experimentalParam19=1016,
+ ZSTD_c_experimentalParam20=1017
} ZSTD_cParameter;
typedef struct {
@@ -718,7 +729,7 @@ Streaming compression functions
typedef enum {
Streaming decompression - HowTo
A ZSTD_DStream object is required to track streaming operations.
Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
- ZSTD_DStream objects can be reused multiple times.
+ ZSTD_DStream objects can be re-employed multiple times.
Use ZSTD_initDStream() to start a new decompression operation.
@return : recommended first input size
@@ -728,16 +739,21 @@ Streaming compression functions
typedef enum {
The function will update both `pos` fields.
If `input.pos < input.size`, some input has not been consumed.
It's up to the caller to present again remaining data.
+
The function tries to flush all data decoded immediately, respecting output buffer size.
If `output.pos < output.size`, decoder has flushed everything it could.
- But if `output.pos == output.size`, there might be some data left within internal buffers.,
+
+ However, when `output.pos == output.size`, it's more difficult to know.
+ If @return > 0, the frame is not complete, meaning
+ either there is still some data left to flush within internal buffers,
+ or there is more input to read to complete the frame (or both).
In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
@return : 0 when a frame is completely decoded and fully flushed,
or an error code, which can be tested using ZSTD_isError(),
or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
the return value is a suggested next input size (just a hint for better latency)
- that will never request more than the remaining frame size.
+ that will never request more than the remaining content of the compressed frame.
@@ -763,9 +779,10 @@ Streaming decompression functions
Function will update both input and output `pos` fields exposing current state via these fields:
- `input.pos < input.size`, some input remaining and caller should provide remaining input
on the next call.
- - `output.pos < output.size`, decoder finished and flushed all remaining buffers.
- - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,
- call ZSTD_decompressStream() again to flush remaining data to output.
+ - `output.pos < output.size`, decoder flushed internal output buffer.
+ - `output.pos == output.size`, unflushed data potentially present in the internal buffers,
+ check ZSTD_decompressStream() @return value,
+ if > 0, invoke it again to flush remaining data to output.
Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
@return : 0 when a frame is completely decoded and fully flushed,
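A streaming-decompression sketch following the @return semantics above, modeled on the pattern used in examples/streaming_decompression.c (file I/O kept deliberately simple):

    #include <zstd.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Decompress a complete stream from fin to fout. Returns 0 on success. */
    static int decompress_stream(FILE* fin, FILE* fout)
    {
        size_t const inSize  = ZSTD_DStreamInSize();
        size_t const outSize = ZSTD_DStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_DStream* const dstream = ZSTD_createDStream();
        size_t lastRet = 0;
        int err = 0;

        if (!inBuf || !outBuf || !dstream) { err = 1; goto cleanup; }

        for (;;) {
            size_t const readSize = fread(inBuf, 1, inSize, fin);
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            if (readSize == 0) break;
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
                if (ZSTD_isError(ret)) { err = 1; goto cleanup; }
                fwrite(outBuf, 1, output.pos, fout);
                lastRet = ret;   /* 0 == frame completely decoded and flushed */
            }
        }
        if (lastRet != 0) err = 1;   /* input ended in the middle of a frame */

    cleanup:
        ZSTD_freeDStream(dstream);
        free(inBuf); free(outBuf);
        return err;
    }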
@@ -1067,7 +1084,7 @@ Streaming decompression functions
*
* Note: This field is optional. ZSTD_generateSequences() will calculate the value of
* 'rep', but repeat offsets do not necessarily need to be calculated from an external
- * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * sequence provider perspective. For example, ZSTD_compressSequences() does not
* use this 'rep' field at all (as of now).
*/
} ZSTD_Sequence;
@@ -1172,14 +1189,14 @@ Streaming decompression functions
} ZSTD_literalCompressionMode_e;
typedef enum {
- /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
- * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
- * or ZSTD_ps_disable allow for a force enable/disable the feature.
+ /* Note: This enum controls features which are conditionally beneficial.
+ * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto),
+ * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable force enable/disable the feature.
*/
ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */
ZSTD_ps_enable = 1, /* Force-enable the feature */
ZSTD_ps_disable = 2 /* Do not use the feature */
-} ZSTD_paramSwitch_e;
+} ZSTD_ParamSwitch_e;
Frame header and size functions
@@ -1227,13 +1244,13 @@ Streaming decompression functions
or an error code (if srcSize is too small)
-typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e;
typedef struct {
unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
unsigned blockSizeMax;
- ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
unsigned headerSize;
unsigned dictID;
unsigned checksumFlag;
@@ -1241,11 +1258,11 @@ Streaming decompression functions
unsigned _reserved2;
} ZSTD_frameHeader;
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
/*! ZSTD_getFrameHeader_advanced() :
* same as ZSTD_getFrameHeader(),
* with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
decode Frame Header, or requires larger `srcSize`.
@return : 0, `zfhPtr` is correctly filled,
>0, `srcSize` is too small, value is wanted `srcSize` amount,
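A small sketch of the header-probing flow described above (experimental API, so ZSTD_STATIC_LINKING_ONLY is assumed; the ZSTD_FrameHeader spelling follows this diff):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>

    /* Inspect a frame header without consuming input.
     * Returns 0 when zfh is filled, or the error / wanted-srcSize value otherwise. */
    static size_t probe_frame(const void* src, size_t srcSize)
    {
        ZSTD_FrameHeader zfh;
        size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (ZSTD_isError(ret)) {
            fprintf(stderr, "not a valid frame: %s\n", ZSTD_getErrorName(ret));
            return ret;
        }
        if (ret > 0) return ret;   /* >0 == wanted srcSize: provide at least that many bytes and call again */
        printf("windowSize=%llu contentSize=%llu checksum=%u dictID=%u\n",
               zfh.windowSize, zfh.frameContentSize, zfh.checksumFlag, zfh.dictID);
        return 0;
    }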
@@ -1298,9 +1315,9 @@
Streaming decompression functions
typedef enum {
- ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
- ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
-} ZSTD_sequenceFormat_e;
+ ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */
+} ZSTD_SequenceFormat_e;
ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
`srcSize` : size of the input buffer
@@ -1311,19 +1328,37 @@
Streaming decompression functions
- Generate sequences using ZSTD_compress2(), given a source buffer.
+
ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
+ZSTDLIB_STATIC_API size_t
+ZSTD_generateSequences(ZSTD_CCtx* zc,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
+ const void* src, size_t srcSize);
+ WARNING: This function is meant for debugging and informational purposes ONLY!
+ Its implementation is flawed, and it will be deleted in a future version.
+ It is not guaranteed to succeed, as there are several cases where it will give
+ up and fail. You should NOT use this function in production code.
+
+ This function is deprecated, and will be removed in a future version.
+
+ Generate sequences using ZSTD_compress2(), given a source buffer.
+
+ @param zc The compression context to be used for ZSTD_compress2(). Set any
+ compression parameters you need on this context.
+ @param outSeqs The output sequences buffer of size @p outSeqsSize
+ @param outSeqsCapacity The size of the output sequences buffer.
+ ZSTD_sequenceBound(srcSize) is an upper bound on the number
+ of sequences that can be generated.
+ @param src The source buffer to generate sequences from of size @p srcSize.
+ @param srcSize The size of the source buffer.
Each block will end with a dummy sequence
with offset == 0, matchLength == 0, and litLength == length of last literals.
litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
simply acts as a block delimiter.
- @zc can be used to insert custom compression params.
- This function invokes ZSTD_compress2().
-
- The output of this function can be fed into ZSTD_compressSequences() with CCtx
- setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
- @return : number of sequences generated
+ @returns The number of sequences generated, necessarily less than
+ ZSTD_sequenceBound(srcSize), or an error code that can be checked
+ with ZSTD_isError().
@@ -1341,13 +1376,14 @@ Streaming decompression functions
ZSTDLIB_STATIC_API size_t
-ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
- const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
- const void* src, size_t srcSize);
+ZSTD_compressSequences(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
@src contains the entire input (not just the literals).
If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
- If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
The entire source is compressed into a single frame.
The compression behavior changes based on cctx params. In particular:
@@ -1356,11 +1392,17 @@
Streaming decompression functions
the block size derived from the cctx, and sequences may be split. This is the default setting.
If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
- block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+
+ When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes
+ using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit
+ can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.
+ By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).
+ ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
- If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
- behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
- specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
+ If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
+ behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
+ specifics regarding offset/matchlength requirements) and then bail out and return an error.
In addition to the two adjustable experimental params, there are other important cctx params.
- ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
@@ -1368,9 +1410,33 @@ Streaming decompression functions
- ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
- Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
- Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
- and cannot emit an RLE block that disagrees with the repcode history
+ Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.
+ Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly,
+ and cannot emit an RLE block that disagrees with the repcode history.
+ @return : final compressed size, or a ZSTD error code.
+
+
+
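A condensed sketch of the parameter setup implied by the notes above (experimental API; producing the ZSTD_Sequence array itself is left to the caller):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Compress `src` from caller-provided sequences that carry explicit block delimiters. */
    size_t compress_from_sequences(ZSTD_CCtx* cctx,
                                   void* dst, size_t dstCapacity,
                                   const ZSTD_Sequence* seqs, size_t nbSeqs,
                                   const void* src, size_t srcSize)
    {
        /* the sequences are expected to embed explicit block delimiters */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        /* validate offsets/matchLengths instead of trusting them blindly */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
        return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
    }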
+ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t nbSequences,
+ const void* literals, size_t litSize, size_t litCapacity,
+ size_t decompressedSize);
+ This is a variant of ZSTD_compressSequences() which,
+ instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),
+ aka all the literals, already extracted and laid out into a single continuous buffer.
+ This can be useful if the process generating the sequences also happens to generate the buffer of literals,
+ thus skipping an extraction + caching stage.
+ It's a speed optimization, useful when the right conditions are met,
+ but it also features the following limitations:
+ - Only supports explicit delimiter mode
+ - Currently does not support Sequences validation (so input Sequences are trusted)
+ - Not compatible with frame checksum, which must be disabled
+ - If any block is incompressible, will fail and return an error
+ - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.
+ - the buffer @literals must have a size @litCapacity which is larger than @litSize by at least 8 bytes.
+ - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
@return : final compressed size, or a ZSTD error code.
@@ -1512,13 +1578,14 @@ Streaming decompression functions
#ifdef __GNUC__
__attribute__((__unused__))
#endif
-ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
These prototypes make it possible to pass your own allocation/free functions.
ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
All allocation/free operations will be completed using these custom variants instead of regular ones.
+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
+
typedef struct POOL_ctx_s ZSTD_threadPool;
ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
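An illustrative hookup of the custom-allocator prototypes documented above; the counting allocator is purely hypothetical:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    /* Hypothetical tracking allocator: counts allocated bytes via the opaque pointer. */
    static void* counting_alloc(void* opaque, size_t size)
    {
        size_t* const allocated = (size_t*)opaque;
        *allocated += size;
        return malloc(size);
    }
    static void counting_free(void* opaque, void* address)
    {
        (void)opaque;   /* a real tracker would need per-allocation bookkeeping here */
        free(address);
    }

    ZSTD_CCtx* create_tracked_cctx(size_t* allocatedBytes)
    {
        ZSTD_customMem const cmem = { counting_alloc, counting_free, allocatedBytes };
        return ZSTD_createCCtx_advanced(cmem);   /* internal allocations go through cmem */
    }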
@@ -2057,7 +2124,7 @@ Buffer-less streaming compression functions
ZSTD_DEPR
>0 : `srcSize` is too small, please provide at least result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,
such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
As a consequence, check that values remain within valid application range.
diff --git a/lib/Makefile b/lib/Makefile
index 8bfdade9f12..a6a0eb09d82 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -63,6 +63,8 @@ CPPFLAGS_DYNLIB += -DZSTD_MULTITHREAD # dynamic library build defaults to multi
LDFLAGS_DYNLIB += -pthread
CPPFLAGS_STATICLIB += # static library build defaults to single-threaded
+# pkg-config Libs.private points to LDFLAGS_DYNLIB
+PCLIB := $(LDFLAGS_DYNLIB)
ifeq ($(findstring GCC,$(CCVER)),GCC)
decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize
@@ -71,13 +73,15 @@ endif
# macOS linker doesn't support -soname, and use different extension
# see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html
-ifeq ($(UNAME), Darwin)
+UNAME_TARGET_SYSTEM ?= $(UNAME)
+
+ifeq ($(UNAME_TARGET_SYSTEM), Darwin)
SHARED_EXT = dylib
SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT)
SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT)
SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER)
else
- ifeq ($(UNAME), AIX)
+ ifeq ($(UNAME_TARGET_SYSTEM), AIX)
SONAME_FLAGS =
else
SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR)
@@ -186,12 +190,15 @@ lib : libzstd.a libzstd
%-mt : CPPFLAGS_DYNLIB := -DZSTD_MULTITHREAD
%-mt : CPPFLAGS_STATICLIB := -DZSTD_MULTITHREAD
%-mt : LDFLAGS_DYNLIB := -pthread
+%-mt : PCLIB :=
+%-mt : PCMTLIB := $(LDFLAGS_DYNLIB)
%-mt : %
@echo multi-threaded build completed
%-nomt : CPPFLAGS_DYNLIB :=
%-nomt : LDFLAGS_DYNLIB :=
%-nomt : CPPFLAGS_STATICLIB :=
+%-nomt : PCLIB :=
%-nomt : %
@echo single-threaded build completed
@@ -261,7 +268,7 @@ clean:
#-----------------------------------------------------------------------------
# make install is validated only for below listed environments
#-----------------------------------------------------------------------------
-ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT CYGWIN_NT))
+ifneq (,$(filter Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT% CYGWIN_NT%,$(UNAME)))
lib: libzstd.pc
@@ -292,13 +299,21 @@ PCLIBPREFIX := $(if $(findstring $(LIBDIR),$(PCLIBDIR)),,$${exec_prefix})
# to PREFIX, rather than as a resolved value.
PCEXEC_PREFIX := $(if $(HAS_EXPLICIT_EXEC_PREFIX),$(EXEC_PREFIX),$${prefix})
-ifneq (,$(filter $(UNAME),FreeBSD NetBSD DragonFly))
+
+ifneq ($(MT),)
+ PCLIB :=
+ PCMTLIB := $(LDFLAGS_DYNLIB)
+else
+ PCLIB := $(LDFLAGS_DYNLIB)
+endif
+
+ifneq (,$(filter FreeBSD NetBSD DragonFly,$(UNAME)))
PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig
else
PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig
endif
-ifneq (,$(filter $(UNAME),SunOS))
+ifneq (,$(filter SunOS,$(UNAME)))
INSTALL ?= ginstall
else
INSTALL ?= install
@@ -308,6 +323,10 @@ INSTALL_PROGRAM ?= $(INSTALL)
INSTALL_DATA ?= $(INSTALL) -m 644
+# pkg-config library define.
+# For static single-threaded library declare -pthread in Libs.private
+# For static multi-threaded library declare -pthread in Libs and Cflags
+.PHONY: libzstd.pc
libzstd.pc: libzstd.pc.in
@echo creating pkgconfig
@sed \
@@ -316,7 +335,8 @@ libzstd.pc: libzstd.pc.in
-e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \
-e 's|@LIBDIR@|$(PCLIBPREFIX)$(PCLIBDIR)|' \
-e 's|@VERSION@|$(VERSION)|' \
- -e 's|@LIBS_PRIVATE@|$(LDFLAGS_DYNLIB)|' \
+ -e 's|@LIBS_MT@|$(PCMTLIB)|' \
+ -e 's|@LIBS_PRIVATE@|$(PCLIB)|' \
$< >$@
.PHONY: install
diff --git a/lib/README.md b/lib/README.md
index a560f06cada..b37f5fc4f3f 100644
--- a/lib/README.md
+++ b/lib/README.md
@@ -27,12 +27,16 @@ Enabling multithreading requires 2 conditions :
For convenience, we provide a build target to generate multi and single threaded libraries:
- Force enable multithreading on both dynamic and static libraries by appending `-mt` to the target, e.g. `make lib-mt`.
+ Note that the `.pc` generated on calling `make lib-mt` will already include the required Libs and Cflags.
- Force disable multithreading on both dynamic and static libraries by appending `-nomt` to the target, e.g. `make lib-nomt`.
- By default, as mentioned before, dynamic library is multithreaded, and static library is single-threaded, e.g. `make lib`.
When linking a POSIX program with a multithreaded version of `libzstd`,
note that it's necessary to invoke the `-pthread` flag during link stage.
+The `.pc` generated from `make install` or `make install-pc` always assumes a single-threaded static library
+is compiled. To correctly generate a `.pc` for the multi-threaded static library, set `MT=1` as ENV variable.
+
Multithreading capabilities are exposed
via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.4.3/lib/zstd.h#L351).
@@ -145,6 +149,13 @@ The file structure is designed to make this selection manually achievable for an
will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in
the shared library, which is now hidden by default.
+- The build macro `STATIC_BMI2` can be set to 1 to force usage of `bmi2` instructions.
+ It is generally not necessary to set this build macro,
+ because `STATIC_BMI2` will be automatically set to 1
+ on detecting the presence of the corresponding instruction set in the compilation target.
+ It's nonetheless available as an optional manual toggle for better control,
+ and can also be used to forcefully disable `bmi2` instructions by setting it to 0.
+
- The build macro `DYNAMIC_BMI2` can be set to 1 or 0 in order to generate binaries
which can detect at runtime the presence of BMI2 instructions, and use them only if present.
These instructions contribute to better performance, notably on the decoder side.
diff --git a/lib/common/bits.h b/lib/common/bits.h
index def56c474c3..f452f088914 100644
--- a/lib/common/bits.h
+++ b/lib/common/bits.h
@@ -28,27 +28,29 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val)
MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val)
{
assert(val != 0);
-# if defined(_MSC_VER)
-# if STATIC_BMI2 == 1
- return (unsigned)_tzcnt_u32(val);
-# else
- if (val != 0) {
- unsigned long r;
- _BitScanForward(&r, val);
- return (unsigned)r;
- } else {
- /* Should not reach this code path */
- __assume(0);
- }
-# endif
-# elif defined(__GNUC__) && (__GNUC__ >= 4)
- return (unsigned)__builtin_ctz(val);
-# else
- return ZSTD_countTrailingZeros32_fallback(val);
-# endif
+#if defined(_MSC_VER)
+# if STATIC_BMI2
+ return (unsigned)_tzcnt_u32(val);
+# else
+ if (val != 0) {
+ unsigned long r;
+ _BitScanForward(&r, val);
+ return (unsigned)r;
+ } else {
+ __assume(0); /* Should not reach this code path */
+ }
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (unsigned)__builtin_ctz(val);
+#elif defined(__ICCARM__)
+ return (unsigned)__builtin_ctz(val);
+#else
+ return ZSTD_countTrailingZeros32_fallback(val);
+#endif
}
-MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) {
+MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val)
+{
assert(val != 0);
{
static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29,
@@ -67,86 +69,89 @@ MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) {
MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val)
{
assert(val != 0);
-# if defined(_MSC_VER)
-# if STATIC_BMI2 == 1
- return (unsigned)_lzcnt_u32(val);
-# else
- if (val != 0) {
- unsigned long r;
- _BitScanReverse(&r, val);
- return (unsigned)(31 - r);
- } else {
- /* Should not reach this code path */
- __assume(0);
- }
-# endif
-# elif defined(__GNUC__) && (__GNUC__ >= 4)
- return (unsigned)__builtin_clz(val);
-# else
- return ZSTD_countLeadingZeros32_fallback(val);
-# endif
+#if defined(_MSC_VER)
+# if STATIC_BMI2
+ return (unsigned)_lzcnt_u32(val);
+# else
+ if (val != 0) {
+ unsigned long r;
+ _BitScanReverse(&r, val);
+ return (unsigned)(31 - r);
+ } else {
+ __assume(0); /* Should not reach this code path */
+ }
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (unsigned)__builtin_clz(val);
+#elif defined(__ICCARM__)
+ return (unsigned)__builtin_clz(val);
+#else
+ return ZSTD_countLeadingZeros32_fallback(val);
+#endif
}
MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val)
{
assert(val != 0);
-# if defined(_MSC_VER) && defined(_WIN64)
-# if STATIC_BMI2 == 1
- return (unsigned)_tzcnt_u64(val);
-# else
- if (val != 0) {
- unsigned long r;
- _BitScanForward64(&r, val);
- return (unsigned)r;
- } else {
- /* Should not reach this code path */
- __assume(0);
- }
-# endif
-# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__)
- return (unsigned)__builtin_ctzll(val);
-# else
- {
- U32 mostSignificantWord = (U32)(val >> 32);
- U32 leastSignificantWord = (U32)val;
- if (leastSignificantWord == 0) {
- return 32 + ZSTD_countTrailingZeros32(mostSignificantWord);
- } else {
- return ZSTD_countTrailingZeros32(leastSignificantWord);
- }
+#if defined(_MSC_VER) && defined(_WIN64)
+# if STATIC_BMI2
+ return (unsigned)_tzcnt_u64(val);
+# else
+ if (val != 0) {
+ unsigned long r;
+ _BitScanForward64(&r, val);
+ return (unsigned)r;
+ } else {
+ __assume(0); /* Should not reach this code path */
+ }
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__)
+ return (unsigned)__builtin_ctzll(val);
+#elif defined(__ICCARM__)
+ return (unsigned)__builtin_ctzll(val);
+#else
+ {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (leastSignificantWord == 0) {
+ return 32 + ZSTD_countTrailingZeros32(mostSignificantWord);
+ } else {
+ return ZSTD_countTrailingZeros32(leastSignificantWord);
}
-# endif
+ }
+#endif
}
MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val)
{
assert(val != 0);
-# if defined(_MSC_VER) && defined(_WIN64)
-# if STATIC_BMI2 == 1
- return (unsigned)_lzcnt_u64(val);
-# else
- if (val != 0) {
- unsigned long r;
- _BitScanReverse64(&r, val);
- return (unsigned)(63 - r);
- } else {
- /* Should not reach this code path */
- __assume(0);
- }
-# endif
-# elif defined(__GNUC__) && (__GNUC__ >= 4)
- return (unsigned)(__builtin_clzll(val));
-# else
- {
- U32 mostSignificantWord = (U32)(val >> 32);
- U32 leastSignificantWord = (U32)val;
- if (mostSignificantWord == 0) {
- return 32 + ZSTD_countLeadingZeros32(leastSignificantWord);
- } else {
- return ZSTD_countLeadingZeros32(mostSignificantWord);
- }
+#if defined(_MSC_VER) && defined(_WIN64)
+# if STATIC_BMI2
+ return (unsigned)_lzcnt_u64(val);
+# else
+ if (val != 0) {
+ unsigned long r;
+ _BitScanReverse64(&r, val);
+ return (unsigned)(63 - r);
+ } else {
+ __assume(0); /* Should not reach this code path */
+ }
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (unsigned)(__builtin_clzll(val));
+#elif defined(__ICCARM__)
+ return (unsigned)(__builtin_clzll(val));
+#else
+ {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (mostSignificantWord == 0) {
+ return 32 + ZSTD_countLeadingZeros32(leastSignificantWord);
+ } else {
+ return ZSTD_countLeadingZeros32(mostSignificantWord);
}
-# endif
+ }
+#endif
}
MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val)
diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h
index 676044989c9..3b7ad483d9e 100644
--- a/lib/common/bitstream.h
+++ b/lib/common/bitstream.h
@@ -14,9 +14,6 @@
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
-#if defined (__cplusplus)
-extern "C" {
-#endif
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
@@ -32,7 +29,6 @@ extern "C" {
#include "error_private.h" /* error codes and messages */
#include "bits.h" /* ZSTD_highbit32 */
-
/*=========================================
* Target specific
=========================================*/
@@ -52,12 +48,13 @@ extern "C" {
/*-******************************************
* bitStream encoding API (write forward)
********************************************/
+typedef size_t BitContainerType;
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
typedef struct {
- size_t bitContainer;
+ BitContainerType bitContainer;
unsigned bitPos;
char* startPtr;
char* ptr;
@@ -65,7 +62,7 @@ typedef struct {
} BIT_CStream_t;
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits);
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
@@ -74,7 +71,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
*
* bits are first added to a local register.
-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Local register is BitContainerType, 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
* Writing data into memory is an explicit operation, performed by the flushBits function.
* Hence keep track how many bits are potentially stored into local register to avoid register overflow.
* After a flushBits, a maximum of 7 bits might still be stored into local register.
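For orientation, a minimal internal-usage sketch of the encoding workflow described above (this is a library-internal header, so the snippet only makes sense inside the zstd source tree):

    #include "bitstream.h"   /* lib/common internal header */

    /* Pack two small fields into a backward bitstream.
     * Returns the number of bytes written, or 0 on error/overflow. */
    static size_t pack_fields(void* dst, size_t dstCapacity, unsigned a3bits, unsigned b5bits)
    {
        BIT_CStream_t bitC;
        if (BIT_initCStream(&bitC, dst, dstCapacity) != 0) return 0;  /* dstCapacity too small */
        BIT_addBits(&bitC, a3bits, 3);   /* accumulates into the local register */
        BIT_addBits(&bitC, b5bits, 5);
        BIT_flushBits(&bitC);            /* explicit write of the register to memory */
        return BIT_closeCStream(&bitC);  /* appends the end mark; 0 means overflow */
    }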
@@ -90,7 +87,6 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
-typedef size_t BitContainerType;
typedef struct {
BitContainerType bitContainer;
unsigned bitsConsumed;
@@ -106,7 +102,7 @@ typedef enum { BIT_DStream_unfinished = 0, /* fully refilled */
} BIT_DStream_status; /* result of BIT_reloadDStream() */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
@@ -125,7 +121,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/*-****************************************
* unsafe API
******************************************/
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
@@ -163,10 +159,15 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
return 0;
}
-FORCE_INLINE_TEMPLATE size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits(BitContainerType bitContainer, U32 const nbBits)
{
-#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS)
- return _bzhi_u64(bitContainer, nbBits);
+#if STATIC_BMI2 && !defined(ZSTD_NO_INTRINSICS)
+# if (defined(__x86_64__) || defined(_M_X64)) && !defined(__ILP32__)
+ return _bzhi_u64(bitContainer, nbBits);
+# else
+ DEBUG_STATIC_ASSERT(sizeof(bitContainer) == sizeof(U32));
+ return _bzhi_u32(bitContainer, nbBits);
+# endif
#else
assert(nbBits < BIT_MASK_SIZE);
return bitContainer & BIT_mask[nbBits];
@@ -177,7 +178,7 @@ FORCE_INLINE_TEMPLATE size_t BIT_getLowerBits(size_t bitContainer, U32 const nbB
* can add up to 31 bits into `bitC`.
* Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ BitContainerType value, unsigned nbBits)
{
DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
assert(nbBits < BIT_MASK_SIZE);
@@ -190,7 +191,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
* works only if `value` is _clean_,
* meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ BitContainerType value, unsigned nbBits)
{
assert((value>>nbBits) == 0);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
@@ -237,7 +238,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
BIT_addBitsFast(bitC, 1, 1); /* endMark */
BIT_flushBits(bitC);
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
- return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+ return (size_t)(bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}
@@ -298,12 +299,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
return srcSize;
}
-FORCE_INLINE_TEMPLATE size_t BIT_getUpperBits(BitContainerType bitContainer, U32 const start)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits(BitContainerType bitContainer, U32 const start)
{
return bitContainer >> start;
}
-FORCE_INLINE_TEMPLATE size_t BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits)
{
U32 const regMask = sizeof(bitContainer)*8 - 1;
/* if start > regMask, bitstream is corrupted, and result is undefined */
@@ -313,7 +314,7 @@ FORCE_INLINE_TEMPLATE size_t BIT_getMiddleBits(BitContainerType bitContainer, U3
* such cpus old (pre-Haswell, 2013) and their performance is not of that
* importance.
*/
-#if defined(__x86_64__) || defined(_M_X86)
+#if defined(__x86_64__) || defined(_M_X64)
return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
#else
return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
@@ -326,7 +327,7 @@ FORCE_INLINE_TEMPLATE size_t BIT_getMiddleBits(BitContainerType bitContainer, U3
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted */
-FORCE_INLINE_TEMPLATE size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
/* arbitrate between double-shift and shift+mask */
#if 1
@@ -342,7 +343,7 @@ FORCE_INLINE_TEMPLATE size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits
/*! BIT_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC BitContainerType BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
assert(nbBits >= 1);
@@ -358,18 +359,18 @@ FORCE_INLINE_TEMPLATE void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
-FORCE_INLINE_TEMPLATE size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
- size_t const value = BIT_lookBits(bitD, nbBits);
+ BitContainerType const value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_readBitsFast() :
* unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+MEM_STATIC BitContainerType BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
- size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ BitContainerType const value = BIT_lookBitsFast(bitD, nbBits);
assert(nbBits >= 1);
BIT_skipBits(bitD, nbBits);
return value;
@@ -450,8 +451,4 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* BITSTREAM_H_MODULE */
diff --git a/lib/common/compiler.h b/lib/common/compiler.h
index 31880ecbe16..1f7da50e6da 100644
--- a/lib/common/compiler.h
+++ b/lib/common/compiler.h
@@ -27,7 +27,7 @@
# define INLINE_KEYWORD
#endif
-#if defined(__GNUC__) || defined(__ICCARM__)
+#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__)
# define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
# define FORCE_INLINE_ATTR __forceinline
@@ -54,7 +54,7 @@
#endif
/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
-#if defined(__GNUC__)
+#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__)
# define UNUSED_ATTR __attribute__((unused))
#else
# define UNUSED_ATTR
@@ -95,6 +95,8 @@
#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */
#if defined(__GNUC__)
# define MEM_STATIC static __inline UNUSED_ATTR
+#elif defined(__IAR_SYSTEMS_ICC__)
+# define MEM_STATIC static inline UNUSED_ATTR
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
@@ -108,7 +110,7 @@
#ifdef _MSC_VER
# define FORCE_NOINLINE static __declspec(noinline)
#else
-# if defined(__GNUC__) || defined(__ICCARM__)
+# if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__)
# define FORCE_NOINLINE static __attribute__((__noinline__))
# else
# define FORCE_NOINLINE static
@@ -117,7 +119,7 @@
/* target attribute */
-#if defined(__GNUC__) || defined(__ICCARM__)
+#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__)
# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
# define TARGET_ATTRIBUTE(target)
@@ -205,30 +207,21 @@
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
-/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
-#ifndef STATIC_BMI2
-# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
-# ifdef __AVX2__ //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2
-# define STATIC_BMI2 1
-# endif
-# elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__)
-# define STATIC_BMI2 1
-# endif
-#endif
-
-#ifndef STATIC_BMI2
- #define STATIC_BMI2 0
-#endif
-
/* compile time determination of SIMD support */
#if !defined(ZSTD_NO_INTRINSICS)
-# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
+# if defined(__AVX2__)
+# define ZSTD_ARCH_X86_AVX2
+# endif
+# if defined(__SSE2__) || defined(_M_X64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
# define ZSTD_ARCH_X86_SSE2
# endif
# if defined(__ARM_NEON) || defined(_M_ARM64)
# define ZSTD_ARCH_ARM_NEON
# endif
#
+# if defined(ZSTD_ARCH_X86_AVX2)
+#  include <immintrin.h>
+# endif
# if defined(ZSTD_ARCH_X86_SSE2)
#  include <emmintrin.h>
# elif defined(ZSTD_ARCH_ARM_NEON)
@@ -273,9 +266,15 @@
#endif
/*-**************************************************************
-* Alignment check
+* Alignment
*****************************************************************/
+/* @return 1 if @u is a 2^n value, 0 otherwise
+ * useful to check a value is valid for alignment restrictions */
+MEM_STATIC int ZSTD_isPower2(size_t u) {
+ return (u & (u-1)) == 0;
+}
+
/* this test was initially positioned in mem.h,
* but this file is removed (or replaced) for linux kernel
* so it's now hosted in compiler.h,
@@ -301,6 +300,21 @@
# endif
#endif /* ZSTD_ALIGNOF */
+#ifndef ZSTD_ALIGNED
+/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. */
+# if defined(__GNUC__) || defined(__clang__)
+# define ZSTD_ALIGNED(a) __attribute__((aligned(a)))
+# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
+# define ZSTD_ALIGNED(a) _Alignas(a)
+#elif defined(_MSC_VER)
+# define ZSTD_ALIGNED(n) __declspec(align(n))
+# else
+ /* this compiler will require its own alignment instruction */
+# define ZSTD_ALIGNED(...)
+# endif
+#endif /* ZSTD_ALIGNED */
+
+
/*-**************************************************************
* Sanitizer
*****************************************************************/
@@ -324,7 +338,7 @@
#endif
/**
- * Helper function to perform a wrapped pointer difference without trigging
+ * Helper function to perform a wrapped pointer difference without triggering
* UBSAN.
*
* @returns lhs - rhs with wrapping
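Illustration, not part of the patch: ZSTD_isPower2() exists because alignment arithmetic is only valid for power-of-two alignments, where rounding up collapses to bit masking, and ZSTD_ALIGNED() abstracts the compiler-specific way of requesting such an alignment. A standalone C sketch (GCC/Clang spelling, hypothetical names):

#include <stddef.h>
#include <assert.h>

static int is_pow2(size_t u) { return (u & (u - 1)) == 0; }    /* same test as ZSTD_isPower2() */

/* round n up to a multiple of align; only correct when align is a power of two */
static size_t round_up(size_t n, size_t align)
{
    assert(align > 0 && is_pow2(align));
    return (n + align - 1) & ~(align - 1);
}

/* what ZSTD_ALIGNED(64) expands to under GCC/Clang */
static unsigned char scratch[128] __attribute__((aligned(64)));
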
diff --git a/lib/common/cpu.h b/lib/common/cpu.h
index 0e684d9ad8e..3f15d560f0c 100644
--- a/lib/common/cpu.h
+++ b/lib/common/cpu.h
@@ -35,7 +35,7 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
U32 f7b = 0;
U32 f7c = 0;
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
-#if !defined(__clang__)
+#if !defined(_M_X64) || !defined(__clang__) || __clang_major__ >= 16
int reg[4];
__cpuid((int*)reg, 0);
{
diff --git a/lib/common/debug.h b/lib/common/debug.h
index a16b69e5743..4b60ddf7f51 100644
--- a/lib/common/debug.h
+++ b/lib/common/debug.h
@@ -32,10 +32,6 @@
#ifndef DEBUG_H_12987983217
#define DEBUG_H_12987983217
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
/* static assert is triggered at compile time, leaving no runtime artefact.
* static assert only works with compile-time constants.
@@ -108,9 +104,4 @@ extern int g_debuglevel; /* the variable is only declared,
# define DEBUGLOG(l, ...) do { } while (0) /* disabled */
#endif
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* DEBUG_H_12987983217 */
diff --git a/lib/common/error_private.c b/lib/common/error_private.c
index 075fc5ef42f..7e7f24f6756 100644
--- a/lib/common/error_private.c
+++ b/lib/common/error_private.c
@@ -40,6 +40,7 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(cannotProduce_uncompressedBlock): return "This mode cannot generate an uncompressed block";
case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected";
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
diff --git a/lib/common/error_private.h b/lib/common/error_private.h
index 0156010c745..9dcc8595123 100644
--- a/lib/common/error_private.h
+++ b/lib/common/error_private.h
@@ -13,11 +13,6 @@
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
/* ****************************************
* Dependencies
******************************************/
@@ -26,7 +21,6 @@ extern "C" {
#include "debug.h"
#include "zstd_deps.h" /* size_t */
-
/* ****************************************
* Compiler-specific
******************************************/
@@ -161,8 +155,4 @@ void _force_has_format_string(const char *format, ...) {
} \
} while(0)
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ERROR_H_MODULE */
diff --git a/lib/common/fse.h b/lib/common/fse.h
index 2ae128e60db..b6c2a3e9ccc 100644
--- a/lib/common/fse.h
+++ b/lib/common/fse.h
@@ -11,11 +11,6 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#ifndef FSE_H
#define FSE_H
@@ -25,7 +20,6 @@ extern "C" {
******************************************/
#include "zstd_deps.h" /* size_t, ptrdiff_t */
-
/*-*****************************************
* FSE_PUBLIC_API : control library symbols visibility
******************************************/
@@ -232,11 +226,8 @@ If there is an error, the function will return an error code, which can be teste
#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY
-
-/* *** Dependency *** */
#include "bitstream.h"
-
/* *****************************************
* Static allocation
*******************************************/
@@ -465,13 +456,13 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un
FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* const stateTable = (const U16*)(statePtr->stateTable);
U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
- BIT_addBits(bitC, (size_t)statePtr->value, nbBitsOut);
+ BIT_addBits(bitC, (BitContainerType)statePtr->value, nbBitsOut);
statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}
MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
- BIT_addBits(bitC, (size_t)statePtr->value, statePtr->stateLog);
+ BIT_addBits(bitC, (BitContainerType)statePtr->value, statePtr->stateLog);
BIT_flushBits(bitC);
}
@@ -631,10 +622,4 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
-
#endif /* FSE_STATIC_LINKING_ONLY */
-
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/lib/common/fse_decompress.c b/lib/common/fse_decompress.c
index 0dcc4640d09..c8f1bb0cf23 100644
--- a/lib/common/fse_decompress.c
+++ b/lib/common/fse_decompress.c
@@ -190,6 +190,8 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
+ RETURN_ERROR_IF(BIT_reloadDStream(&bitD)==BIT_DStream_overflow, corruption_detected, "");
+
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
diff --git a/lib/common/huf.h b/lib/common/huf.h
index 99bf85d6f4e..4b142c4f996 100644
--- a/lib/common/huf.h
+++ b/lib/common/huf.h
@@ -12,10 +12,6 @@
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#ifndef HUF_H_298734234
#define HUF_H_298734234
@@ -25,7 +21,6 @@ extern "C" {
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
-
/* *** Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
@@ -280,7 +275,3 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize
#endif
#endif /* HUF_H_298734234 */
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/lib/common/mem.h b/lib/common/mem.h
index 096f4be519d..e66a2eaeb27 100644
--- a/lib/common/mem.h
+++ b/lib/common/mem.h
@@ -11,10 +11,6 @@
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
/*-****************************************
* Dependencies
******************************************/
@@ -30,6 +26,8 @@ extern "C" {
#if defined(_MSC_VER) /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
+#elif defined(__ICCARM__)
+#  include <intrinsics.h>
#endif
/*-**************************************************************
@@ -74,7 +72,6 @@ extern "C" {
typedef signed long long S64;
#endif
-
/*-**************************************************************
* Memory I/O API
*****************************************************************/
@@ -150,10 +147,12 @@ MEM_STATIC unsigned MEM_isLittleEndian(void)
return 1;
#elif defined(__clang__) && __BIG_ENDIAN__
return 0;
-#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86)
+#elif defined(_MSC_VER) && (_M_X64 || _M_IX86)
return 1;
#elif defined(__DMC__) && defined(_M_IX86)
return 1;
+#elif defined(__IAR_SYSTEMS_ICC__) && __LITTLE_ENDIAN__
+ return 1;
#else
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
@@ -246,6 +245,8 @@ MEM_STATIC U32 MEM_swap32(U32 in)
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
|| (defined(__clang__) && __has_builtin(__builtin_bswap32))
return __builtin_bswap32(in);
+#elif defined(__ICCARM__)
+ return __REV(in);
#else
return MEM_swap32_fallback(in);
#endif
@@ -418,9 +419,4 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
/* code only tested on 32 and 64 bits systems */
MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* MEM_H_MODULE */
diff --git a/lib/common/pool.h b/lib/common/pool.h
index cca4de73a83..f39b7f1eb99 100644
--- a/lib/common/pool.h
+++ b/lib/common/pool.h
@@ -11,10 +11,6 @@
#ifndef POOL_H
#define POOL_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "zstd_deps.h"
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
@@ -82,9 +78,4 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
*/
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif
diff --git a/lib/common/portability_macros.h b/lib/common/portability_macros.h
index e50314a78e4..860734141df 100644
--- a/lib/common/portability_macros.h
+++ b/lib/common/portability_macros.h
@@ -74,26 +74,39 @@
# define ZSTD_HIDE_ASM_FUNCTION(func)
#endif
+/* Compile time determination of BMI2 support */
+#ifndef STATIC_BMI2
+# if defined(__BMI2__)
+# define STATIC_BMI2 1
+# elif defined(_MSC_VER) && defined(__AVX2__)
+# define STATIC_BMI2 1 /* MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 */
+# endif
+#endif
+
+#ifndef STATIC_BMI2
+# define STATIC_BMI2 0
+#endif
+
/* Enable runtime BMI2 dispatch based on the CPU.
* Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
*/
#ifndef DYNAMIC_BMI2
- #if ((defined(__clang__) && __has_attribute(__target__)) \
+# if ((defined(__clang__) && __has_attribute(__target__)) \
|| (defined(__GNUC__) \
&& (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
- && (defined(__x86_64__) || defined(_M_X64)) \
+ && (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)) \
&& !defined(__BMI2__)
- # define DYNAMIC_BMI2 1
- #else
- # define DYNAMIC_BMI2 0
- #endif
+# define DYNAMIC_BMI2 1
+# else
+# define DYNAMIC_BMI2 0
+# endif
#endif
/**
- * Only enable assembly for GNUC compatible compilers,
+ * Only enable assembly for GNU C compatible compilers,
* because other platforms may not support GAS assembly syntax.
*
- * Only enable assembly for Linux / MacOS, other platforms may
+ * Only enable assembly for Linux / MacOS / Win32, other platforms may
* work, but they haven't been tested. This could likely be
* extended to BSD systems.
*
@@ -101,7 +114,7 @@
* 100% of code to be instrumented to work.
*/
#if defined(__GNUC__)
-# if defined(__linux__) || defined(__linux) || defined(__APPLE__)
+# if defined(__linux__) || defined(__linux) || defined(__APPLE__) || defined(_WIN32)
# if ZSTD_MEMORY_SANITIZER
# define ZSTD_ASM_SUPPORTED 0
# elif ZSTD_DATAFLOW_SANITIZER
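Illustration, not part of the patch: STATIC_BMI2 (now also driven by `__BMI2__`) removes the runtime branch entirely, while DYNAMIC_BMI2 keeps both code paths and picks one per call from cpuid. A standalone C sketch of that dispatch shape, with hypothetical function names:

#include <stddef.h>

static size_t sum_default(const unsigned char* p, size_t n)
{
    size_t s = 0, i;
    for (i = 0; i < n; i++) s += p[i];
    return s;
}

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
__attribute__((target("bmi2")))      /* what TARGET_ATTRIBUTE("bmi2") expands to */
#endif
static size_t sum_bmi2(const unsigned char* p, size_t n)
{
    size_t s = 0, i;                 /* same body; the compiler may emit BMI2 here */
    for (i = 0; i < n; i++) s += p[i];
    return s;
}

static size_t sum_dispatch(const unsigned char* p, size_t n, int cpuHasBmi2)
{
#if defined(STATIC_BMI2) && (STATIC_BMI2 == 1)
    (void)cpuHasBmi2;
    return sum_bmi2(p, n);           /* BMI2 guaranteed at build time: no branch */
#else
    return cpuHasBmi2 ? sum_bmi2(p, n) : sum_default(p, n);
#endif
}
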
diff --git a/lib/common/threading.h b/lib/common/threading.h
index fb5c1c87873..e123cdf14a3 100644
--- a/lib/common/threading.h
+++ b/lib/common/threading.h
@@ -16,10 +16,6 @@
#include "debug.h"
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
/**
@@ -72,7 +68,6 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread);
* add here more wrappers as required
*/
-
#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
/* === POSIX Systems === */
#  include <pthread.h>
@@ -143,8 +138,5 @@ typedef int ZSTD_pthread_cond_t;
#endif /* ZSTD_MULTITHREAD */
-#if defined (__cplusplus)
-}
-#endif
#endif /* THREADING_H_938743 */
diff --git a/lib/common/xxhash.h b/lib/common/xxhash.h
index e59e44267c1..b6af402fdf4 100644
--- a/lib/common/xxhash.h
+++ b/lib/common/xxhash.h
@@ -227,10 +227,6 @@
* xxHash prototypes and implementation
*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
/* ****************************
* INLINE mode
******************************/
@@ -537,6 +533,9 @@ extern "C" {
/*! @brief Version number, encoded as two digits each */
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+#if defined (__cplusplus)
+extern "C" {
+#endif
/*!
* @brief Obtains the xxHash version.
*
@@ -547,6 +546,9 @@ extern "C" {
*/
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+#if defined (__cplusplus)
+}
+#endif
/* ****************************
* Common basic types
@@ -593,6 +595,10 @@ typedef uint32_t XXH32_hash_t;
# endif
#endif
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/*!
* @}
*
@@ -821,6 +827,9 @@ XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canoni
#endif
/*! @endcond */
+#if defined (__cplusplus)
+} /* end of extern "C" */
+#endif
/*!
* @}
@@ -859,6 +868,9 @@ typedef uint64_t XXH64_hash_t;
# endif
#endif
+#if defined (__cplusplus)
+extern "C" {
+#endif
/*!
* @}
*
@@ -1562,6 +1574,11 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE con
#endif /* !XXH_NO_XXH3 */
+
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
#endif /* XXH_NO_LONG_LONG */
/*!
@@ -1748,6 +1765,10 @@ struct XXH3_state_s {
} while(0)
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/*!
* @brief Calculates the 128-bit hash of @p data using XXH3.
*
@@ -1963,8 +1984,13 @@ XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
XXH64_hash_t seed64);
#endif /* !XXH_NO_STREAM */
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
#endif /* !XXH_NO_XXH3 */
#endif /* XXH_NO_LONG_LONG */
+
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# define XXH_IMPLEMENTATION
#endif
@@ -2263,10 +2289,12 @@ XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
* @{
*/
-
/* *************************************
* Includes & Memory related functions
***************************************/
+#include <string.h>  /* memcmp, memcpy */
+#include <limits.h>  /* ULLONG_MAX */
+
#if defined(XXH_NO_STREAM)
/* nothing */
#elif defined(XXH_NO_STDLIB)
@@ -2280,9 +2308,17 @@ XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
* without access to dynamic allocation.
*/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
static void XXH_free(void* p) { (void)p; }
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
#else
/*
@@ -2291,6 +2327,9 @@ static void XXH_free(void* p) { (void)p; }
*/
#include <stdlib.h>
+#if defined (__cplusplus)
+extern "C" {
+#endif
/*!
* @internal
* @brief Modify this function to use a different routine than malloc().
@@ -2303,10 +2342,15 @@ static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
*/
static void XXH_free(void* p) { free(p); }
-#endif /* XXH_NO_STDLIB */
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
-#include <string.h>
+#endif /* XXH_NO_STDLIB */
+#if defined (__cplusplus)
+extern "C" {
+#endif
/*!
* @internal
* @brief Modify this function to use a different routine than memcpy().
@@ -2316,8 +2360,9 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size)
return memcpy(dest,src,size);
}
-#include <limits.h>   /* ULLONG_MAX */
-
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
/* *************************************
* Compiler Specific Options
@@ -2452,6 +2497,10 @@ typedef XXH32_hash_t xxh_u32;
# define U32 xxh_u32
#endif
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/* *** Memory access *** */
/*!
@@ -3608,6 +3657,10 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_can
return XXH_readBE64(src);
}
+#if defined (__cplusplus)
+}
+#endif
+
#ifndef XXH_NO_XXH3
/* *********************************************************************
@@ -3839,7 +3892,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
# define XXH_VECTOR XXH_AVX512
# elif defined(__AVX2__)
# define XXH_VECTOR XXH_AVX2
-# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+# elif defined(__SSE2__) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
# define XXH_VECTOR XXH_SSE2
# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
|| (defined(__s390x__) && defined(__VEC__)) \
@@ -3928,6 +3981,10 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
# pragma GCC optimize("-O2")
#endif
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
#if XXH_VECTOR == XXH_NEON
/*
@@ -4050,6 +4107,10 @@ XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
# endif
#endif /* XXH_VECTOR == XXH_NEON */
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
/*
* VSX and Z Vector helpers.
*
@@ -4111,6 +4172,9 @@ typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
# define XXH_vec_revb vec_revb
# else
+#if defined (__cplusplus)
+extern "C" {
+#endif
/*!
* A polyfill for POWER9's vec_revb().
*/
@@ -4120,9 +4184,15 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
return vec_perm(val, val, vByteSwap);
}
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
# endif
# endif /* XXH_VSX_BE */
+#if defined (__cplusplus)
+extern "C" {
+#endif
/*!
* Performs an unaligned vector load and byte swaps it on big endian.
*/
@@ -4167,6 +4237,11 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
return result;
}
# endif /* XXH_vec_mulo, XXH_vec_mule */
+
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
#endif /* XXH_VECTOR == XXH_VSX */
#if XXH_VECTOR == XXH_SVE
@@ -4200,7 +4275,9 @@ do { \
# endif
#endif /* XXH_NO_PREFETCH */
-
+#if defined (__cplusplus)
+extern "C" {
+#endif
/* ==========================================
* XXH3 default settings
* ========================================== */
@@ -6877,8 +6954,6 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_
#endif /* !XXH_NO_STREAM */
/* 128-bit utility functions */
-#include <string.h>   /* memcmp, memcpy */
-
/* return : 1 is equal, 0 if different */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
@@ -7005,16 +7080,15 @@ XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
# pragma GCC pop_options
#endif
-#endif /* XXH_NO_LONG_LONG */
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
+#endif /* XXH_NO_LONG_LONG */
#endif /* XXH_NO_XXH3 */
/*!
* @}
*/
#endif /* XXH_IMPLEMENTATION */
-
-
-#if defined (__cplusplus)
-} /* extern "C" */
-#endif
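Illustration, not part of the patch: the reshuffled guards above narrow `extern "C"` so that it wraps only xxHash's own prototypes, never the inclusion of system headers, since wrapping those can force C linkage onto unrelated declarations in C++ builds. The resulting header shape, sketched with a hypothetical function:

#include <stddef.h>        /* system headers stay outside the linkage block */

#if defined(__cplusplus)
extern "C" {
#endif

size_t my_hash64(const void* data, size_t len);   /* C linkage in both C and C++ builds */

#if defined(__cplusplus)
} /* extern "C" */
#endif
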
diff --git a/lib/common/zstd_deps.h b/lib/common/zstd_deps.h
index 4d767ae9b05..8a9c7cc5313 100644
--- a/lib/common/zstd_deps.h
+++ b/lib/common/zstd_deps.h
@@ -24,6 +24,18 @@
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON
+/* Even though we use qsort_r only for the dictionary builder, the macro
+ * _GNU_SOURCE has to be declared *before* the inclusion of any standard
+ * header and the script 'combine.sh' combines the whole zstd source code
+ * in a single file.
+ */
+#if defined(__linux) || defined(__linux__) || defined(linux) || defined(__gnu_linux__) || \
+ defined(__CYGWIN__) || defined(__MSYS__)
+#if !defined(_GNU_SOURCE) && !defined(__ANDROID__) /* NDK doesn't ship qsort_r(). */
+#define _GNU_SOURCE
+#endif
+#endif
+
#include <limits.h>
#include <stddef.h>
#include <string.h>
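Illustration, not part of the patch: on glibc, `qsort_r()` is only declared when `_GNU_SOURCE` is visible before the first standard header is included, and because `combine.sh` concatenates all of zstd into one file, this header is effectively that first point. A standalone sketch of the glibc-flavoured API being unlocked:

#define _GNU_SOURCE                 /* must precede the first standard header */
#include <stdlib.h>

/* glibc qsort_r(): the third comparator argument carries caller context */
static int cmp_int(const void* a, const void* b, void* ctx)
{
    (void)ctx;
    return (*(const int*)a > *(const int*)b) - (*(const int*)a < *(const int*)b);
}

/* usage: qsort_r(values, count, sizeof(int), cmp_int, user_context); */
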
diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h
index ecb9cfba87c..2789a359122 100644
--- a/lib/common/zstd_internal.h
+++ b/lib/common/zstd_internal.h
@@ -39,10 +39,6 @@
# define ZSTD_TRACE 0
#endif
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError /* for inlining */
@@ -95,7 +91,7 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
#define MIN_LITERALS_FOR_4_STREAMS 6
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e;
#define LONGNBSEQ 0x7F00
@@ -278,62 +274,6 @@ typedef enum {
/*-*******************************************
* Private declarations
*********************************************/
-typedef struct seqDef_s {
- U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
- U16 litLength;
- U16 mlBase; /* mlBase == matchLength - MINMATCH */
-} seqDef;
-
-/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
-typedef enum {
- ZSTD_llt_none = 0, /* no longLengthType */
- ZSTD_llt_literalLength = 1, /* represents a long literal */
- ZSTD_llt_matchLength = 2 /* represents a long match */
-} ZSTD_longLengthType_e;
-
-typedef struct {
- seqDef* sequencesStart;
- seqDef* sequences; /* ptr to end of sequences */
- BYTE* litStart;
- BYTE* lit; /* ptr to end of literals */
- BYTE* llCode;
- BYTE* mlCode;
- BYTE* ofCode;
- size_t maxNbSeq;
- size_t maxNbLit;
-
- /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
- * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
- * the existing value of the litLength or matchLength by 0x10000.
- */
- ZSTD_longLengthType_e longLengthType;
- U32 longLengthPos; /* Index of the sequence to apply long length modification to */
-} seqStore_t;
-
-typedef struct {
- U32 litLength;
- U32 matchLength;
-} ZSTD_sequenceLength;
-
-/**
- * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
- * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
- */
-MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
-{
- ZSTD_sequenceLength seqLen;
- seqLen.litLength = seq->litLength;
- seqLen.matchLength = seq->mlBase + MINMATCH;
- if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
- if (seqStore->longLengthType == ZSTD_llt_literalLength) {
- seqLen.litLength += 0x10000;
- }
- if (seqStore->longLengthType == ZSTD_llt_matchLength) {
- seqLen.matchLength += 0x10000;
- }
- }
- return seqLen;
-}
/**
* Contains the compressed frame size and an upper-bound for the decompressed frame size.
@@ -347,10 +287,6 @@ typedef struct {
unsigned long long decompressedBound;
} ZSTD_frameSizeInfo; /* decompress & legacy */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
-int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
-
-
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
@@ -385,8 +321,4 @@ MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
}
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/common/zstd_trace.h b/lib/common/zstd_trace.h
index da20534ebd8..d8eec100758 100644
--- a/lib/common/zstd_trace.h
+++ b/lib/common/zstd_trace.h
@@ -11,23 +11,20 @@
#ifndef ZSTD_TRACE_H
#define ZSTD_TRACE_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include <stddef.h>
/* weak symbol support
* For now, enable conservatively:
* - Only GNUC
* - Only ELF
- * - Only x86-64, i386 and aarch64
+ * - Only x86-64, i386, aarch64 and risc-v.
* Also, explicitly disable on platforms known not to work so they aren't
* forgotten in the future.
*/
#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
defined(__GNUC__) && defined(__ELF__) && \
- (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \
+ (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
+ defined(_M_IX86) || defined(__aarch64__) || defined(__riscv)) && \
!defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
!defined(__CYGWIN__) && !defined(_AIX)
# define ZSTD_HAVE_WEAK_SYMBOLS 1
@@ -64,7 +61,7 @@ typedef struct {
/**
* Non-zero if streaming (de)compression is used.
*/
- unsigned streaming;
+ int streaming;
/**
* The dictionary ID.
*/
@@ -73,7 +70,7 @@ typedef struct {
* Is the dictionary cold?
* Only set on decompression.
*/
- unsigned dictionaryIsCold;
+ int dictionaryIsCold;
/**
* The dictionary size or zero if no dictionary.
*/
@@ -156,8 +153,4 @@ ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end(
#endif /* ZSTD_TRACE */
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_TRACE_H */
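Illustration, not part of the patch: ZSTD_WEAK_ATTR lets the trace hooks ship as no-op weak definitions that an embedding application can override with ordinary strong definitions; the hunk above only widens the set of architectures where that is trusted to work (adding `__riscv`). A standalone ELF/GCC sketch with a hypothetical hook:

/* library side: weak no-op default, always safe to call */
__attribute__((weak)) void trace_event(const char* name)
{
    (void)name;
}

/* application side, in a separate translation unit:
 *     void trace_event(const char* name) { fprintf(stderr, "%s\n", name); }
 * The strong definition replaces the weak default at link time. */
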
diff --git a/lib/compress/hist.c b/lib/compress/hist.c
index e2fb431f03a..4ccf9a90a9e 100644
--- a/lib/compress/hist.c
+++ b/lib/compress/hist.c
@@ -26,6 +26,16 @@ unsigned HIST_isError(size_t code) { return ERR_isError(code); }
/*-**************************************************************
* Histogram functions
****************************************************************/
+void HIST_add(unsigned* count, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+
+    while (ip < end) {
+        count[*ip++]++;
+    }
+}
+
diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
/*!
 * ZSTD_HASHLOG3_MAX :
 * Maximum size of the hash table dedicated to find 3-bytes matches,
 * in log format, aka 17 => 1 << 17 == 128Ki positions.
* This structure is only used in zstd_opt.
* Since allocation is centralized for all strategies, it has to be known here.
- * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
+ * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3,
* so that zstd_opt.c doesn't need to know about this constant.
*/
#ifndef ZSTD_HASHLOG3_MAX
@@ -82,12 +83,12 @@ struct ZSTD_CDict_s {
ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
- ZSTD_matchState_t matchState;
+ ZSTD_MatchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
- ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
+ ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
* row-based matchfinder. Unless the cdict is reloaded, we will use
* the same greedy/lazy matchfinder at compression time.
*/
@@ -137,11 +138,12 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
- /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
- if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ /* statically sized space. tmpWorkspace never moves (but prev/next block swap places) */
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
- cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
+ cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE);
+ cctx->tmpWkspSize = TMP_WORKSPACE_SIZE;
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
@@ -217,7 +219,7 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
}
/* private API call, for dictBuilder only */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
@@ -227,32 +229,23 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
* for this compression.
*/
-static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
+static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) {
assert(mode != ZSTD_ps_auto);
return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}
/* Returns row matchfinder usage given an initial mode and cParams */
-static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
+static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
-#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
- int const kHasSIMD128 = 1;
-#else
- int const kHasSIMD128 = 0;
-#endif
if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */
mode = ZSTD_ps_disable;
if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
- if (kHasSIMD128) {
- if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
- } else {
- if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
- }
+ if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
return mode;
}
/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
-static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
+static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
@@ -260,7 +253,7 @@ static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
- const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const U32 forDDSDict) {
assert(useRowMatchFinder != ZSTD_ps_auto);
/* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
@@ -273,7 +266,7 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
* enable long distance matching (wlog >= 27, strategy >= btopt).
* Returns ZSTD_ps_disable otherwise.
*/
-static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
+static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
@@ -292,7 +285,7 @@ static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) {
}
}
-static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) {
+static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) {
if (value != ZSTD_ps_auto) return value;
if (cLevel < 10) {
return ZSTD_ps_disable;
@@ -322,7 +315,7 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
assert(cctxParams.ldmParams.hashRateLog < 32);
}
- cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
+ cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams);
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences);
cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize);
@@ -390,13 +383,13 @@ ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
*/
cctxParams->compressionLevel = compressionLevel;
cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
- cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
+ cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &params->cParams);
cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
- cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
+ cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm);
}
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
@@ -597,11 +590,16 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = 1;
return bounds;
- case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_splitAfterSequences:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
+ case ZSTD_c_blockSplitterLevel:
+ bounds.lowerBound = 0;
+ bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX;
+ return bounds;
+
case ZSTD_c_useRowMatchFinder:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
@@ -627,7 +625,7 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
return bounds;
- case ZSTD_c_searchForExternalRepcodes:
+ case ZSTD_c_repcodeResolution:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
@@ -668,6 +666,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
+ case ZSTD_c_blockSplitterLevel:
return 1;
case ZSTD_c_format:
@@ -694,13 +693,13 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
- case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_splitAfterSequences:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
case ZSTD_c_prefetchCDictTables:
case ZSTD_c_enableSeqProducerFallback:
case ZSTD_c_maxBlockSize:
- case ZSTD_c_searchForExternalRepcodes:
+ case ZSTD_c_repcodeResolution:
default:
return 0;
}
@@ -753,13 +752,14 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
- case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_splitAfterSequences:
+ case ZSTD_c_blockSplitterLevel:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
case ZSTD_c_prefetchCDictTables:
case ZSTD_c_enableSeqProducerFallback:
case ZSTD_c_maxBlockSize:
- case ZSTD_c_searchForExternalRepcodes:
+ case ZSTD_c_repcodeResolution:
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
@@ -857,7 +857,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
}
case ZSTD_c_literalCompressionMode : {
- const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
+ const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value;
BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm);
CCtxParams->literalCompressionMode = lcm;
return CCtxParams->literalCompressionMode;
@@ -883,7 +883,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
value = ZSTDMT_JOBSIZE_MIN;
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
assert(value >= 0);
- CCtxParams->jobSize = value;
+ CCtxParams->jobSize = (size_t)value;
return CCtxParams->jobSize;
#endif
@@ -913,7 +913,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_enableLongDistanceMatching :
BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value);
- CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
+ CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value;
return CCtxParams->ldmParams.enableLdm;
case ZSTD_c_ldmHashLog :
@@ -966,7 +966,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_blockDelimiters:
BOUNDCHECK(ZSTD_c_blockDelimiters, value);
- CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value;
return CCtxParams->blockDelimiters;
case ZSTD_c_validateSequences:
@@ -974,14 +974,19 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
CCtxParams->validateSequences = value;
return (size_t)CCtxParams->validateSequences;
- case ZSTD_c_useBlockSplitter:
- BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
- CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
- return CCtxParams->useBlockSplitter;
+ case ZSTD_c_splitAfterSequences:
+ BOUNDCHECK(ZSTD_c_splitAfterSequences, value);
+ CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->postBlockSplitter;
+
+ case ZSTD_c_blockSplitterLevel:
+ BOUNDCHECK(ZSTD_c_blockSplitterLevel, value);
+ CCtxParams->preBlockSplitter_level = value;
+ return (size_t)CCtxParams->preBlockSplitter_level;
case ZSTD_c_useRowMatchFinder:
BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
- CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
+ CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value;
return CCtxParams->useRowMatchFinder;
case ZSTD_c_deterministicRefPrefix:
@@ -991,7 +996,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_prefetchCDictTables:
BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
- CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value;
+ CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value;
return CCtxParams->prefetchCDictTables;
case ZSTD_c_enableSeqProducerFallback:
@@ -1002,12 +1007,13 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_maxBlockSize:
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_maxBlockSize, value);
- CCtxParams->maxBlockSize = value;
+ assert(value>=0);
+ CCtxParams->maxBlockSize = (size_t)value;
return CCtxParams->maxBlockSize;
- case ZSTD_c_searchForExternalRepcodes:
- BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value);
- CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value;
+ case ZSTD_c_repcodeResolution:
+ BOUNDCHECK(ZSTD_c_repcodeResolution, value);
+ CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value;
return CCtxParams->searchForExternalRepcodes;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
@@ -1025,7 +1031,7 @@ size_t ZSTD_CCtxParams_getParameter(
switch(param)
{
case ZSTD_c_format :
- *value = CCtxParams->format;
+ *value = (int)CCtxParams->format;
break;
case ZSTD_c_compressionLevel :
*value = CCtxParams->compressionLevel;
@@ -1040,16 +1046,16 @@ size_t ZSTD_CCtxParams_getParameter(
*value = (int)CCtxParams->cParams.chainLog;
break;
case ZSTD_c_searchLog :
- *value = CCtxParams->cParams.searchLog;
+ *value = (int)CCtxParams->cParams.searchLog;
break;
case ZSTD_c_minMatch :
- *value = CCtxParams->cParams.minMatch;
+ *value = (int)CCtxParams->cParams.minMatch;
break;
case ZSTD_c_targetLength :
- *value = CCtxParams->cParams.targetLength;
+ *value = (int)CCtxParams->cParams.targetLength;
break;
case ZSTD_c_strategy :
- *value = (unsigned)CCtxParams->cParams.strategy;
+ *value = (int)CCtxParams->cParams.strategy;
break;
case ZSTD_c_contentSizeFlag :
*value = CCtxParams->fParams.contentSizeFlag;
@@ -1064,10 +1070,10 @@ size_t ZSTD_CCtxParams_getParameter(
*value = CCtxParams->forceWindow;
break;
case ZSTD_c_forceAttachDict :
- *value = CCtxParams->attachDictPref;
+ *value = (int)CCtxParams->attachDictPref;
break;
case ZSTD_c_literalCompressionMode :
- *value = CCtxParams->literalCompressionMode;
+ *value = (int)CCtxParams->literalCompressionMode;
break;
case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
@@ -1101,19 +1107,19 @@ size_t ZSTD_CCtxParams_getParameter(
*value = CCtxParams->enableDedicatedDictSearch;
break;
case ZSTD_c_enableLongDistanceMatching :
- *value = CCtxParams->ldmParams.enableLdm;
+ *value = (int)CCtxParams->ldmParams.enableLdm;
break;
case ZSTD_c_ldmHashLog :
- *value = CCtxParams->ldmParams.hashLog;
+ *value = (int)CCtxParams->ldmParams.hashLog;
break;
case ZSTD_c_ldmMinMatch :
- *value = CCtxParams->ldmParams.minMatchLength;
+ *value = (int)CCtxParams->ldmParams.minMatchLength;
break;
case ZSTD_c_ldmBucketSizeLog :
- *value = CCtxParams->ldmParams.bucketSizeLog;
+ *value = (int)CCtxParams->ldmParams.bucketSizeLog;
break;
case ZSTD_c_ldmHashRateLog :
- *value = CCtxParams->ldmParams.hashRateLog;
+ *value = (int)CCtxParams->ldmParams.hashRateLog;
break;
case ZSTD_c_targetCBlockSize :
*value = (int)CCtxParams->targetCBlockSize;
@@ -1133,8 +1139,11 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_validateSequences :
*value = (int)CCtxParams->validateSequences;
break;
- case ZSTD_c_useBlockSplitter :
- *value = (int)CCtxParams->useBlockSplitter;
+ case ZSTD_c_splitAfterSequences :
+ *value = (int)CCtxParams->postBlockSplitter;
+ break;
+ case ZSTD_c_blockSplitterLevel :
+ *value = CCtxParams->preBlockSplitter_level;
break;
case ZSTD_c_useRowMatchFinder :
*value = (int)CCtxParams->useRowMatchFinder;
@@ -1151,7 +1160,7 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_maxBlockSize:
*value = (int)CCtxParams->maxBlockSize;
break;
- case ZSTD_c_searchForExternalRepcodes:
+ case ZSTD_c_repcodeResolution:
*value = (int)CCtxParams->searchForExternalRepcodes;
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
@@ -1186,13 +1195,13 @@ size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams)
DEBUGLOG(4, "ZSTD_CCtx_setCParams");
/* only update if all parameters are valid */
FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, cparams.windowLog), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, cparams.chainLog), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, cparams.hashLog), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, cparams.searchLog), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, cparams.minMatch), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, cparams.targetLength), "");
- FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, cparams.strategy), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), "");
return 0;
}
@@ -1384,7 +1393,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
- BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ BOUNDCHECK(ZSTD_c_strategy, (int)cParams.strategy);
return 0;
}
@@ -1457,15 +1466,15 @@ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
- * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`.
* note : `srcSize==0` means 0!
* condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize,
- ZSTD_cParamMode_e mode,
- ZSTD_paramSwitch_e useRowMatchFinder)
+ ZSTD_CParamMode_e mode,
+ ZSTD_ParamSwitch_e useRowMatchFinder)
{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
@@ -1609,8 +1618,8 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto);
}
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
static void ZSTD_overrideCParams(
ZSTD_compressionParameters* cParams,
@@ -1626,11 +1635,12 @@ static void ZSTD_overrideCParams(
}
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
- srcSizeHint = CCtxParams->srcSizeHint;
+ assert(CCtxParams->srcSizeHint>=0);
+ srcSizeHint = (U64)CCtxParams->srcSizeHint;
}
cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
@@ -1642,8 +1652,8 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
- const ZSTD_paramSwitch_e useRowMatchFinder,
- const U32 enableDedicatedDictSearch,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
+ const int enableDedicatedDictSearch,
const U32 forCCtx)
{
/* chain table size should be 0 for fast or row-hash strategies */
@@ -1659,14 +1669,14 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
- ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
- + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
- + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
-      + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
-      + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
-      + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
+        ZSTD_cwksp_aligned64_alloc_size((MaxML+1) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size((MaxLL+1) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size((MaxOff+1) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size((1<<Litbits) * sizeof(U32))
+      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
+      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
- ? ZSTD_cwksp_aligned_alloc_size(hSize)
+ ? ZSTD_cwksp_aligned64_alloc_size(hSize)
: 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
@@ -1693,7 +1703,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
const int isStatic,
- const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const size_t buffInSize,
const size_t buffOutSize,
const U64 pledgedSrcSize,
@@ -1704,16 +1714,16 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
- + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
- size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
- ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+ ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
@@ -1723,12 +1733,12 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
size_t const externalSeqSpace = useSequenceProducer
- ? ZSTD_cwksp_aligned_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence))
+ ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence))
: 0;
size_t const neededSpace =
cctxSpace +
- entropySpace +
+ tmpWorkSpace +
blockStateSpace +
ldmSpace +
ldmSeqSpace +
@@ -1745,7 +1755,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
&cParams);
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
@@ -1810,7 +1820,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
@@ -1920,7 +1930,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
* Invalidate all the matches in the match finder tables.
* Requires nextSrc and base to be set (can be NULL).
*/
-static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms)
{
ZSTD_window_clear(&ms->window);
@@ -1967,15 +1977,15 @@ static U64 ZSTD_bitmix(U64 val, U64 len) {
}
/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */
-static void ZSTD_advanceHashSalt(ZSTD_matchState_t* ms) {
+static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) {
ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4);
}
static size_t
-ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ZSTD_reset_matchState(ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
- const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
@@ -2029,7 +2039,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_advanceHashSalt(ms);
} else {
/* When we are not salting we want to always memset the memory */
- ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize);
ZSTD_memset(ms->tagTable, 0, tagTableSize);
ms->hashSalt = 0;
}
@@ -2043,12 +2053,12 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
/* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
-        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
-        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
- ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
- ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
- ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
- ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
+        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<<Litbits) * sizeof(unsigned));
+        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
}
ms->cParams = *cParams;
@@ -2096,7 +2106,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
{
ZSTD_cwksp* const ws = &zc->workspace;
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
- (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
+ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter);
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zc->isFirstBlock = 1;
@@ -2108,7 +2118,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
params = &zc->appliedParams;
assert(params->useRowMatchFinder != ZSTD_ps_auto);
- assert(params->useBlockSplitter != ZSTD_ps_auto);
+ assert(params->postBlockSplitter != ZSTD_ps_auto);
assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
assert(params->maxBlockSize != 0);
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
@@ -2164,15 +2174,16 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
DEBUGLOG(5, "reserving object space");
/* Statically sized space.
- * entropyWorkspace never moves,
+ * tmpWorkspace never moves,
* though prev/next block swap places */
assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
- zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
- RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
+ zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE);
+ RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace");
+ zc->tmpWkspSize = TMP_WORKSPACE_SIZE;
} }
ZSTD_cwksp_clear(ws);
@@ -2187,7 +2198,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
- zc->blockSize = blockSize;
+ zc->blockSizeMax = blockSize;
XXH64_reset(&zc->xxhState, 0);
zc->stage = ZSTDcs_init;
@@ -2205,15 +2216,15 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
needsIndexReset,
ZSTD_resetTarget_CCtx), "");
- zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+ zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef));
/* ldm hash table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* TODO: avoid memset? */
size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
- zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t));
ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
- zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq));
zc->maxNbLdmSequences = maxNbLdmSeq;
ZSTD_window_init(&zc->ldmState.window);
@@ -2225,7 +2236,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
zc->extSeqBufCapacity = maxNbExternalSeq;
zc->extSeqBuf =
- (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
+ (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
}
/* buffers */
@@ -2444,7 +2455,8 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
}
/* Zero the hashTable3, since the cdict never fills it */
- { int const h3log = cctx->blockState.matchState.hashLog3;
+ assert(cctx->blockState.matchState.hashLog3 <= 31);
+ { U32 const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
@@ -2453,8 +2465,8 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
/* copy dictionary offsets */
- { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
- ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ { ZSTD_MatchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
@@ -2512,10 +2524,10 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
/* Copy only compression parameters related to tables. */
params.cParams = srcCCtx->appliedParams.cParams;
assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
- assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
- params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
+ params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter;
params.ldmParams = srcCCtx->appliedParams.ldmParams;
params.fParams = fParams;
params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;
@@ -2538,7 +2550,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
: 0;
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
- int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ U32 const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
@@ -2556,8 +2568,8 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
/* copy dictionary offsets */
{
- const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
- ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
@@ -2606,7 +2618,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
/* Protect special index values < ZSTD_WINDOW_START_INDEX. */
U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
- assert(size < (1U<<31)); /* can be casted to int */
+ assert(size < (1U<<31)); /* can be cast to int */
#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table reuse logic is sound, and that we don't
@@ -2651,7 +2663,7 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const
/*! ZSTD_reduceIndex() :
* rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
+static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
{ U32 const hSize = (U32)1 << params->cParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
@@ -2678,9 +2690,9 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par
/* See doc/zstd_compression_format.md for detailed format description */
-int ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr)
{
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
BYTE* const llCodeTable = seqStorePtr->llCode;
BYTE* const ofCodeTable = seqStorePtr->ofCode;
BYTE* const mlCodeTable = seqStorePtr->mlCode;
@@ -2723,9 +2735,9 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
* Returns 1 if true, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
- DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
- assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
- return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
+ DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter);
+ assert(cctxParams->postBlockSplitter != ZSTD_ps_auto);
+ return (cctxParams->postBlockSplitter == ZSTD_ps_enable);
}
/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
@@ -2749,7 +2761,7 @@ typedef struct {
*/
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildSequencesStatistics(
- const seqStore_t* seqStorePtr, size_t nbSeq,
+ const SeqStore_t* seqStorePtr, size_t nbSeq,
const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
BYTE* dst, const BYTE* const dstEnd,
ZSTD_strategy strategy, unsigned* countWorkspace,
@@ -2785,7 +2797,7 @@ ZSTD_buildSequencesStatistics(
assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
- CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
+ CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype,
countWorkspace, max, llCodeTable, nbSeq,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->litlengthCTable,
@@ -2806,7 +2818,7 @@ ZSTD_buildSequencesStatistics(
size_t const mostFrequent = HIST_countFast_wksp(
countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
@@ -2817,7 +2829,7 @@ ZSTD_buildSequencesStatistics(
assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
- CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
+ CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype,
countWorkspace, max, ofCodeTable, nbSeq,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->offcodeCTable,
@@ -2847,7 +2859,7 @@ ZSTD_buildSequencesStatistics(
assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
- CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
+ CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype,
countWorkspace, max, mlCodeTable, nbSeq,
ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->matchlengthCTable,
@@ -2874,11 +2886,12 @@ ZSTD_buildSequencesStatistics(
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore_internal(
- const seqStore_t* seqStorePtr,
+ void* dst, size_t dstCapacity,
+ const void* literals, size_t litSize,
+ const SeqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
{
@@ -2887,7 +2900,7 @@ ZSTD_entropyCompressSeqStore_internal(
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
@@ -2906,12 +2919,9 @@ ZSTD_entropyCompressSeqStore_internal(
assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
/* Compress literals */
- { const BYTE* const literals = seqStorePtr->litStart;
- size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
+ { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
/* Base suspicion of uncompressibility on ratio of literals to sequences */
- unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
- size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
size_t const cSize = ZSTD_compressLiterals(
op, dstCapacity,
@@ -2992,33 +3002,35 @@ ZSTD_entropyCompressSeqStore_internal(
return (size_t)(op - ostart);
}
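A side note on the literals path shown a few hunks up: whether literals are flagged as suspect-uncompressible is just a ratio test between literal bytes and the number of sequences, with the threshold coming from SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO (20, defined earlier in this file). A minimal restatement of that check for readers skimming the diff; it mirrors the expression in the hunk rather than adding new logic:

/* Sketch of the suspicion test used above: a block with no sequences, or with
 * at least 20 literal bytes per sequence, is treated as likely uncompressible. */
static int isSuspectUncompressible_sketch(size_t numSequences, size_t litSize)
{
    return (numSequences == 0)
        || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
}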
-MEM_STATIC size_t
-ZSTD_entropyCompressSeqStore(
- const seqStore_t* seqStorePtr,
+static size_t
+ZSTD_entropyCompressSeqStore_wExtLitBuffer(
+ void* dst, size_t dstCapacity,
+ const void* literals, size_t litSize,
+ size_t blockSize,
+ const SeqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
- size_t srcSize,
void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
{
size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
- seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
+ literals, litSize,
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
*/
- if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) {
+ if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) {
DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity);
return 0; /* block not compressed */
}
FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
/* Check compressibility */
- { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
@@ -3029,12 +3041,34 @@ ZSTD_entropyCompressSeqStore(
return cSize;
}
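The final gate above rejects the entropy-coded output unless it beats a raw block by a strategy-dependent margin returned by ZSTD_minGain(). The sketch below restates that gate; the margin formula is a placeholder assumption, since ZSTD_minGain()'s body is not part of this hunk:

/* Hedged sketch of the compressibility gate: keep the compressed block only if
 * it is smaller than blockSize minus a margin. The margin below is illustrative,
 * not the actual ZSTD_minGain() formula. */
static size_t minGain_sketch(size_t blockSize)
{
    return (blockSize >> 6) + 2;   /* assumed: roughly 1.5% of the block plus 2 bytes */
}

static size_t gateCompressedSize_sketch(size_t cSize, size_t blockSize)
{
    size_t const maxCSize = blockSize - minGain_sketch(blockSize);
    if (cSize >= maxCSize) return 0;   /* 0 => caller emits a raw block instead */
    return cSize;
}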
+static size_t
+ZSTD_entropyCompressSeqStore(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
+{
+ return ZSTD_entropyCompressSeqStore_wExtLitBuffer(
+ dst, dstCapacity,
+ seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart),
+ srcSize,
+ seqStorePtr,
+ prevEntropy, nextEntropy,
+ cctxParams,
+ entropyWorkspace, entropyWkspSize,
+ bmi2);
+}
+
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
+ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
- static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
ZSTD_COMPRESSBLOCK_DOUBLEFAST,
@@ -3079,13 +3113,13 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
NULL,
NULL }
};
- ZSTD_blockCompressor selectedCompressor;
+ ZSTD_BlockCompressor_f selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
- assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
- DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
+ DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
- static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
+ static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = {
{
ZSTD_COMPRESSBLOCK_GREEDY_ROW,
ZSTD_COMPRESSBLOCK_LAZY_ROW,
@@ -3107,7 +3141,7 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW
}
};
- DEBUGLOG(4, "Selecting a row-based matchfinder");
+ DEBUGLOG(5, "Selecting a row-based matchfinder");
assert(useRowMatchFinder != ZSTD_ps_auto);
selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
} else {
@@ -3117,14 +3151,14 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
return selectedCompressor;
}
-static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
-void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+void ZSTD_resetSeqStore(SeqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
@@ -3197,11 +3231,39 @@ static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seq
return litLenSum + matchLenSum;
}
-typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+/**
+ * Function to validate sequences produced by a block compressor.
+ */
+static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams)
+{
+#if DEBUGLEVEL >= 1
+ const SeqDef* seq = seqStore->sequencesStart;
+ const SeqDef* const seqEnd = seqStore->sequences;
+ size_t const matchLenLowerBound = cParams->minMatch == 3 ? 3 : 4;
+ for (; seq < seqEnd; ++seq) {
+ const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq);
+ assert(seqLength.matchLength >= matchLenLowerBound);
+ (void)seqLength;
+ (void)matchLenLowerBound;
+ }
+#else
+ (void)seqStore;
+ (void)cParams;
+#endif
+}
+
+static size_t
+ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch);
+
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
- ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ ZSTD_MatchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
@@ -3262,7 +3324,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
- rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+ RawSeqStore_t ldmSeqStore = kNullRawSeqStore;
/* External matchfinder + LDM is technically possible, just not implemented yet.
* We need to revisit soon and implement it. */
@@ -3313,11 +3375,11 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
/* Return early if there is no error, since we don't need to worry about last literals */
if (!ZSTD_isError(nbPostProcessedSeqs)) {
- ZSTD_sequencePosition seqPos = {0,0,0};
+ ZSTD_SequencePosition seqPos = {0,0,0};
size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs);
RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
FORWARD_IF_ERROR(
- ZSTD_copySequencesToSeqStoreExplicitBlockDelim(
+ ZSTD_transferSequences_wBlockDelim(
zc, &seqPos,
zc->extSeqBuf, nbPostProcessedSeqs,
src, srcSize,
@@ -3336,7 +3398,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
}
/* Fallback to software matchfinder */
- { ZSTD_blockCompressor const blockCompressor =
+ { ZSTD_BlockCompressor_f const blockCompressor =
ZSTD_selectBlockCompressor(
zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
@@ -3350,7 +3412,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
} }
} else { /* not long range mode and no external matchfinder */
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(
+ ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor(
zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
dictMode);
@@ -3360,19 +3422,20 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
+ ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams);
return ZSTDbss_compress;
}
-static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const seqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])
+static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])
{
- const seqDef* inSeqs = seqStore->sequencesStart;
- const size_t nbInSequences = seqStore->sequences - inSeqs;
+ const SeqDef* inSeqs = seqStore->sequencesStart;
+ const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs);
const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart);
ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex;
const size_t nbOutSequences = nbInSequences + 1;
size_t nbOutLiterals = 0;
- repcodes_t repcodes;
+ Repcodes_t repcodes;
size_t i;
/* Bounds check that we have enough space for every input sequence
@@ -3458,7 +3521,7 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize)
{
const size_t dstCapacity = ZSTD_compressBound(srcSize);
- void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ void* dst; /* Make C90 happy. */
SeqCollector seqCollector;
{
int targetCBlockSize;
@@ -3471,6 +3534,7 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0");
}
+ dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
seqCollector.collectSequences = 1;
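The `dst` change above has two effects visible in this hunk: the buffer is no longer allocated before the parameter checks that can return early, and, because C90 forbids declarations after statements, the pointer is declared uninitialized up front and assigned afterwards. A generic illustration of that pattern (plain C, not zstd code):

#include <stdlib.h>

/* Generic C90-friendly pattern: declare first, run early-exit checks,
 * and only then allocate, so failed checks never allocate at all. */
static int process_sketch(size_t size)
{
    void* buf;                    /* declaration must precede statements in C90 */
    if (size == 0) return -1;     /* early-exit check, nothing allocated yet */
    buf = malloc(size);
    if (buf == NULL) return -1;
    /* ... use buf ... */
    free(buf);
    return 0;
}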
@@ -3531,7 +3595,7 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
* This is just a heuristic based on the compressibility.
* It may return both false positives and false negatives.
*/
-static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+static int ZSTD_maybeRLE(SeqStore_t const* seqStore)
{
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
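As the comment above says, ZSTD_maybeRLE() is only a cheap heuristic over sequence and literal counts, so an exact byte scan (ZSTD_isRLE(), whose signature appears in the previous hunk header) still has to confirm before an RLE block is emitted. A hedged sketch of that two-step pattern; the threshold values are illustrative assumptions, not the actual ones:

/* Illustrative two-step RLE detection. Step 1 is a cheap guess based on counts,
 * step 2 is the exact scan. Thresholds here are placeholders. */
static int maybeRLE_sketch(size_t nbSeqs, size_t nbLits)
{
    return (nbSeqs < 4) && (nbLits < 16);   /* assumed thresholds */
}

static int isRLE_sketch(const unsigned char* src, size_t length)
{
    size_t i;
    for (i = 1; i < length; i++) {
        if (src[i] != src[0]) return 0;
    }
    return 1;
}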
@@ -3555,7 +3619,7 @@ writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader);
- DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
+ DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
}
/** ZSTD_buildBlockEntropyStats_literals() :
@@ -3693,7 +3757,7 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy)
* @return : size of fse tables or error code */
static size_t
ZSTD_buildBlockEntropyStats_sequences(
- const seqStore_t* seqStorePtr,
+ const SeqStore_t* seqStorePtr,
const ZSTD_fseCTables_t* prevEntropy,
ZSTD_fseCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
@@ -3717,9 +3781,9 @@ ZSTD_buildBlockEntropyStats_sequences(
entropyWorkspace, entropyWorkspaceSize)
: ZSTD_buildDummySequencesStatistics(nextEntropy);
FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
- fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
- fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
- fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
+ fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype;
+ fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype;
+ fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype;
fseMetadata->lastCountSize = stats.lastCountSize;
return stats.size;
}
@@ -3732,7 +3796,7 @@ ZSTD_buildBlockEntropyStats_sequences(
* Note : also employed in superblock
*/
size_t ZSTD_buildBlockEntropyStats(
- const seqStore_t* seqStorePtr,
+ const SeqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
@@ -3790,7 +3854,7 @@ ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
static size_t
-ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
+ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type,
const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
const FSE_CTable* fseCTable,
const U8* additionalBits,
@@ -3881,7 +3945,7 @@ ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
* @return: estimated compressed size of the seqStore, or a zstd error.
*/
static size_t
-ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc)
+ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc)
{
ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
@@ -3890,25 +3954,25 @@ ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CC
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
entropyMetadata,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), "");
+ zc->tmpWorkspace, zc->tmpWkspSize), "");
return ZSTD_estimateBlockSize(
seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
(size_t)(seqStore->sequences - seqStore->sequencesStart),
&zc->blockState.nextCBlock->entropy,
entropyMetadata,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
+ zc->tmpWorkspace, zc->tmpWkspSize,
(int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
}
/* Returns literals bytes represented in a seqStore */
-static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore)
+static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore)
{
size_t literalsBytes = 0;
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t i;
for (i = 0; i < nbSeqs; ++i) {
- seqDef const seq = seqStore->sequencesStart[i];
+ SeqDef const seq = seqStore->sequencesStart[i];
literalsBytes += seq.litLength;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
literalsBytes += 0x10000;
@@ -3917,13 +3981,13 @@ static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore)
}
/* Returns match bytes represented in a seqStore */
-static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore)
+static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore)
{
size_t matchBytes = 0;
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t i;
for (i = 0; i < nbSeqs; ++i) {
- seqDef seq = seqStore->sequencesStart[i];
+ SeqDef seq = seqStore->sequencesStart[i];
matchBytes += seq.mlBase + MINMATCH;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
matchBytes += 0x10000;
@@ -3934,8 +3998,8 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore)
/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
* Stores the result in resultSeqStore.
*/
-static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
- const seqStore_t* originalSeqStore,
+static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore,
+ const SeqStore_t* originalSeqStore,
size_t startIdx, size_t endIdx)
{
*resultSeqStore = *originalSeqStore;
@@ -4003,13 +4067,13 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, c
* 4+ : real_offset+3
*/
static void
-ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
- const seqStore_t* const seqStore, U32 const nbSeq)
+ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes,
+ const SeqStore_t* const seqStore, U32 const nbSeq)
{
U32 idx = 0;
U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq;
for (; idx < nbSeq; ++idx) {
- seqDef* const seq = seqStore->sequencesStart + idx;
+ SeqDef* const seq = seqStore->sequencesStart + idx;
U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx);
U32 const offBase = seq->offBase;
assert(offBase > 0);
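The resolver above leans on the offBase convention spelled out in the comment a few lines up ("4+ : real_offset+3"): values 1-3 name a repcode slot, anything larger carries a real offset shifted up by the number of repcodes. Two tiny helpers restating that convention, for orientation only:

/* Restating the offBase convention used by ZSTD_seqStore_resolveOffCodes():
 * 1..3 select a repcode, 4+ encode real_offset + 3. Illustrative only. */
#define OFFBASE_REP_NUM_SKETCH 3
static int offBase_isRepcode_sketch(U32 offBase) { return offBase <= OFFBASE_REP_NUM_SKETCH; }
static U32 offBase_toRealOffset_sketch(U32 offBase) { return offBase - OFFBASE_REP_NUM_SKETCH; } /* requires offBase > 3 */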
@@ -4039,8 +4103,8 @@ ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRe
*/
static size_t
ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
- const seqStore_t* const seqStore,
- repcodes_t* const dRep, repcodes_t* const cRep,
+ const SeqStore_t* const seqStore,
+ Repcodes_t* const dRep, Repcodes_t* const cRep,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastBlock, U32 isPartition)
@@ -4052,7 +4116,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
size_t cSeqsSize;
/* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
- repcodes_t const dRepOriginal = *dRep;
+ Repcodes_t const dRepOriginal = *dRep;
DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
if (isPartition)
ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
@@ -4063,7 +4127,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
&zc->appliedParams,
op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
srcSize,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
zc->bmi2);
FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
@@ -4087,18 +4151,18 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
if (cSeqsSize == 0) {
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "Nocompress block failed");
- DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
+ DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else if (cSeqsSize == 1) {
cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "RLE compress block failed");
- DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
+ DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
cSize = ZSTD_blockHeaderSize + cSeqsSize;
- DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize);
}
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
@@ -4131,11 +4195,11 @@ typedef struct {
*/
static void
ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
- ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
+ ZSTD_CCtx* zc, const SeqStore_t* origSeqStore)
{
- seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
- seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
- seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
+ SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
+ SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
+ SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
size_t estimatedOriginalSize;
size_t estimatedFirstHalfSize;
size_t estimatedSecondHalfSize;
@@ -4205,8 +4269,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
size_t i = 0;
size_t srcBytesTotal = 0;
U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
- seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
- seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
+ SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
+ SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
/* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
@@ -4223,11 +4287,11 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
*
* See ZSTD_seqStore_resolveOffCodes() for more details.
*/
- repcodes_t dRep;
- repcodes_t cRep;
- ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
- ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
- ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
+ Repcodes_t dRep;
+ Repcodes_t cRep;
+ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t));
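To summarize the repcode bookkeeping set up above: cRep follows the history the encoder builds, while dRep simulates what a decoder will see, since raw/RLE partitions reset it; dRep is the one written back once all partitions are done. Condensed from this hunk (the per-partition compression call is elided):

/* Condensed view of the split-block repcode handling in this function:
 * both histories start from prevCBlock, may diverge while partitions are
 * emitted, and the decoder-side history (dRep) wins at the end. */
Repcodes_t dRep, cRep;
ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
/* ... for each partition: ZSTD_compressSeqStore_singleBlock(zc, ..., &dRep, &cRep, ...),
 *     which restores dRep to its pre-partition value when the partition is emitted raw or as RLE ... */
ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t));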
DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
@@ -4242,8 +4306,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
lastBlock, 0 /* isPartition */);
FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
- assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX);
- assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize);
+ assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
+ assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize);
return cSizeSingleBlock;
}
@@ -4277,12 +4341,12 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
dstCapacity -= cSizeChunk;
cSize += cSizeChunk;
*currSeqStore = *nextSeqStore;
- assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize);
+ assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize);
}
/* cRep and dRep may have diverged during the compression.
* If so, we use the dRep repcodes for the next block.
*/
- ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t));
return cSize;
}
@@ -4293,8 +4357,8 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
{
U32 nbSeq;
size_t cSize;
- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
- assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock");
+ assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
@@ -4304,7 +4368,7 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block");
return cSize;
}
nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
@@ -4353,7 +4417,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
&zc->appliedParams,
dst, dstCapacity,
srcSize,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
zc->bmi2);
if (frame &&
@@ -4459,7 +4523,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
return cSize;
}
-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
void const* ip,
@@ -4483,6 +4547,40 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
}
}
+#include "zstd_preSplit.h"
+
+static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings)
+{
+ /* split level based on compression strategy, from `fast` to `btultra2` */
+ static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 };
+ /* note: conservatively only split full blocks (128 KB) currently.
+ * While it's possible to go lower, let's keep it simple for a first implementation.
+ * Besides, benefits of splitting are reduced when blocks are already small.
+ */
+ if (srcSize < 128 KB || blockSizeMax < 128 KB)
+ return MIN(srcSize, blockSizeMax);
+ /* do not split incompressible data though:
+ * require verified savings to allow pre-splitting.
+ * Note: as a consequence, the first full block is not split.
+ */
+ if (savings < 3) {
+ DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings);
+ return 128 KB;
+ }
+ /* apply @splitLevel, or use default value (which depends on @strat).
+ * note that splitting heuristic is still conditioned by @savings >= 3,
+ * so the first block will not reach this code path */
+ if (splitLevel == 1) return 128 KB;
+ if (splitLevel == 0) {
+ assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2);
+ splitLevel = splitLevels[strat];
+ } else {
+ assert(2 <= splitLevel && splitLevel <= 6);
+ splitLevel -= 2;
+ }
+ return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize);
+}
+
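To make the mapping in ZSTD_optimalBlockSize() above concrete: splitLevel 1 disables pre-splitting, splitLevel 0 picks a level from the per-strategy table, and explicit values 2-6 map down to internal levels 0-4 before reaching ZSTD_splitBlock(). A few worked values (strategy numbers follow the public ZSTD_strategy enum, asserted earlier in this file to start at ZSTD_fast == 1):

/* Worked examples of the splitLevel resolution above (reading aid only):
 *   splitLevel == 1                             -> always 128 KB, no pre-split
 *   splitLevel == 0, strat == ZSTD_fast     (1) -> splitLevels[1] == 0
 *   splitLevel == 0, strat == ZSTD_lazy2    (5) -> splitLevels[5] == 3
 *   splitLevel == 0, strat == ZSTD_btultra2 (9) -> splitLevels[9] == 4
 *   splitLevel == 4 (explicit)                  -> 4 - 2 == internal level 2
 * The chosen level is then handed to ZSTD_splitBlock(src, 128 KB, level, ...). */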
/*! ZSTD_compress_frameChunk() :
* Compress a chunk of data into one or multiple blocks.
* All blocks will be terminated, all input will be consumed.
@@ -4495,29 +4593,36 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
const void* src, size_t srcSize,
U32 lastFrameChunk)
{
- size_t blockSize = cctx->blockSize;
+ size_t blockSizeMax = cctx->blockSizeMax;
size_t remaining = srcSize;
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+ S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
- DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
while (remaining) {
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
+ size_t const blockSize = ZSTD_optimalBlockSize(cctx,
+ ip, remaining,
+ blockSizeMax,
+ cctx->appliedParams.preBlockSplitter_level,
+ cctx->appliedParams.cParams.strategy,
+ savings);
+ U32 const lastBlock = lastFrameChunk & (blockSize == remaining);
+ assert(blockSize <= remaining);
/* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we are adding
* an additional 1. We need to revisit and change this logic to be more consistent. */
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
dstSize_tooSmall,
"not enough space to store compressed block");
- if (remaining < blockSize) blockSize = remaining;
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
@@ -4555,6 +4660,21 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
}
} /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/
+ /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data.
+ * Without splitting, the maximum expansion is 3 bytes per full block.
+ * An adversarial input could attempt to fudge the split detector,
+ * and make it split incompressible data, resulting in more block headers.
+ * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block,
+ * and the splitter never creates blocks that small (current lower limit is 8 KB),
+ * there is already no risk of expanding beyond the ZSTD_COMPRESSBOUND() limit.
+ * But if the goal is to not expand by more than 3 bytes per 128 KB full block,
+ * then yes, it becomes possible to make the block splitter oversplit incompressible data.
+ * Using @savings, we enforce an even more conservative condition,
+ * requiring the presence of enough savings (at least 3 bytes) to authorize splitting,
+ * otherwise only full blocks are used.
+ * But being conservative is fine,
+ * since splitting barely compressible blocks is not fruitful anyway */
+ savings += (S64)blockSize - (S64)cSize;
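A short numeric trace of the @savings counter may help: it starts as total bytes consumed minus bytes produced for the frame so far, then each block contributes blockSize - cSize. Numbers below are made up for illustration:

/* Illustrative trace of @savings across two blocks:
 *   frame start:  consumedSrcSize == producedCSize          -> savings = 0
 *   block 1: 128 KB incompressible, emitted raw (+3 header bytes)
 *            savings += 131072 - 131075                     -> savings = -3  (splitting stays off)
 *   block 2: 128 KB compressed down to 90 KB
 *            savings += 131072 - 92160                      -> savings = 38909 (>= 3, splitting allowed)
 */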
ip += blockSize;
assert(remaining >= blockSize);
@@ -4573,8 +4693,10 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
- const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
-{ BYTE* const op = (BYTE*)dst;
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize, U32 dictID)
+{
+ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
@@ -4672,7 +4794,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
const void* src, size_t srcSize,
U32 frame, U32 lastFrameChunk)
{
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
@@ -4707,7 +4829,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
src, (BYTE const*)src + srcSize);
}
- DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
@@ -4776,13 +4898,14 @@ size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
- ldmState_t* ls,
- ZSTD_cwksp* ws,
- ZSTD_CCtx_params const* params,
- const void* src, size_t srcSize,
- ZSTD_dictTableLoadMethod_e dtlm,
- ZSTD_tableFillPurpose_e tfp)
+static size_t
+ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
{
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
@@ -4826,17 +4949,18 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
}
ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
- DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder);
if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict");
ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes");
}
/* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
- if (params->cParams.strategy < ZSTD_btultra) {
- U32 maxDictSize = 8U << MIN(MAX(params->cParams.hashLog, params->cParams.chainLog), 28);
+ { U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31);
if (srcSize > maxDictSize) {
ip = iend - maxDictSize;
src = ip;
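The indexable-dictionary cap changes shape here: the old expression shifted 8 by max(hashLog, chainLog) and only applied to strategies below ZSTD_btultra, while the new one shifts 1 by max(hashLog+3, chainLog+1) for every strategy, so the budget is unchanged when hashLog dominates but 4x smaller when chainLog does. Two worked values with illustrative parameters:

/* Worked examples for the dictionary-size cap above (parameters are illustrative):
 *   hashLog=17, chainLog=16:
 *     old: 8U << 17                         = 1 MiB
 *     new: 1U << MAX(17+3, 16+1) = 1U << 20 = 1 MiB    (unchanged)
 *   hashLog=17, chainLog=24:
 *     old: 8U << 24                         = 128 MiB
 *     new: 1U << MAX(17+3, 24+1) = 1U << 25 = 32 MiB   (4x smaller)
 */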
@@ -4900,6 +5024,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
|| !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
|| !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
assert(srcSize >= HASH_READ_SIZE);
+ DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize);
ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
#else
assert(0); /* shouldn't be called: cparams should've been adjusted. */
@@ -4946,7 +5071,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
{ unsigned maxSymbolValue = 255;
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
- dictEnd-dictPtr, &hasZeroWeights);
+ (size_t)(dictEnd-dictPtr), &hasZeroWeights);
/* We only set the loaded table as valid if it contains all non-zero
* weights. Otherwise, we set it to check */
@@ -4958,7 +5083,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
}
{ unsigned offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
/* fill all offset symbols to avoid garbage at end of table */
@@ -4973,7 +5098,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -4987,7 +5112,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -5021,7 +5146,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
} } }
- return dictPtr - (const BYTE*)dict;
+ return (size_t)(dictPtr - (const BYTE*)dict);
}
/* Dictionary format :
@@ -5034,7 +5159,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
* dictSize supposed >= 8
*/
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* dict, size_t dictSize,
@@ -5067,7 +5192,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
const ZSTD_CCtx_params* params,
@@ -5144,11 +5269,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, cdict->dictContentType, dtlm,
- ZSTD_tfp_forCCtx, cctx->entropyWorkspace)
+ ZSTD_tfp_forCCtx, cctx->tmpWorkspace)
: ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
- dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace);
+ dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
@@ -5252,7 +5377,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
}
cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
- return op-ostart;
+ return (size_t)(op-ostart);
}
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
@@ -5476,14 +5601,16 @@ static size_t ZSTD_initCDict_internal(
return 0;
}
-static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_compressionParameters cParams,
- ZSTD_paramSwitch_e useRowMatchFinder,
- U32 enableDedicatedDictSearch,
- ZSTD_customMem customMem)
+static ZSTD_CDict*
+ZSTD_createCDict_advanced_internal(size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_compressionParameters cParams,
+ ZSTD_ParamSwitch_e useRowMatchFinder,
+ int enableDedicatedDictSearch,
+ ZSTD_customMem customMem)
{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize);
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
@@ -5520,6 +5647,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
{
ZSTD_CCtx_params cctxParams;
ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
ZSTD_CCtxParams_init(&cctxParams, 0);
cctxParams.cParams = cParams;
cctxParams.customMem = customMem;
@@ -5540,7 +5668,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
ZSTD_compressionParameters cParams;
ZSTD_CDict* cdict;
- DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
if (cctxParams.enableDedicatedDictSearch) {
@@ -5559,7 +5687,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
}
- DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch);
cctxParams.cParams = cParams;
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
@@ -5622,7 +5750,7 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
* workspaceSize: Use ZSTD_estimateCDictSize()
* to determine how large workspace must be.
* cParams : use ZSTD_getCParams() to transform a compression level
- * into its relevants cParams.
+ * into its relevant cParams.
* @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
* Note : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
@@ -5634,7 +5762,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
/* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
@@ -5645,6 +5773,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
+ DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize);
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
@@ -5655,8 +5784,6 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_cwksp_move(&cdict->workspace, &ws);
}
- DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
- (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
ZSTD_CCtxParams_init(&params, 0);
@@ -5829,7 +5956,7 @@ size_t ZSTD_CStreamOutSize(void)
return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}
-static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
return ZSTD_cpm_attachDict;
@@ -5961,11 +6088,11 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
- return cctx->blockSize - cctx->stableIn_notConsumed;
+ return cctx->blockSizeMax - cctx->stableIn_notConsumed;
}
assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered);
{ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
- if (hintInSize==0) hintInSize = cctx->blockSize;
+ if (hintInSize==0) hintInSize = cctx->blockSizeMax;
return hintInSize;
}
}
@@ -6017,12 +6144,13 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
case zcss_load:
if ( (flushMode == ZSTD_e_end)
- && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */
|| zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
size_t const cSize = ZSTD_compressEnd_public(zcs,
- op, oend-op, ip, iend-ip);
+ op, (size_t)(oend-op),
+ ip, (size_t)(iend-ip));
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
ip = iend;
@@ -6036,7 +6164,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
size_t const loaded = ZSTD_limitCopy(
zcs->inBuff + zcs->inBuffPos, toLoad,
- ip, iend-ip);
+ ip, (size_t)(iend-ip));
zcs->inBuffPos += loaded;
if (ip) ip += loaded;
if ( (flushMode == ZSTD_e_continue)
@@ -6052,7 +6180,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
} else {
assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
if ( (flushMode == ZSTD_e_continue)
- && ( (size_t)(iend - ip) < zcs->blockSize) ) {
+ && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) {
/* can't compress a full block : stop here */
zcs->stableIn_notConsumed = (size_t)(iend - ip);
ip = iend; /* pretend to have consumed input */
@@ -6069,9 +6197,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
{ int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
void* cDst;
size_t cSize;
- size_t oSize = oend-op;
+ size_t oSize = (size_t)(oend-op);
size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress
- : MIN((size_t)(iend - ip), zcs->blockSize);
+ : MIN((size_t)(iend - ip), zcs->blockSizeMax);
if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
cDst = op; /* compress into output buffer, to skip flush stage */
else
@@ -6086,9 +6214,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
/* prepare next block */
- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax;
if (zcs->inBuffTarget > zcs->inBuffSize)
- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
(unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
@@ -6152,8 +6280,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
}
}
- input->pos = ip - istart;
- output->pos = op - ostart;
+ input->pos = (size_t)(ip - istart);
+ output->pos = (size_t)(op - ostart);
if (zcs->frameEnded) return 0;
return ZSTD_nextInputSizeHint(zcs);
}
@@ -6213,6 +6341,11 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
return 0;
}
+/*
+ * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize.
+ * Otherwise, it's ignored.
+ * @return: 0 on success, or a ZSTD_error code otherwise.
+ */
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
ZSTD_EndDirective endOp,
size_t inSize)
@@ -6229,19 +6362,19 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
*/
params.compressionLevel = cctx->cdict->compressionLevel;
}
- DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
+ DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage");
if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */
{ size_t const dictSize = prefixDict.dict
? prefixDict.dictSize
: (cctx->cdict ? cctx->cdict->dictContentSize : 0);
- ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+ ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
params.cParams = ZSTD_getCParamsFromCCtxParams(
&params, cctx->pledgedSrcSizePlusOne-1,
dictSize, mode);
}
- params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
+ params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, &params.cParams);
params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences);
@@ -6260,9 +6393,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
}
if (params.nbWorkers > 0) {
-#if ZSTD_TRACE
+# if ZSTD_TRACE
cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
-#endif
+# endif
/* mt context creation */
if (cctx->mtctx == NULL) {
DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
@@ -6298,7 +6431,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
/* for small input: avoid automatic flush on reaching end of block, since
* it would require to add a 3-bytes null block to end frame
*/
- cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize);
} else {
cctx->inBuffTarget = 0;
}
@@ -6464,11 +6597,11 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
}
/* ZSTD_validateSequence() :
- * @offCode : is presumed to follow format required by ZSTD_storeSeq()
+ * @offBase : must use the format required by ZSTD_storeSeq()
* @returns a ZSTD error code if sequence is not valid
*/
static size_t
-ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch,
+ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch,
size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer)
{
U32 const windowSize = 1u << windowLog;
@@ -6479,7 +6612,7 @@ ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch,
*/
size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4;
- RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
+ RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
/* Validate maxNbSeq is large enough for the given matchLength and minMatch */
RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch");
return 0;
@@ -6502,21 +6635,27 @@ static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32
return offBase;
}
-size_t
-ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
- ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize,
- ZSTD_paramSwitch_e externalRepSearch)
+/* This function scans through an array of ZSTD_Sequence,
+ * storing the sequences it reads, until it reaches a block delimiter.
+ * Note that the block delimiter includes the last literals of the block.
+ * @blockSize must be == sum(sequence_lengths).
+ * @returns @blockSize on success, and a ZSTD_error otherwise.
+ */
+static size_t
+ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch)
{
U32 idx = seqPos->idx;
U32 const startIdx = idx;
BYTE const* ip = (BYTE const*)(src);
const BYTE* const iend = ip + blockSize;
- repcodes_t updatedRepcodes;
+ Repcodes_t updatedRepcodes;
U32 dictSize;
- DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize);
+ DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize);
if (cctx->cdict) {
dictSize = (U32)cctx->cdict->dictContentSize;
@@ -6525,7 +6664,7 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
} else {
dictSize = 0;
}
- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
U32 const litLength = inSeqs[idx].litLength;
U32 const matchLength = inSeqs[idx].matchLength;
@@ -6542,8 +6681,10 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
- FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
- cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch,
+ seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize,
+ ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
@@ -6551,6 +6692,7 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
ip += matchLength + litLength;
}
+ RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found.");
/* If we skipped repcode search while parsing, we need to update repcodes now */
assert(externalRepSearch != ZSTD_ps_auto);
@@ -6575,7 +6717,7 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
}
}
- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
if (inSeqs[idx].litLength) {
DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
@@ -6585,21 +6727,35 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
}
RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!");
seqPos->idx = idx+1;
- return 0;
+ return blockSize;
}
-size_t
-ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch)
+/*
+ * This function attempts to scan through @blockSize bytes in @src
+ * represented by the sequences in @inSeqs,
+ * storing any (partial) sequences.
+ *
+ * Occasionally, we may want to reduce the actual number of bytes consumed from @src
+ * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH.
+ *
+ * @returns the number of bytes consumed from @src, necessarily <= @blockSize.
+ * Otherwise, it may return a ZSTD error if something went wrong.
+ */
+static size_t
+ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch)
{
U32 idx = seqPos->idx;
U32 startPosInSequence = seqPos->posInSequence;
U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
size_t dictSize;
- BYTE const* ip = (BYTE const*)(src);
- BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
- repcodes_t updatedRepcodes;
+ const BYTE* const istart = (const BYTE*)(src);
+ const BYTE* ip = istart;
+ const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ Repcodes_t updatedRepcodes;
U32 bytesAdjustment = 0;
U32 finalMatchSplit = 0;
@@ -6613,9 +6769,9 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
} else {
dictSize = 0;
}
- DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
const ZSTD_Sequence currSeq = inSeqs[idx];
U32 litLength = currSeq.litLength;
@@ -6696,35 +6852,40 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
seqPos->idx = idx;
seqPos->posInSequence = endPosInSequence;
- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
iend -= bytesAdjustment;
if (ip != iend) {
/* Store any last literals */
- U32 lastLLSize = (U32)(iend - ip);
+ U32 const lastLLSize = (U32)(iend - ip);
assert(ip <= iend);
DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
seqPos->posInSrc += lastLLSize;
}
- return bytesAdjustment;
+ return (size_t)(iend-istart);
}
-typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
-static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+/* @seqPos represents a position within @inSeqs;
+ * it is read by this function, and updated
+ * once enough sequences to fill a block of size @blockSize have been consumed.
+ * @return: nb of bytes consumed from @src, necessarily <= @blockSize.
+ */
+typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch);
+
+static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode)
{
- ZSTD_sequenceCopier sequenceCopier = NULL;
- assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode));
if (mode == ZSTD_sf_explicitBlockDelimiters) {
- return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
- } else if (mode == ZSTD_sf_noBlockDelimiters) {
- return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ return ZSTD_transferSequences_wBlockDelim;
}
- assert(sequenceCopier != NULL);
- return sequenceCopier;
+ assert(mode == ZSTD_sf_noBlockDelimiters);
+ return ZSTD_transferSequences_noDelim;
}
/* Discover the size of next block by searching for the delimiter.
@@ -6732,7 +6893,7 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
* otherwise it's an input error.
* The block size retrieved will be later compared to ensure it remains within bounds */
static size_t
-blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos)
+blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos)
{
int end = 0;
size_t blockSize = 0;
@@ -6754,20 +6915,17 @@ blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD
return blockSize;
}
-/* More a "target" block size */
-static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining)
-{
- int const lastBlock = (remaining <= blockSize);
- return lastBlock ? remaining : blockSize;
-}
-
-static size_t determine_blockSize(ZSTD_sequenceFormat_e mode,
+static size_t determine_blockSize(ZSTD_SequenceFormat_e mode,
size_t blockSize, size_t remaining,
- const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos)
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ ZSTD_SequencePosition seqPos)
{
DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining);
- if (mode == ZSTD_sf_noBlockDelimiters)
- return blockSize_noDelimiter(blockSize, remaining);
+ if (mode == ZSTD_sf_noBlockDelimiters) {
+ /* Note: more a "target" block size */
+ return MIN(remaining, blockSize);
+ }
+ assert(mode == ZSTD_sf_explicitBlockDelimiters);
{ size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos);
FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters");
if (explicitBlockSize > blockSize)
@@ -6778,7 +6936,7 @@ static size_t determine_blockSize(ZSTD_sequenceFormat_e mode,
}
}
-/* Compress, block-by-block, all of the sequences given.
+/* Compress all provided sequences, block-by-block.
*
* Returns the cumulative size of all compressed blocks (including their headers),
* otherwise a ZSTD error.
@@ -6791,11 +6949,11 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
{
size_t cSize = 0;
size_t remaining = srcSize;
- ZSTD_sequencePosition seqPos = {0, 0, 0};
+ ZSTD_SequencePosition seqPos = {0, 0, 0};
- BYTE const* ip = (BYTE const*)src;
+ const BYTE* ip = (BYTE const*)src;
BYTE* op = (BYTE*)dst;
- ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+ ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
/* Special case: empty frame */
@@ -6811,19 +6969,19 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
while (remaining) {
size_t compressedSeqsSize;
size_t cBlockSize;
- size_t additionalByteAdjustment;
size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters,
- cctx->blockSize, remaining,
+ cctx->blockSizeMax, remaining,
inSeqs, inSeqsSize, seqPos);
U32 const lastBlock = (blockSize == remaining);
FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size");
assert(blockSize <= remaining);
ZSTD_resetSeqStore(&cctx->seqStore);
- DEBUGLOG(5, "Working on new block. Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize);
- additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes);
- FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
- blockSize -= additionalByteAdjustment;
+ blockSize = sequenceCopier(cctx,
+ &seqPos, inSeqs, inSeqsSize,
+ ip, blockSize,
+ cctx->appliedParams.searchForExternalRepcodes);
+ FORWARD_IF_ERROR(blockSize, "Bad sequence copy");
/* If blocks are too small, emit as a nocompress block */
/* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
@@ -6831,7 +6989,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
- DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize);
cSize += cBlockSize;
ip += blockSize;
op += cBlockSize;
@@ -6846,7 +7004,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
&cctx->appliedParams,
op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
blockSize,
- cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
cctx->bmi2);
FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
@@ -6854,10 +7012,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
if (!cctx->isFirstBlock &&
ZSTD_maybeRLE(&cctx->seqStore) &&
ZSTD_isRLE(ip, blockSize)) {
- /* We don't want to emit our first block as a RLE even if it qualifies because
- * doing so will cause the decoder (cli only) to throw a "should consume all input error."
- * This is only an issue for zstd <= v1.4.3
- */
+ /* Note: don't emit the first block as RLE even if it qualifies because
+ * doing so will cause the decoder (cli <= v1.4.3 only) to throw the (invalid) error
+ * "should consume all input".
+ */
compressedSeqsSize = 1;
}
@@ -6909,30 +7067,36 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
{
BYTE* op = (BYTE*)dst;
size_t cSize = 0;
- size_t compressedBlocksSize = 0;
- size_t frameHeaderSize = 0;
/* Transparent initialization stage, same as compressStream2() */
- DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity);
+ DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity);
assert(cctx != NULL);
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+
/* Begin writing output, starting with frame header */
- frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
- op += frameHeaderSize;
- dstCapacity -= frameHeaderSize;
- cSize += frameHeaderSize;
+ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity,
+ &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ assert(frameHeaderSize <= dstCapacity);
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ }
if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
XXH64_update(&cctx->xxhState, src, srcSize);
}
- /* cSize includes block header size and compressed sequences size */
- compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+
+ /* Now generate compressed blocks */
+ { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx,
op, dstCapacity,
inSeqs, inSeqsSize,
src, srcSize);
- FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
- cSize += compressedBlocksSize;
- dstCapacity -= compressedBlocksSize;
+ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!");
+ cSize += cBlocksSize;
+ assert(cBlocksSize <= dstCapacity);
+ dstCapacity -= cBlocksSize;
+ }
+ /* Complete with frame checksum, if needed */
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
@@ -6945,6 +7109,530 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
return cSize;
}
+
+#if defined(__AVX2__)
+
+#include <immintrin.h>  /* AVX2 intrinsics */
+
+/*
+ * Convert 2 sequences per iteration, using AVX2 intrinsics:
+ * - offset -> offBase = offset + ZSTD_REP_NUM (== 3)
+ * - litLength -> (U16) litLength
+ * - matchLength -> (U16)(matchLength - 3)
+ * - rep is ignored
+ * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]).
+ *
+ * At the end, instead of extracting two __m128i,
+ * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1,
+ * then store the lower 16 bytes in one go.
+ *
+ * @returns 0 on success, when no long length (> 65535) is detected
+ * @returns > 0 if there is one long length:
+ * n+1 for a long matchLength at index n, or n+nbSequences+1 for a long litLength at index n.
+ */
+static size_t convertSequences_noRepcodes(
+ SeqDef* dstSeqs,
+ const ZSTD_Sequence* inSeqs,
+ size_t nbSequences)
+{
+ /*
+ * addition:
+ * For each 128-bit half: (offset+ZSTD_REP_NUM, litLength+0, matchLength-MINMATCH, rep+0)
+ */
+ const __m256i addition = _mm256_setr_epi32(
+ ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */
+ ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */
+ );
+
+ /* limit: check if there is a long length */
+ const __m256i limit = _mm256_set1_epi32(65535);
+
+ /*
+ * shuffle mask for byte-level rearrangement in each 128-bit half:
+ *
+ * Input layout (after addition) per 128-bit half:
+ * [ offset+ZSTD_REP_NUM (4 bytes) | litLength (4 bytes) | matchLength-MINMATCH (4 bytes) | rep (4 bytes) ]
+ * We only need:
+ * offBase (4 bytes) = offset + ZSTD_REP_NUM
+ * litLength (2 bytes) = low 2 bytes of litLength
+ * mlBase (2 bytes) = low 2 bytes of (matchLength)
+ * => Bytes [0..3, 4..5, 8..9], zero the rest.
+ */
+ const __m256i mask = _mm256_setr_epi8(
+ /* For the lower 128 bits => sequence i */
+ 0, 1, 2, 3, /* offBase */
+ 4, 5, /* litLength (16 bits) */
+ 8, 9, /* matchLength (16 bits) */
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+
+ /* For the upper 128 bits => sequence i+1 */
+ 16,17,18,19, /* offBase */
+ 20,21, /* litLength */
+ 24,25, /* matchLength */
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80
+ );
+
+ /*
+ * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8).
+ * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3].
+ * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1.
+ */
+#define PERM_LANE_0X_E8 0xE8 /* [0,2,2,3] in lane indices */
+
+ size_t longLen = 0, i = 0;
+
+ /* AVX permutation depends on the specific definition of target structures */
+ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+ ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6);
+
+ /* Process 2 sequences per loop iteration */
+ for (; i + 1 < nbSequences; i += 2) {
+ /* Load 2 ZSTD_Sequence (32 bytes) */
+ __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]);
+
+ /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} in each 128-bit half */
+ __m256i vadd = _mm256_add_epi32(vin, addition);
+
+ /* Check for long length */
+ __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit); /* 0xFFFFFFFF for element > 65535 */
+ int ll_res = _mm256_movemask_epi8(ll_cmp);
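+ /* note: the compare covers all 8 lanes (offBase and rep included); the 0x0FF00FF0 mask applied below keeps only the litLength and matchLength lanes */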
+
+ /* Shuffle bytes so each half gives us the 8 bytes we need */
+ __m256i vshf = _mm256_shuffle_epi8(vadd, mask);
+ /*
+ * Now:
+ * Lane0 = seq0's 8 bytes
+ * Lane1 = 0
+ * Lane2 = seq1's 8 bytes
+ * Lane3 = 0
+ */
+
+ /* Permute 64-bit lanes => move Lane2 down into Lane1. */
+ __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8);
+ /*
+ * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1].
+ * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them.
+ */
+
+ /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */
+ _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm));
+ /*
+ * This writes out 16 bytes total:
+ * - offset 0..7 => seq0 (offBase, litLength, mlBase)
+ * - offset 8..15 => seq1 (offBase, litLength, mlBase)
+ */
+
+ /* check (unlikely) long lengths > 65535
+ * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27]
+ * => combined mask = 0x0FF00FF0
+ */
+ if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) {
+ /* long length detected: let's figure out which one*/
+ if (inSeqs[i].matchLength > 65535+MINMATCH) {
+ assert(longLen == 0);
+ longLen = i + 1;
+ }
+ if (inSeqs[i].litLength > 65535) {
+ assert(longLen == 0);
+ longLen = i + nbSequences + 1;
+ }
+ if (inSeqs[i+1].matchLength > 65535+MINMATCH) {
+ assert(longLen == 0);
+ longLen = i + 1 + 1;
+ }
+ if (inSeqs[i+1].litLength > 65535) {
+ assert(longLen == 0);
+ longLen = i + 1 + nbSequences + 1;
+ }
+ }
+ }
+
+ /* Handle leftover if @nbSequences is odd */
+ if (i < nbSequences) {
+ /* process last sequence */
+ assert(i == nbSequences - 1);
+ dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset);
+ dstSeqs[i].litLength = (U16)inSeqs[i].litLength;
+ dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH);
+ /* check (unlikely) long lengths > 65535 */
+ if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) {
+ assert(longLen == 0);
+ longLen = i + 1;
+ }
+ if (UNLIKELY(inSeqs[i].litLength > 65535)) {
+ assert(longLen == 0);
+ longLen = i + nbSequences + 1;
+ }
+ }
+
+ return longLen;
+}
+
+/* the vector implementation could also be ported to SSSE3,
+ * but since this implementation targets modern systems (>= Sapphire Rapids),
+ * it's not deemed useful to develop and maintain code for older pre-AVX2 platforms */
+
+#else /* no AVX2 */
+
+static size_t convertSequences_noRepcodes(
+ SeqDef* dstSeqs,
+ const ZSTD_Sequence* inSeqs,
+ size_t nbSequences)
+{
+ size_t longLen = 0;
+ size_t n;
+ for (n=0; n<nbSequences; n++) {
+ dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
+ dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
+ dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
+ /* check for long length > 65535 */
+ if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
+ assert(longLen == 0);
+ longLen = n + 1;
+ }
+ if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+ assert(longLen == 0);
+ longLen = n + nbSequences + 1;
+ }
+ }
+ return longLen;
+}
+
+#endif
+
+/*
+ * Precondition: Sequences must end on an explicit Block Delimiter
+ * @return: 0 on success, or an error code.
+ * Note: Sequence validation functionality has been disabled (removed).
+ * This is helpful to generate a lean main pipeline, improving performance.
+ * It may be re-inserted later.
+ */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ int repcodeResolution)
+{
+ Repcodes_t updatedRepcodes;
+ size_t seqNb = 0;
+
+ DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences);
+
+ RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+
+ /* check end condition */
+ assert(nbSequences >= 1);
+ assert(inSeqs[nbSequences-1].matchLength == 0);
+ assert(inSeqs[nbSequences-1].offset == 0);
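+ /* the last entry is the block delimiter (matchLength==0, offset==0); it is not converted into a SeqDef, so only nbSequences-1 entries are processed below */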
+
+ /* Convert Sequences from public format to internal format */
+ if (!repcodeResolution) {
+ size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1);
+ cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1;
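+ /* longl encodes both position and type: values in [1, nbSequences-1] flag a long matchLength at index longl-1, larger values flag a long litLength at index longl-(nbSequences-1)-1 */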
+ if (longl) {
+ DEBUGLOG(5, "long length");
+ assert(cctx->seqStore.longLengthType == ZSTD_llt_none);
+ if (longl <= nbSequences-1) {
+ DEBUGLOG(5, "long match length detected at pos %zu", longl-1);
+ cctx->seqStore.longLengthType = ZSTD_llt_matchLength;
+ cctx->seqStore.longLengthPos = (U32)(longl-1);
+ } else {
+ DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences);
+ assert(longl <= 2* (nbSequences-1));
+ cctx->seqStore.longLengthType = ZSTD_llt_literalLength;
+ cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1);
+ }
+ }
+ } else {
+ for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) {
+ U32 const litLength = inSeqs[seqNb].litLength;
+ U32 const matchLength = inSeqs[seqNb].matchLength;
+ U32 const ll0 = (litLength == 0);
+ U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
+ ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
+ }
+ }
+
+ /* If we skipped repcode search while parsing, we need to update repcodes now */
+ if (!repcodeResolution && nbSequences > 1) {
+ U32* const rep = updatedRepcodes.rep;
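+ /* the offsets of the last (up to 3) full sequences become the new repcode history, most recent first in rep[0] */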
+
+ if (nbSequences >= 4) {
+ U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */
+ rep[2] = inSeqs[lastSeqIdx - 2].offset;
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else if (nbSequences == 3) {
+ rep[2] = rep[0];
+ rep[1] = inSeqs[0].offset;
+ rep[0] = inSeqs[1].offset;
+ } else {
+ assert(nbSequences == 2);
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = inSeqs[0].offset;
+ }
+ }
+
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
+
+ return 0;
+}
+
+#if defined(ZSTD_ARCH_X86_AVX2)
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t i;
+ __m256i const zeroVec = _mm256_setzero_si256();
+ __m256i sumVec = zeroVec; /* accumulates match+lit in 32-bit lanes */
+ ZSTD_ALIGNED(32) U32 tmp[8]; /* temporary buffer for reduction */
+ size_t mSum = 0, lSum = 0;
+ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+
+ /* Process 2 structs (32 bytes) at a time */
+ for (i = 0; i + 2 <= nbSeqs; i += 2) {
+ /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */
+ __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]);
+ /* check end of block signal */
+ __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec);
+ int cmp_res = _mm256_movemask_epi8(cmp);
+ /* indices for match lengths correspond to bits [8..11], [24..27]
+ * => combined mask = 0x0F000F00 */
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+ if (cmp_res & 0x0F000F00) break;
+ /* Accumulate in sumVec */
+ sumVec = _mm256_add_epi32(sumVec, data);
+ }
+
+ /* Horizontal reduction */
+ _mm256_store_si256((__m256i*)tmp, sumVec);
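+ /* tmp[1] and tmp[5] hold the accumulated litLength lanes of the two 128-bit halves; tmp[2] and tmp[6] hold the matchLength lanes */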
+ lSum = tmp[1] + tmp[5];
+ mSum = tmp[2] + tmp[6];
+
+ /* Handle the leftover */
+ for (; i < nbSeqs; i++) {
+ lSum += seqs[i].litLength;
+ mSum += seqs[i].matchLength;
+ if (seqs[i].matchLength == 0) break; /* end of block */
+ }
+
+ if (i==nbSeqs) {
+ /* reaching end of sequences: end of block signal was not present */
+ BlockSummary bs;
+ bs.nbSequences = ERROR(externalSequences_invalid);
+ return bs;
+ }
+ { BlockSummary bs;
+ bs.nbSequences = i+1;
+ bs.blockSize = lSum + mSum;
+ bs.litSize = lSum;
+ return bs;
+ }
+}
+
+#else
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t totalMatchSize = 0;
+ size_t litSize = 0;
+ size_t n;
+ assert(seqs);
+ for (n=0; n<nbSeqs; n++) {
+ totalMatchSize += seqs[n].matchLength;
+ litSize += seqs[n].litLength;
+ if (seqs[n].matchLength == 0) {
+ assert(seqs[n].offset == 0);
+ break;
+ }
+ }
+ if (n==nbSeqs) {
+ /* reaching end of sequences: end of block signal was not present */
+ BlockSummary bs;
+ bs.nbSequences = ERROR(externalSequences_invalid);
+ return bs;
+ }
+ { BlockSummary bs;
+ bs.nbSequences = n+1;
+ bs.blockSize = litSize + totalMatchSize;
+ bs.litSize = litSize;
+ return bs;
+ }
+}
+
+#endif
+
+static size_t
+ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ const void* literals, size_t litSize, size_t srcSize)
+{
+ size_t remaining = srcSize;
+ size_t cSize = 0;
+ BYTE* op = (BYTE*)dst;
+ int const repcodeResolution = (cctx->appliedParams.searchForExternalRepcodes == ZSTD_ps_enable);
+ assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto);
+
+ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize);
+ RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block");
+
+ /* Special case: empty frame */
+ if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE24(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (nbSequences) {
+ size_t compressedSeqsSize, cBlockSize, conversionStatus;
+ BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences);
+ U32 const lastBlock = (block.nbSequences == nbSequences);
+ FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block");
+ assert(block.nbSequences <= nbSequences);
+ RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer");
+ ZSTD_resetSeqStore(&cctx->seqStore);
+
+ conversionStatus = ZSTD_convertBlockSequences(cctx,
+ inSeqs, block.nbSequences,
+ repcodeResolution);
+ FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion");
+ inSeqs += block.nbSequences;
+ nbSequences -= block.nbSequences;
+ remaining -= block.blockSize;
+
+ /* Note: when blockSize is very small, other variants send it uncompressed.
+ * Here, we still send the sequences, because we don't have the original source to send uncompressed.
+ * One could imagine in theory reproducing the source from the sequences,
+ * but that's complex and memory intensive, and goes against the objectives of this variant. */
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
+
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal(
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ literals, block.litSize,
+ &cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ /* note: the spec forbids for any compressed block to be larger than maximum block size */
+ if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0;
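+ /* compressedSeqsSize==0 signals an incompressible block; since the raw source is not available in this variant, this case is turned into an error further below */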
+ DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
+ litSize -= block.litSize;
+ literals = (const char*)literals + block.litSize;
+
+ /* Note: difficult to check source for RLE block when only Literals are provided,
+ * but it could be considered from analyzing the sequence directly */
+
+ if (compressedSeqsSize == 0) {
+ /* Sending uncompressed blocks is out of reach, because the source is not provided.
+ * In theory, one could use the sequences to regenerate the source, like a decompressor,
+ * but it's complex and memory hungry, defeating the purpose of this variant.
+ * Current outcome: generate an error code.
+ */
+ RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block");
+ } else {
+ U32 cBlockHeader;
+ assert(compressedSeqsSize > 1); /* no RLE */
+ /* Error checking and repcodes update */
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ /* Write block header into beginning of block*/
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ op += cBlockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);
+
+ if (lastBlock) {
+ assert(nbSequences == 0);
+ break;
+ }
+ }
+
+ RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed");
+ RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize);
+ DEBUGLOG(4, "cSize final total: %zu", cSize);
+ return cSize;
+}
+
+size_t
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* literals, size_t litSize, size_t litCapacity,
+ size_t decompressedSize)
+{
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity);
+ assert(cctx != NULL);
+ if (litCapacity < litSize) {
+ RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of read out-of-bound)");
+ }
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed");
+
+ if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) {
+ RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters");
+ }
+ if (cctx->appliedParams.validateSequences) {
+ RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation");
+ }
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum");
+ }
+
+ /* Begin writing output, starting with frame header */
+ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity,
+ &cctx->appliedParams, decompressedSize, cctx->dictID);
+ op += frameHeaderSize;
+ assert(frameHeaderSize <= dstCapacity);
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ }
+
+ /* Now generate compressed blocks */
+ { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ literals, litSize, decompressedSize);
+ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!");
+ cSize += cBlocksSize;
+ assert(cBlocksSize <= dstCapacity);
+ dstCapacity -= cBlocksSize;
+ }
+
+ DEBUGLOG(4, "Final compressed size: %zu", cSize);
+ return cSize;
+}
+
/*====== Finalize ======*/
static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs)
@@ -6963,7 +7651,6 @@ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
-
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
@@ -7044,7 +7731,7 @@ static void ZSTD_dedicatedDictSearch_revertCParams(
}
}
-static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
switch (mode) {
case ZSTD_cpm_unknown:
@@ -7068,8 +7755,8 @@ static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMo
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
* Use dictSize == 0 for unknown or unused.
- * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
@@ -7107,7 +7794,9 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
+static ZSTD_parameters
+ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
+{
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
@@ -7121,7 +7810,8 @@ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned lo
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
@@ -7129,8 +7819,8 @@ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeH
void ZSTD_registerSequenceProducer(
ZSTD_CCtx* zc,
void* extSeqProdState,
- ZSTD_sequenceProducer_F extSeqProdFunc
-) {
+ ZSTD_sequenceProducer_F extSeqProdFunc)
+{
assert(zc != NULL);
ZSTD_CCtxParams_registerSequenceProducer(
&zc->requestedParams, extSeqProdState, extSeqProdFunc
@@ -7140,8 +7830,8 @@ void ZSTD_registerSequenceProducer(
void ZSTD_CCtxParams_registerSequenceProducer(
ZSTD_CCtx_params* params,
void* extSeqProdState,
- ZSTD_sequenceProducer_F extSeqProdFunc
-) {
+ ZSTD_sequenceProducer_F extSeqProdFunc)
+{
assert(params != NULL);
if (extSeqProdFunc != NULL) {
params->extSeqProdFunc = extSeqProdFunc;
diff --git a/lib/compress/zstd_compress_internal.h b/lib/compress/zstd_compress_internal.h
index e41d7b78ec6..ca5e2a4c5bf 100644
--- a/lib/compress/zstd_compress_internal.h
+++ b/lib/compress/zstd_compress_internal.h
@@ -24,10 +24,7 @@
# include "zstdmt_compress.h"
#endif
#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
+#include "zstd_preSplit.h" /* ZSTD_SLIPBLOCK_WORKSPACESIZE */
/*-*************************************
* Constants
@@ -82,6 +79,70 @@ typedef struct {
ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;
+/***********************************************
+* Sequences *
+***********************************************/
+typedef struct SeqDef_s {
+ U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
+ U16 litLength;
+ U16 mlBase; /* mlBase == matchLength - MINMATCH */
+} SeqDef;
+
+/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */
+typedef enum {
+ ZSTD_llt_none = 0, /* no longLengthType */
+ ZSTD_llt_literalLength = 1, /* represents a long literal */
+ ZSTD_llt_matchLength = 2 /* represents a long match */
+} ZSTD_longLengthType_e;
+
+typedef struct {
+ SeqDef* sequencesStart;
+ SeqDef* sequences; /* ptr to end of sequences */
+ BYTE* litStart;
+ BYTE* lit; /* ptr to end of literals */
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+
+ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ ZSTD_longLengthType_e longLengthType;
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
+} SeqStore_t;
+
+typedef struct {
+ U32 litLength;
+ U32 matchLength;
+} ZSTD_SequenceLength;
+
+/**
+ * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences
+ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
+ */
+MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t const* seqStore, SeqDef const* seq)
+{
+ ZSTD_SequenceLength seqLen;
+ seqLen.litLength = seq->litLength;
+ seqLen.matchLength = seq->mlBase + MINMATCH;
+ if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ seqLen.litLength += 0x10000;
+ }
+ if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+ seqLen.matchLength += 0x10000;
+ }
+ }
+ return seqLen;
+}
+
+const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+
/***********************************************
* Entropy buffer statistics structs and funcs *
***********************************************/
@@ -91,7 +152,7 @@ typedef struct {
* hufDesSize refers to the size of huffman tree description in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
typedef struct {
- symbolEncodingType_e hType;
+ SymbolEncodingType_e hType;
BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;
@@ -102,9 +163,9 @@ typedef struct {
* fseTablesSize refers to the size of fse tables in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
typedef struct {
- symbolEncodingType_e llType;
- symbolEncodingType_e ofType;
- symbolEncodingType_e mlType;
+ SymbolEncodingType_e llType;
+ SymbolEncodingType_e ofType;
+ SymbolEncodingType_e mlType;
BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
size_t fseTablesSize;
size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
@@ -119,7 +180,7 @@ typedef struct {
* Builds entropy for the block.
* @return : 0 on success or error code */
size_t ZSTD_buildBlockEntropyStats(
- const seqStore_t* seqStorePtr,
+ const SeqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
@@ -148,15 +209,9 @@ typedef struct {
stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
size_t size; /* The number of sequences. <= capacity. */
size_t capacity; /* The capacity starting from `seq` pointer */
-} rawSeqStore_t;
+} RawSeqStore_t;
-typedef struct {
- U32 idx; /* Index in array of ZSTD_Sequence */
- U32 posInSequence; /* Position within sequence at idx */
- size_t posInSrc; /* Number of bytes given by sequences provided so far */
-} ZSTD_sequencePosition;
-
-UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+UNUSED_ATTR static const RawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
typedef struct {
int price; /* price from beginning of segment to this position */
@@ -188,7 +243,7 @@ typedef struct {
U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
- ZSTD_paramSwitch_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
} optState_t;
typedef struct {
@@ -210,11 +265,11 @@ typedef struct {
#define ZSTD_WINDOW_START_INDEX 2
-typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+typedef struct ZSTD_MatchState_t ZSTD_MatchState_t;
#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
-struct ZSTD_matchState_t {
+struct ZSTD_MatchState_t {
ZSTD_window_t window; /* State for window round buffer management */
U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
* When loadedDictEnd != 0, a dictionary is in use, and still valid.
@@ -236,15 +291,15 @@ struct ZSTD_matchState_t {
U32* hashTable3;
U32* chainTable;
- U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+ int forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
int dedicatedDictSearch; /* Indicates whether this matchState is using the
* dedicated dictionary search structure.
*/
optState_t opt; /* optimal parser state */
- const ZSTD_matchState_t* dictMatchState;
+ const ZSTD_MatchState_t* dictMatchState;
ZSTD_compressionParameters cParams;
- const rawSeqStore_t* ldmSeqStore;
+ const RawSeqStore_t* ldmSeqStore;
/* Controls prefetching in some dictMatchState matchfinders.
* This behavior is controlled from the cctx ms.
@@ -262,7 +317,7 @@ struct ZSTD_matchState_t {
typedef struct {
ZSTD_compressedBlockState_t* prevCBlock;
ZSTD_compressedBlockState_t* nextCBlock;
- ZSTD_matchState_t matchState;
+ ZSTD_MatchState_t matchState;
} ZSTD_blockState_t;
typedef struct {
@@ -289,7 +344,7 @@ typedef struct {
} ldmState_t;
typedef struct {
- ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
+ ZSTD_ParamSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
U32 hashLog; /* Log size of hashTable */
U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
U32 minMatchLength; /* Minimum match length */
@@ -320,7 +375,7 @@ struct ZSTD_CCtx_params_s {
* There is no guarantee that hint is close to actual source size */
ZSTD_dictAttachPref_e attachDictPref;
- ZSTD_paramSwitch_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
/* Multithreading: used to pass parameters to mtctx */
int nbWorkers;
@@ -339,14 +394,27 @@ struct ZSTD_CCtx_params_s {
ZSTD_bufferMode_e outBufferMode;
/* Sequence compression API */
- ZSTD_sequenceFormat_e blockDelimiters;
+ ZSTD_SequenceFormat_e blockDelimiters;
int validateSequences;
- /* Block splitting */
- ZSTD_paramSwitch_e useBlockSplitter;
+ /* Block splitting
+ * @postBlockSplitter executes split analysis after sequences are produced,
+ * it's more accurate but consumes more resources.
+ * @preBlockSplitter_level splits before knowing sequences,
+ * it's more approximate but also cheaper.
+ * Valid @preBlockSplitter_level values range from 0 to 6 (included).
+ * 0 means auto, 1 means do not split,
+ * then levels are sorted in increasing cpu budget, from 2 (fastest) to 6 (slowest).
+ * Highest @preBlockSplitter_level combines well with @postBlockSplitter.
+ */
+ ZSTD_ParamSwitch_e postBlockSplitter;
+ int preBlockSplitter_level;
+
+ /* Adjust the max block size*/
+ size_t maxBlockSize;
/* Param for deciding whether to use row-based matchfinder */
- ZSTD_paramSwitch_e useRowMatchFinder;
+ ZSTD_ParamSwitch_e useRowMatchFinder;
/* Always load a dictionary in ext-dict mode (not prefix mode)? */
int deterministicRefPrefix;
@@ -355,7 +423,7 @@ struct ZSTD_CCtx_params_s {
ZSTD_customMem customMem;
/* Controls prefetching in some dictMatchState matchfinders */
- ZSTD_paramSwitch_e prefetchCDictTables;
+ ZSTD_ParamSwitch_e prefetchCDictTables;
/* Controls whether zstd will fall back to an internal matchfinder
* if the external matchfinder returns an error code. */
@@ -367,15 +435,13 @@ struct ZSTD_CCtx_params_s {
void* extSeqProdState;
ZSTD_sequenceProducer_F extSeqProdFunc;
- /* Adjust the max block size*/
- size_t maxBlockSize;
-
/* Controls repcode search in external sequence parsing */
- ZSTD_paramSwitch_e searchForExternalRepcodes;
+ ZSTD_ParamSwitch_e searchForExternalRepcodes;
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+#define TMP_WORKSPACE_SIZE (MAX(ENTROPY_WORKSPACE_SIZE, ZSTD_SLIPBLOCK_WORKSPACESIZE))
/**
* Indicates whether this compression proceeds directly from user-provided
@@ -393,11 +459,11 @@ typedef enum {
*/
#define ZSTD_MAX_NB_BLOCK_SPLITS 196
typedef struct {
- seqStore_t fullSeqStoreChunk;
- seqStore_t firstHalfSeqStore;
- seqStore_t secondHalfSeqStore;
- seqStore_t currSeqStore;
- seqStore_t nextSeqStore;
+ SeqStore_t fullSeqStoreChunk;
+ SeqStore_t firstHalfSeqStore;
+ SeqStore_t secondHalfSeqStore;
+ SeqStore_t currSeqStore;
+ SeqStore_t nextSeqStore;
U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
ZSTD_entropyCTablesMetadata_t entropyMetadata;
@@ -414,7 +480,7 @@ struct ZSTD_CCtx_s {
size_t dictContentSize;
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
- size_t blockSize;
+ size_t blockSizeMax;
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
unsigned long long consumedSrcSize;
unsigned long long producedCSize;
@@ -426,13 +492,14 @@ struct ZSTD_CCtx_s {
int isFirstBlock;
int initialized;
- seqStore_t seqStore; /* sequences storage ptrs */
+ SeqStore_t seqStore; /* sequences storage ptrs */
ldmState_t ldmState; /* long distance matching state */
rawSeq* ldmSequences; /* Storage for the ldm output sequences */
size_t maxNbLdmSequences;
- rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ RawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
ZSTD_blockState_t blockState;
- U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+ void* tmpWorkspace; /* used as a substitute for stack space - must be aligned for S64 type */
+ size_t tmpWkspSize;
/* Whether we are streaming or not */
ZSTD_buffered_policy_e bufferedPolicy;
@@ -506,12 +573,12 @@ typedef enum {
* behavior of taking both the source size and the dict size into account
* when selecting and adjusting parameters.
*/
-} ZSTD_cParamMode_e;
+} ZSTD_CParamMode_e;
-typedef size_t (*ZSTD_blockCompressor) (
- ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+typedef size_t (*ZSTD_BlockCompressor_f) (
+ ZSTD_MatchState_t* bs, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
+ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -557,6 +624,25 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
return 1;
}
+/* ZSTD_selectAddr:
+ * @return index >= lowLimit ? candidate : backup,
+ * tries to force branchless codegen. */
+MEM_STATIC const BYTE*
+ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup)
+{
+#if defined(__GNUC__) && defined(__x86_64__)
+ __asm__ (
+ "cmp %1, %2\n"
+ "cmova %3, %0\n"
+ : "+r"(candidate)
+ : "r"(index), "r"(lowLimit), "r"(backup)
+ );
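+ /* AT&T operand order: flags are set from (lowLimit - index); cmova overwrites candidate with backup when lowLimit > index (unsigned) */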
+ return candidate;
+#else
+ return index >= lowLimit ? candidate : backup;
+#endif
+}
+
/* ZSTD_noCompressBlock() :
* Writes uncompressed block to dst buffer from given src.
* Returns the size of the block */
@@ -639,14 +725,55 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */
+/*! ZSTD_storeSeqOnly() :
+ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t.
+ * Literals themselves are not copied, but @litPtr is updated.
+ * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
+ * @matchLength : must be >= MINMATCH
+*/
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr,
+ size_t litLength,
+ U32 offBase,
+ size_t matchLength)
+{
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+
+ /* literal Length */
+ assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+ if (UNLIKELY(litLength>0xFFFF)) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_literalLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offBase = offBase;
+
+ /* match Length */
+ assert(matchLength <= ZSTD_BLOCKSIZE_MAX);
+ assert(matchLength >= MINMATCH);
+ { size_t const mlBase = matchLength - MINMATCH;
+ if (UNLIKELY(mlBase>0xFFFF)) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].mlBase = (U16)mlBase;
+ }
+
+ seqStorePtr->sequences++;
+}
+
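For context, here is a minimal standalone sketch (toy names, not zstd code) of the single-long-length scheme the asserts above rely on: lengths live in 16-bit fields, the one sequence per block whose length exceeds 0xFFFF is remembered by position, and 0x10000 is added back on read. It assumes the block size stays at or below 128 KB, so one extra bit is enough.

    /* Toy model of the single-long-length scheme (not zstd code). */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { LONG_NONE, LONG_LIT };

    typedef struct {
        uint16_t litLength[64];   /* toy capacity */
        size_t   nbSeq;
        int      longType;        /* at most one long length per block */
        size_t   longPos;
    } ToyStore;

    static void toy_store_lit(ToyStore* s, size_t litLength)
    {
        assert(litLength <= (1u << 17));          /* block size cap, like ZSTD_BLOCKSIZE_MAX */
        if (litLength > 0xFFFF) {
            assert(s->longType == LONG_NONE);     /* only one long length allowed */
            s->longType = LONG_LIT;
            s->longPos  = s->nbSeq;
        }
        s->litLength[s->nbSeq++] = (uint16_t)litLength;   /* truncated on purpose */
    }

    static size_t toy_read_lit(const ToyStore* s, size_t pos)
    {
        size_t len = s->litLength[pos];
        if (s->longType == LONG_LIT && s->longPos == pos) len += 0x10000;  /* restore the lost bit */
        return len;
    }

    int main(void)
    {
        ToyStore s = { {0}, 0, LONG_NONE, 0 };
        toy_store_lit(&s, 70000);   /* > 0xFFFF: recorded as the block's long length */
        toy_store_lit(&s, 123);
        printf("%zu %zu\n", toy_read_lit(&s, 0), toy_read_lit(&s, 1));   /* 70000 123 */
        return 0;
    }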
/*! ZSTD_storeSeq() :
- * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t.
+ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t.
* @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
* @matchLength : must be >= MINMATCH
* Allowed to over-read literals up to litLimit.
*/
HINT_INLINE UNUSED_ATTR void
-ZSTD_storeSeq(seqStore_t* seqStorePtr,
+ZSTD_storeSeq(SeqStore_t* seqStorePtr,
size_t litLength, const BYTE* literals, const BYTE* litLimit,
U32 offBase,
size_t matchLength)
@@ -680,29 +807,7 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
}
seqStorePtr->lit += litLength;
- /* literal Length */
- if (litLength>0xFFFF) {
- assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
- seqStorePtr->longLengthType = ZSTD_llt_literalLength;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
- seqStorePtr->sequences[0].offBase = offBase;
-
- /* match Length */
- assert(matchLength >= MINMATCH);
- { size_t const mlBase = matchLength - MINMATCH;
- if (mlBase>0xFFFF) {
- assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
- seqStorePtr->longLengthType = ZSTD_llt_matchLength;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].mlBase = (U16)mlBase;
- }
-
- seqStorePtr->sequences++;
+ ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength);
}
/* ZSTD_updateRep() :
@@ -731,12 +836,12 @@ ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
typedef struct repcodes_s {
U32 rep[3];
-} repcodes_t;
+} Repcodes_t;
-MEM_STATIC repcodes_t
+MEM_STATIC Repcodes_t
ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
{
- repcodes_t newReps;
+ Repcodes_t newReps;
ZSTD_memcpy(&newReps, rep, sizeof(newReps));
ZSTD_updateRep(newReps.rep, offBase, ll0);
return newReps;
@@ -779,8 +884,8 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
size_t const matchLength = ZSTD_count(ip, match, vEnd);
if (match + matchLength != mEnd) return matchLength;
DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
- DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
- DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %i", (int)(mEnd - match));
+ DEBUGLOG(7, "distance from current pos to end buffer = %i", (int)(iEnd - ip));
DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
@@ -918,11 +1023,12 @@ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64
/*-*************************************
* Round buffer management
***************************************/
-#if (ZSTD_WINDOWLOG_MAX_64 > 31)
-# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
-#endif
-/* Max current allowed */
-#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Max @current value allowed:
+ * In 32-bit mode: we want to avoid crossing the 2 GB limit,
+ * reducing risks of side effects in case of signed operations on indexes.
+ * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB)
+ * doesn't overflow U32 index capacity (4 GB) */
+#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB)
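A quick standalone check of the headroom these limits leave, assuming the 512 MB figure in the comment above is the largest chunk that can be appended before the next overflow correction:

    /* Sanity check of the new ZSTD_CURRENT_MAX limits (standalone, not part of the patch). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long const MiB    = 1ULL << 20;
        unsigned long long const cur64  = 3500 * MiB;   /* 64-bit mode limit   */
        unsigned long long const cur32  = 2000 * MiB;   /* 32-bit mode limit   */
        unsigned long long const maxJob =  512 * MiB;   /* largest chunk added */

        printf("64-bit: %llu + %llu = %llu < 4 GB ? %d\n",
               cur64, maxJob, cur64 + maxJob, (cur64 + maxJob) < (4096 * MiB));
        printf("32-bit: %llu < 2 GB ? %d\n", cur32, cur32 < (2048 * MiB));
        return 0;
    }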
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX \
( ((U32)-1) /* Maximum ending current index */ \
@@ -962,7 +1068,7 @@ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
* Inspects the provided matchState and figures out what dictMode should be
* passed to the compressor.
*/
-MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_MatchState_t *ms)
{
return ZSTD_window_hasExtDict(ms->window) ?
ZSTD_extDict :
@@ -1151,7 +1257,7 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
- const ZSTD_matchState_t** dictMatchStatePtr)
+ const ZSTD_MatchState_t** dictMatchStatePtr)
{
U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
@@ -1196,7 +1302,7 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
- const ZSTD_matchState_t** dictMatchStatePtr)
+ const ZSTD_MatchState_t** dictMatchStatePtr)
{
assert(loadedDictEndPtr != NULL);
assert(dictMatchStatePtr != NULL);
@@ -1246,8 +1352,8 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_window_update(ZSTD_window_t* window,
- void const* src, size_t srcSize,
- int forceNonContiguous)
+ const void* src, size_t srcSize,
+ int forceNonContiguous)
{
BYTE const* const ip = (BYTE const*)src;
U32 contiguous = 1;
@@ -1274,8 +1380,9 @@ U32 ZSTD_window_update(ZSTD_window_t* window,
/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
if ( (ip+srcSize > window->dictBase + window->lowLimit)
& (ip < window->dictBase + window->dictLimit)) {
- ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase);
+ U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ assert(highInputIdx < UINT_MAX);
window->lowLimit = lowLimitMax;
DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
}
@@ -1285,7 +1392,7 @@ U32 ZSTD_window_update(ZSTD_window_t* window,
/**
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.lowLimit;
@@ -1302,7 +1409,7 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, u
/**
* Returns the lowest allowed match index in the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.dictLimit;
@@ -1315,6 +1422,13 @@ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr,
return matchLowest;
}
+/* ZSTD_index_overlap_check:
+ * intentional underflow : ensure repIndex isn't overlapping dict + prefix
+ * @return 1 if values are not overlapping,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) {
+ return ((U32)((prefixLowestIndex-1) - repIndex) >= 3);
+}
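The effect of the intentional underflow is easier to see with concrete numbers: the check rejects only the three indices just below prefixLowestIndex, i.e. the positions where a 4-byte repcode read would straddle the extDict/prefix boundary. A standalone illustration (toy names):

    /* Demo of the underflow-based overlap test (standalone sketch). */
    #include <stdio.h>
    #include <stdint.h>

    static int overlap_check(uint32_t prefixLowestIndex, uint32_t repIndex)
    {
        return ((uint32_t)((prefixLowestIndex - 1) - repIndex) >= 3);
    }

    int main(void)
    {
        uint32_t const pli = 100;
        uint32_t r;
        for (r = 95; r <= 101; r++)
            printf("repIndex=%u -> %s\n", r,
                   overlap_check(pli, r) ? "safe" : "straddles boundary");
        /* prints "straddles boundary" only for 97, 98 and 99 */
        return 0;
    }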
/* debug functions */
@@ -1385,10 +1499,6 @@ MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
return tag1 == tag2;
}
-#if defined (__cplusplus)
-}
-#endif
-
/* ===============================================================
* Shared internal declarations
* These prototypes may be called from sources not in lib/compress
@@ -1404,6 +1514,25 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_SequencePosition;
+
+/* for benchmark */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ int const repcodeResolution);
+
+typedef struct {
+ size_t nbSequences;
+ size_t blockSize;
+ size_t litSize;
+} BlockSummary;
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs);
+
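How such a summary is computed is not shown in this diff; the sketch below is only a guess at the semantics, assuming the block-delimiter convention documented for ZSTD_Sequence in zstd.h (a final entry with matchLength == 0 and offset == 0 carrying the block's trailing literals). MyBlockSummary and my_get1BlockSummary are hypothetical names, not the library's implementation.

    /* Hypothetical sketch of a one-block summary over ZSTD_Sequence entries. */
    #include <stddef.h>
    #include <stdio.h>
    #include <zstd.h>   /* ZSTD_Sequence */

    typedef struct { size_t nbSequences; size_t blockSize; size_t litSize; } MyBlockSummary;

    static MyBlockSummary my_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
    {
        MyBlockSummary s = { 0, 0, 0 };
        size_t n;
        for (n = 0; n < nbSeqs; n++) {
            s.blockSize += seqs[n].litLength + seqs[n].matchLength;
            s.litSize   += seqs[n].litLength;
            if (seqs[n].matchLength == 0 && seqs[n].offset == 0) {   /* block delimiter */
                s.nbSequences = n + 1;
                return s;
            }
        }
        s.nbSequences = n;   /* no delimiter: summary covers everything provided */
        return s;
    }

    int main(void)
    {
        const ZSTD_Sequence seqs[3] = {
            { 10, 5, 12, 0 },   /* offset, litLength, matchLength, rep */
            { 20, 3,  8, 0 },
            {  0, 7,  0, 0 },   /* delimiter carrying the block's last literals */
        };
        MyBlockSummary const s = my_get1BlockSummary(seqs, 3);
        printf("%zu sequences, blockSize=%zu, litSize=%zu\n",
               s.nbSequences, s.blockSize, s.litSize);
        return 0;
    }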
/* ==============================================================
* Private declarations
* These prototypes shall only be called from within lib/compress
@@ -1415,7 +1544,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
* Note: srcSizeHint == 0 means 0!
*/
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
/*! ZSTD_initCStream_internal() :
* Private use only. Init streaming operation.
@@ -1427,7 +1556,7 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
-void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+void ZSTD_resetSeqStore(SeqStore_t* ssPtr);
/*! ZSTD_getCParamsFromCDict() :
* as the name implies */
@@ -1480,33 +1609,6 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
*/
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
-/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
- * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
- * Note that the block delimiter must include the last literals of the block.
- */
-size_t
-ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
- ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
-
-/* Returns the number of bytes to move the current read position back by.
- * Only non-zero if we ended up splitting a sequence.
- * Otherwise, it may return a ZSTD error if something went wrong.
- *
- * This function will attempt to scan through blockSize bytes
- * represented by the sequences in @inSeqs,
- * storing any (partial) sequences.
- *
- * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
- * avoid splitting a match, or to avoid splitting a match such that it would produce a match
- * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
- */
-size_t
-ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
-
/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */
MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) {
return params->extSeqProdFunc != NULL;
diff --git a/lib/compress/zstd_compress_literals.c b/lib/compress/zstd_compress_literals.c
index bfd4f11abe4..06036de5da5 100644
--- a/lib/compress/zstd_compress_literals.c
+++ b/lib/compress/zstd_compress_literals.c
@@ -140,7 +140,7 @@ size_t ZSTD_compressLiterals (
size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
BYTE* const ostart = (BYTE*)dst;
U32 singleStream = srcSize < 256;
- symbolEncodingType_e hType = set_compressed;
+ SymbolEncodingType_e hType = set_compressed;
size_t cLitSize;
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
diff --git a/lib/compress/zstd_compress_sequences.c b/lib/compress/zstd_compress_sequences.c
index 8872d4d354a..7beb9daa603 100644
--- a/lib/compress/zstd_compress_sequences.c
+++ b/lib/compress/zstd_compress_sequences.c
@@ -153,13 +153,13 @@ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
return cost >> 8;
}
-symbolEncodingType_e
+SymbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
- ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_DefaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy)
{
ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
@@ -241,7 +241,7 @@ typedef struct {
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -293,7 +293,7 @@ ZSTD_encodeSequences_body(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
BIT_CStream_t blockStream;
FSE_CState_t stateMatchLength;
@@ -387,7 +387,7 @@ ZSTD_encodeSequences_default(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
@@ -405,7 +405,7 @@ ZSTD_encodeSequences_bmi2(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
@@ -421,7 +421,7 @@ size_t ZSTD_encodeSequences(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
diff --git a/lib/compress/zstd_compress_sequences.h b/lib/compress/zstd_compress_sequences.h
index 4a3a05da948..4be8e91f248 100644
--- a/lib/compress/zstd_compress_sequences.h
+++ b/lib/compress/zstd_compress_sequences.h
@@ -11,26 +11,27 @@
#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H
+#include "zstd_compress_internal.h" /* SeqDef */
#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
-#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */
typedef enum {
ZSTD_defaultDisallowed = 0,
ZSTD_defaultAllowed = 1
-} ZSTD_defaultPolicy_e;
+} ZSTD_DefaultPolicy_e;
-symbolEncodingType_e
+SymbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
- ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_DefaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy);
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -42,7 +43,7 @@ size_t ZSTD_encodeSequences(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
size_t ZSTD_fseBitCost(
FSE_CTable const* ctable,
diff --git a/lib/compress/zstd_compress_superblock.c b/lib/compress/zstd_compress_superblock.c
index 628a2dccd09..6f57345be62 100644
--- a/lib/compress/zstd_compress_superblock.c
+++ b/lib/compress/zstd_compress_superblock.c
@@ -51,7 +51,7 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart + lhSize;
U32 const singleStream = lhSize == 3;
- symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+ SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
size_t cLitSize = 0;
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
@@ -126,15 +126,15 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
}
static size_t
-ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
- const seqDef* sequences, size_t nbSeqs,
+ZSTD_seqDecompressedSize(SeqStore_t const* seqStore,
+ const SeqDef* sequences, size_t nbSeqs,
size_t litSize, int lastSubBlock)
{
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
size_t n;
    for (n=0; n<nbSeqs; n++) {
-    const seqDef* const sstart = seqStorePtr->sequencesStart;
- const seqDef* const send = seqStorePtr->sequences;
- const seqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
+ const SeqDef* const sstart = seqStorePtr->sequencesStart;
+ const SeqDef* const send = seqStorePtr->sequences;
+ const SeqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
size_t const nbSeqs = (size_t)(send - sstart);
const BYTE* const lstart = seqStorePtr->litStart;
const BYTE* const lend = seqStorePtr->lit;
@@ -647,8 +647,8 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
op += cSize;
/* We have to regenerate the repcodes because we've skipped some sequences */
if (sp < send) {
- const seqDef* seq;
- repcodes_t rep;
+ const SeqDef* seq;
+ Repcodes_t rep;
ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
@@ -674,7 +674,7 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
&entropyMetadata,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), "");
return ZSTD_compressSubBlock_multi(&zc->seqStore,
zc->blockState.prevCBlock,
@@ -684,5 +684,5 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
dst, dstCapacity,
src, srcSize,
zc->bmi2, lastBlock,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */);
}
diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h
index 3eddbd334e8..77518002d00 100644
--- a/lib/compress/zstd_cwksp.h
+++ b/lib/compress/zstd_cwksp.h
@@ -17,10 +17,7 @@
#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
+#include "../common/compiler.h" /* ZS2_isPower2 */
/*-*************************************
* Constants
@@ -206,9 +203,9 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
/**
* Align must be a power of 2.
*/
-MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) {
size_t const mask = align - 1;
- assert((align & mask) == 0);
+ assert(ZSTD_isPower2(align));
return (size + mask) & ~mask;
}
@@ -222,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
* to figure out how much space you need for the matchState tables. Everything
* else is though.
*
- * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
if (size == 0)
@@ -234,12 +231,16 @@ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#endif
}
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment));
+}
+
/**
* Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
* Used to determine the number of bytes required for a given "aligned".
*/
-MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
- return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) {
+ return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES);
}
/**
@@ -262,7 +263,7 @@ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
size_t const alignBytesMask = alignBytes - 1;
size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
- assert((alignBytes & alignBytesMask) == 0);
+ assert(ZSTD_isPower2(alignBytes));
assert(bytes < alignBytes);
return bytes;
}
@@ -271,8 +272,12 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt
* Returns the initial value for allocStart which is used to determine the position from
* which we can allocate from the end of the workspace.
*/
-MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
- return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws)
+{
+ char* endPtr = (char*)ws->workspaceEnd;
+ assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES));
+ endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return (void*)endPtr;
}
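The new formulation spells the align-down out as pointer arithmetic rather than bit masking; a standalone sketch of the same rounding, with an illustrative align_down helper:

    /* Round an end pointer down to a 64-byte boundary (standalone sketch). */
    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN 64

    static void* align_down(void* end)
    {
        char* const p = (char*)end;
        return (void*)(p - ((uintptr_t)p % ALIGN));
    }

    int main(void)
    {
        char buffer[256];
        void* const end   = buffer + sizeof(buffer);
        void* const start = align_down(end);
        printf("end=%p alloc start=%p (%% 64 == %u)\n",
               end, start, (unsigned)((uintptr_t)start % ALIGN));
        return 0;
    }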
/**
@@ -287,7 +292,7 @@ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
void* const alloc = (BYTE*)ws->allocStart - bytes;
void* const bottom = ws->tableEnd;
- DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
@@ -404,7 +409,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t byt
{
size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
- assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
if(ptr && ptr < ws->initOnceStart) {
/* We assume the memory following the current allocation is either:
* 1. Not usable as initOnce memory (end of workspace)
@@ -424,11 +429,12 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t byt
/**
* Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
*/
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes)
{
- void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
- ZSTD_cwksp_alloc_aligned);
- assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ void* const ptr = ZSTD_cwksp_reserve_internal(ws,
+ ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
return ptr;
}
@@ -474,7 +480,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
#endif
assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
- assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
return alloc;
}
@@ -520,6 +526,20 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
return alloc;
}
+/**
+ * Same as ZSTD_cwksp_reserve_object(), but with alignment control.
+ * Note : should happen only once, at workspace first initialization
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment)
+{
+ size_t const mask = alignment - 1;
+ size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
+ void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
+ if (start == NULL) return NULL;
+ if (surplus == 0) return start;
+ assert(ZSTD_isPower2(alignment));
+ return (void*)(((size_t)start + surplus) & ~mask);
+}
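The over-allocate-then-round-up trick above can be exercised on its own; the sketch below substitutes malloc for the workspace allocator and relies on the same assumption, namely that the base allocation is already pointer-aligned, so alignment - sizeof(void*) extra bytes always leave room for an aligned slot. reserve_aligned is an illustrative name.

    /* Over-allocate, then round the start pointer up to the requested alignment. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    static void* reserve_aligned(size_t byteSize, size_t alignment, void** toFree)
    {
        size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
        void* const start = malloc(byteSize + surplus);
        *toFree = start;
        if (start == NULL) return NULL;
        if (surplus == 0) return start;
        return (void*)(((uintptr_t)start + surplus) & ~(uintptr_t)(alignment - 1));
    }

    int main(void)
    {
        void* raw;
        void* const p = reserve_aligned(100, 64, &raw);
        printf("aligned ptr %% 64 == %u\n", (unsigned)((uintptr_t)p % 64));
        free(raw);
        return 0;
    }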
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
@@ -577,7 +597,8 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
* Invalidates table allocations.
* All other allocations remain valid.
*/
-MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
+{
DEBUGLOG(4, "cwksp: clearing tables!");
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
@@ -741,8 +762,4 @@ MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
}
}
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_CWKSP_H */
diff --git a/lib/compress/zstd_double_fast.c b/lib/compress/zstd_double_fast.c
index a4e9c50d3bf..1a266e7d955 100644
--- a/lib/compress/zstd_double_fast.c
+++ b/lib/compress/zstd_double_fast.c
@@ -15,7 +15,7 @@
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -53,7 +53,7 @@ void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -87,7 +87,7 @@ void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
} }
}
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm,
ZSTD_tableFillPurpose_e tfp)
@@ -103,7 +103,7 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
@@ -142,9 +142,14 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
const BYTE* matchl0; /* the long match for ip */
const BYTE* matchs0; /* the short match for ip */
const BYTE* matchl1; /* the long match for ip1 */
+ const BYTE* matchs0_safe; /* matchs0 or safe address */
const BYTE* ip = istart; /* the current position */
const BYTE* ip1; /* the next position */
+ /* Array of ~random data, with a low probability of matching real data;
+ * we load from here instead of from the tables when matchl0/matchs0 have
+ * invalid indices. Used to avoid unpredictable branches. */
+ const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4};
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
@@ -191,24 +196,29 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
- if (idxl0 > prefixLowestIndex) {
+ /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch.
+ * However, the expression below compiles into a conditional move. Since
+ * a match is unlikely and we only *branch* on idxl0 > prefixLowestIndex
+ * if there is a match, all branches become predictable. */
+ { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]);
+
/* check prefix long match */
- if (MEM_read64(matchl0) == MEM_read64(ip)) {
+ if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) {
mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
offset = (U32)(ip-matchl0);
while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
goto _match_found;
- }
- }
+ } }
idxl1 = hashLong[hl1];
matchl1 = base + idxl1;
- if (idxs0 > prefixLowestIndex) {
- /* check prefix short match */
- if (MEM_read32(matchs0) == MEM_read32(ip)) {
- goto _search_next_long;
- }
+ /* Same optimization as matchl0 above */
+ matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]);
+
+ /* check prefix short match */
+ if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) {
+ goto _search_next_long;
}
if (ip1 >= nextStep) {
@@ -242,21 +252,23 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
_search_next_long:
- /* check prefix long +1 match */
- if (idxl1 > prefixLowestIndex) {
- if (MEM_read64(matchl1) == MEM_read64(ip1)) {
+ /* short match found: let's check for a longer one */
+ mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+ offset = (U32)(ip - matchs0);
+
+ /* check long match at +1 position */
+ if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) {
+ size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8;
+ if (l1len > mLength) {
+ /* use the long match instead */
ip = ip1;
- mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
+ mLength = l1len;
offset = (U32)(ip-matchl1);
- while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
- goto _match_found;
+ matchs0 = matchl1;
}
}
- /* if no long +1 match, explore the short match we found */
- mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
- offset = (U32)(ip - matchs0);
- while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
+ while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */
/* fall-through */
@@ -314,7 +326,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
@@ -335,7 +347,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
const U32* const dictHashLong = dms->hashTable;
const U32* const dictHashSmall = dms->chainTable;
@@ -392,7 +404,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
/* check repcode */
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -401,14 +413,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
goto _match_stored;
}
- if (matchIndexL > prefixLowestIndex) {
+ if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
/* check prefix long match */
- if (MEM_read64(matchLong) == MEM_read64(ip)) {
- mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
- offset = (U32)(ip-matchLong);
- while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- goto _match_found;
- }
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
} else if (dictTagsMatchL) {
/* check dictMatchState long match */
U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
@@ -423,7 +433,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
} }
if (matchIndexS > prefixLowestIndex) {
- /* check prefix short match */
+ /* short match candidate */
if (MEM_read32(match) == MEM_read32(ip)) {
goto _search_next_long;
}
@@ -453,14 +463,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
hashLong[hl3] = curr + 1;
/* check prefix long +1 match */
- if (matchIndexL3 > prefixLowestIndex) {
- if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
- mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
- ip++;
- offset = (U32)(ip-matchL3);
- while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
- goto _match_found;
- }
+ if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
} else if (dictTagsMatchL3) {
/* check dict long +1 match */
U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
@@ -513,7 +521,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
dictBase + repIndex2 - dictIndexDelta :
base + repIndex2;
- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
@@ -540,7 +548,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
@@ -558,7 +566,7 @@ ZSTD_GEN_DFAST_FN(dictMatchState, 7)
size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
@@ -578,7 +586,7 @@ size_t ZSTD_compressBlock_doubleFast(
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
@@ -600,7 +608,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
@@ -651,7 +659,7 @@ size_t ZSTD_compressBlock_doubleFast_extDict_generic(
size_t mLength;
hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
- if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+ if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
& (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
@@ -719,7 +727,7 @@ size_t ZSTD_compressBlock_doubleFast_extDict_generic(
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
& (offset_2 <= current2 - dictStartIndex))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
@@ -749,7 +757,7 @@ ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)
size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
diff --git a/lib/compress/zstd_double_fast.h b/lib/compress/zstd_double_fast.h
index ce6ed8c97fd..cd562fea8ef 100644
--- a/lib/compress/zstd_double_fast.h
+++ b/lib/compress/zstd_double_fast.h
@@ -11,27 +11,23 @@
#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm,
ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast
@@ -43,8 +39,4 @@ size_t ZSTD_compressBlock_doubleFast_extDict(
#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL
#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c
index 6c4554cfca7..ee25bcbac8d 100644
--- a/lib/compress/zstd_fast.c
+++ b/lib/compress/zstd_fast.c
@@ -13,7 +13,7 @@
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
+void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
@@ -45,12 +45,12 @@ void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
- } } } }
+ } } } }
}
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
+void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
@@ -84,7 +84,7 @@ void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
} } } }
}
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm,
ZSTD_tableFillPurpose_e tfp)
@@ -97,6 +97,50 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
}
+typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit);
+
+static int
+ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
+{
+ /* Array of ~random data, should have low probability of matching data.
+ * Load from here if the index is invalid.
+ * Used to avoid unpredictable branches. */
+ static const BYTE dummy[] = {0x12,0x34,0x56,0x78};
+
+ /* matchIdx >= idxLowLimit is a (somewhat) unpredictable branch.
+ * However, the expression below compiles into a conditional move.
+ */
+ const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy);
+ /* Note: this used to be written as : return test1 && test2;
+ * Unfortunately, once inlined, these tests become branches,
+ * in which case it becomes critical that they are executed in the right order (test1 then test2).
+ * So we have to write these tests in a specific manner to ensure their ordering.
+ */
+ if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0;
+ /* force ordering of these tests, which matters once the function is inlined, as they become branches */
+#if defined(__GNUC__)
+ __asm__("");
+#endif
+ return matchIdx >= idxLowLimit;
+}
+
+static int
+ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
+{
+ /* using a branch instead of a cmov,
+ * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true,
+ * aka almost all candidates are within range */
+ U32 mval;
+ if (matchIdx >= idxLowLimit) {
+ mval = MEM_read32(matchAddress);
+ } else {
+ mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */
+ }
+
+ return (MEM_read32(currentPtr) == mval);
+}
+
+
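Both predicates share a signature so the compressor can pick one per block (the caller below chooses the cmov flavor, mirroring the windowLog heuristic used later in ZSTD_compressBlock_fast). A standalone sketch with illustrative names (match4_f, match4_cmov, match4_branch):

    /* Standalone sketch of the two match-test flavors and their shared signature. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    typedef int (*match4_f)(const uint8_t* cur, const uint8_t* cand, uint32_t idx, uint32_t low);

    static uint32_t read32(const uint8_t* p) { uint32_t v; memcpy(&v, p, 4); return v; }

    static int match4_cmov(const uint8_t* cur, const uint8_t* cand, uint32_t idx, uint32_t low)
    {
        static const uint8_t dummy[4] = {0x12, 0x34, 0x56, 0x78};
        const uint8_t* const safe = (idx >= low) ? cand : dummy;   /* expected to become a cmov */
        /* non-short-circuit & keeps both tests branch-free in this sketch */
        return (read32(cur) == read32(safe)) & (idx >= low);
    }

    static int match4_branch(const uint8_t* cur, const uint8_t* cand, uint32_t idx, uint32_t low)
    {
        uint32_t const mval = (idx >= low) ? read32(cand) : (read32(cur) ^ 1);  /* ^1 never matches */
        return read32(cur) == mval;
    }

    int main(void)
    {
        const uint8_t data[9] = "abcdabcd";
        match4_f const check = match4_cmov;   /* pick per block, e.g. by windowLog */
        printf("%d %d\n", check(data + 4, data, 4, 2),    /* in range + equal -> 1 */
                          check(data + 4, data, 1, 2));   /* below low limit  -> 0 */
        (void)match4_branch;
        return 0;
    }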
/**
* If you squint hard enough (and ignore repcodes), the search operation at any
* given position is broken into 4 stages:
@@ -146,15 +190,14 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_noDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const mls, U32 const hasStep)
+ U32 const mls, int useCmov)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
- /* support stepSize of 0 */
- size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
@@ -176,8 +219,7 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
- U32 idx; /* match idx for ip0 */
- U32 mval; /* src value at match idx */
+ U32 matchIdx; /* match idx for ip0 */
U32 offcode;
const BYTE* match0;
@@ -190,6 +232,7 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
size_t step;
const BYTE* nextStep;
const size_t kStepIncr = (1 << (kSearchStrength - 1));
+ const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch;
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
@@ -218,7 +261,7 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
hash0 = ZSTD_hashPtr(ip0, hlog, mls);
hash1 = ZSTD_hashPtr(ip1, hlog, mls);
- idx = hashTable[hash0];
+ matchIdx = hashTable[hash0];
do {
/* load repcode match for ip[2]*/
@@ -238,35 +281,25 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
offcode = REPCODE1_TO_OFFBASE;
mLength += 4;
- /* First write next hash table entry; we've already calculated it.
- * This write is known to be safe because the ip1 is before the
+ /* Write next hash table entry: it's already calculated.
+ * This write is known to be safe because ip1 is before the
* repcode (ip2). */
hashTable[hash1] = (U32)(ip1 - base);
goto _match;
}
- /* load match for ip[0] */
- if (idx >= prefixStartIndex) {
- mval = MEM_read32(base + idx);
- } else {
- mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
- }
-
- /* check match at ip[0] */
- if (MEM_read32(ip0) == mval) {
- /* found a match! */
-
- /* First write next hash table entry; we've already calculated it.
- * This write is known to be safe because the ip1 == ip0 + 1, so
- * we know we will resume searching after ip1 */
+ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
+ /* Write next hash table entry (it's already calculated).
+ * This write is known to be safe because the ip1 == ip0 + 1,
+ * so searching will resume after ip1 */
hashTable[hash1] = (U32)(ip1 - base);
goto _offset;
}
/* lookup ip[1] */
- idx = hashTable[hash1];
+ matchIdx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
@@ -281,36 +314,19 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
- /* load match for ip[0] */
- if (idx >= prefixStartIndex) {
- mval = MEM_read32(base + idx);
- } else {
- mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
- }
-
- /* check match at ip[0] */
- if (MEM_read32(ip0) == mval) {
- /* found a match! */
-
- /* first write next hash table entry; we've already calculated it */
+ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
+ /* Write next hash table entry, since it's already calculated */
if (step <= 4) {
- /* We need to avoid writing an index into the hash table >= the
- * position at which we will pick up our searching after we've
- * taken this match.
- *
- * The minimum possible match has length 4, so the earliest ip0
- * can be after we take this match will be the current ip0 + 4.
- * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
- * write this position.
- */
+ /* Avoid writing an index if it's >= position where search will resume.
+ * The minimum possible match has length 4, so search can resume at ip0 + 4.
+ */
hashTable[hash1] = (U32)(ip1 - base);
}
-
goto _offset;
}
/* lookup ip[1] */
- idx = hashTable[hash1];
+ matchIdx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
@@ -332,7 +348,7 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
} while (ip3 < ilimit);
_cleanup:
- /* Note that there are probably still a couple positions we could search.
+ /* Note that there are probably still a couple positions one could search.
* However, it seems to be a meaningful performance hit to try to search
* them. So let's not. */
@@ -361,7 +377,7 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
_offset: /* Requires: ip0, idx */
/* Compute the offset code. */
- match0 = base + idx;
+ match0 = base + matchIdx;
rep_offset2 = rep_offset1;
rep_offset1 = (U32)(ip0-match0);
offcode = OFFSET_TO_OFFBASE(rep_offset1);
@@ -406,12 +422,12 @@ size_t ZSTD_compressBlock_fast_noDict_generic(
goto _start;
}
-#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
- static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \
+ static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
- return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
+ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \
}
ZSTD_GEN_FAST_FN(noDict, 4, 1)
@@ -425,13 +441,15 @@ ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)
size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- U32 const mls = ms->cParams.minMatch;
+ U32 const mml = ms->cParams.minMatch;
+ /* use cmov when "candidate in range" branch is likely unpredictable */
+ int const useCmov = ms->cParams.windowLog < 19;
assert(ms->dictMatchState == NULL);
- if (ms->cParams.targetLength > 1) {
- switch(mls)
+ if (useCmov) {
+ switch(mml)
{
default: /* includes case 3 */
case 4 :
@@ -444,7 +462,8 @@ size_t ZSTD_compressBlock_fast(
return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
}
} else {
- switch(mls)
+ /* use a branch instead */
+ switch(mml)
{
default: /* includes case 3 */
case 4 :
@@ -456,14 +475,13 @@ size_t ZSTD_compressBlock_fast(
case 7 :
return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
}
-
}
}
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -482,7 +500,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
const U32* const dictHashTable = dms->hashTable;
const U32 dictStartIndex = dms->window.dictLimit;
@@ -546,8 +564,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
hashTable[hash0] = curr; /* update hash table */
- if (((U32) ((prefixStartIndex - 1) - repIndex) >=
- 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
@@ -580,8 +597,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
}
}
- if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
- /* found a regular match */
+ if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) {
+ /* found a regular match of size >= 4 */
U32 const offset = (U32) (ip0 - match);
mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
while (((ip0 > anchor) & (match > prefixStart))
@@ -631,7 +648,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
dictBase - dictIndexDelta + repIndex2 :
base + repIndex2;
- if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
&& (MEM_read32(repMatch2) == MEM_read32(ip0))) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
@@ -667,7 +684,7 @@ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
@@ -690,7 +707,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -925,7 +942,7 @@ size_t ZSTD_compressBlock_fast_extDict_generic(
while (ip0 <= ilimit) {
U32 const repIndex2 = (U32)(ip0-base) - offset_2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */
+ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0))
&& (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
@@ -948,7 +965,7 @@ ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)
size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
diff --git a/lib/compress/zstd_fast.h b/lib/compress/zstd_fast.h
index 9e4236b4728..216821ac33b 100644
--- a/lib/compress/zstd_fast.h
+++ b/lib/compress/zstd_fast.h
@@ -11,28 +11,20 @@
#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h"
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm,
ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_FAST_H */
diff --git a/lib/compress/zstd_lazy.c b/lib/compress/zstd_lazy.c
index 67dd55fdb80..272ebe0ece7 100644
--- a/lib/compress/zstd_lazy.c
+++ b/lib/compress/zstd_lazy.c
@@ -26,7 +26,7 @@
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+void ZSTD_updateDUBT(ZSTD_MatchState_t* ms,
const BYTE* ip, const BYTE* iend,
U32 mls)
{
@@ -71,7 +71,7 @@ void ZSTD_updateDUBT(ZSTD_matchState_t* ms,
* doesn't fail */
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+void ZSTD_insertDUBT1(const ZSTD_MatchState_t* ms,
U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode)
@@ -162,7 +162,7 @@ void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_DUBT_findBetterDictMatch (
- const ZSTD_matchState_t* ms,
+ const ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr,
size_t bestLength,
@@ -170,7 +170,7 @@ size_t ZSTD_DUBT_findBetterDictMatch (
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
- const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t * const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
const U32 * const dictHashTable = dms->hashTable;
U32 const hashLog = dmsCParams->hashLog;
@@ -240,7 +240,7 @@ size_t ZSTD_DUBT_findBetterDictMatch (
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+size_t ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
size_t* offBasePtr,
U32 const mls,
@@ -392,7 +392,7 @@ size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+size_t ZSTD_BtFindBestMatch( ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offBasePtr,
const U32 mls /* template */,
@@ -408,7 +408,7 @@ size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
* Dedicated dict search
***********************************/
-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
@@ -527,7 +527,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
*/
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
- const ZSTD_matchState_t* const dms,
+ const ZSTD_MatchState_t* const dms,
const BYTE* const ip, const BYTE* const iLimit,
const BYTE* const prefixStart, const U32 curr,
const U32 dictLimit, const size_t ddsIdx) {
@@ -630,7 +630,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_insertAndFindFirstIndex_internal(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const ZSTD_compressionParameters* const cParams,
const BYTE* ip, U32 const mls, U32 const lazySkipping)
{
@@ -656,7 +656,7 @@ U32 ZSTD_insertAndFindFirstIndex_internal(
return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip) {
const ZSTD_compressionParameters* const cParams = &ms->cParams;
return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0);
}
@@ -665,7 +665,7 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_HcFindBestMatch(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode)
@@ -689,7 +689,7 @@ size_t ZSTD_HcFindBestMatch(
U32 nbAttempts = 1U << cParams->searchLog;
size_t ml=4-1;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
@@ -834,7 +834,7 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* t
*/
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, const BYTE* base,
U32 const rowLog, U32 const mls,
U32 idx, const BYTE* const iLimit)
{
@@ -882,7 +882,7 @@ U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
*/
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms,
U32 updateStartIdx, U32 const updateEndIdx,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
@@ -913,7 +913,7 @@ void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
*/
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, const BYTE* ip,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{
@@ -946,7 +946,7 @@ void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
* External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
* processing.
*/
-void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip) {
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
const U32 rowMask = (1u << rowLog) - 1;
const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
@@ -1123,9 +1123,9 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGr
/* The high-level approach of the SIMD row based match finder is as follows:
* - Figure out where to insert the new entry:
- * - Generate a hash for current input posistion and split it into a one byte of tag and `rowHashLog` bits of index.
- * - The hash is salted by a value that changes on every contex reset, so when the same table is used
- * we will avoid collisions that would otherwise slow us down by intorducing phantom matches.
+ * - Generate a hash for the current input position and split it into one byte of tag and `rowHashLog` bits of index.
+ * - The hash is salted by a value that changes on every context reset, so when the same table is used
+ * we will avoid collisions that would otherwise slow us down by introducing phantom matches.
* - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines
* which row to insert into.
* - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can
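The comment block above describes the row-based finder's bucketing scheme: a salted hash is split into a one-byte tag plus `rowHashLog` bits that select a row of 15 or 31 entries. A minimal illustration of that split, using hypothetical names that are not part of the zstd_lazy.c helpers:

    #include <stdint.h>

    /* Illustration only: one way to split a salted 32-bit hash into a tag byte
     * and a row index, as the comment above describes. */
    typedef struct { uint32_t row; uint8_t tag; } RowSlotSketch;

    static RowSlotSketch rowSplitSketch(uint32_t saltedHash, uint32_t rowHashLog)
    {
        RowSlotSketch s;
        s.tag = (uint8_t)(saltedHash & 0xFF);                  /* low 8 bits: the tag      */
        s.row = (saltedHash >> 8) & ((1u << rowHashLog) - 1);  /* next rowHashLog bits     */
        return s;
    }

Because the salt changes on every context reset, the same input hashed against a reused table lands on different tags, which is what prevents the phantom-match collisions mentioned above.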
@@ -1139,7 +1139,7 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGr
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_RowFindBestMatch(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode,
@@ -1171,7 +1171,7 @@ size_t ZSTD_RowFindBestMatch(
U32 hash;
/* DMS/DDS variables that may be referenced laster */
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
/* Initialize the following variables to satisfy static analyzer */
size_t ddsIdx = 0;
@@ -1340,7 +1340,7 @@ size_t ZSTD_RowFindBestMatch(
* ZSTD_searchMax() dispatches to the correct implementation function.
*
* TODO: The start of the search function involves loading and calculating a
- * bunch of constants from the ZSTD_matchState_t. These computations could be
+ * bunch of constants from the ZSTD_MatchState_t. These computations could be
* done in an initialization function, and saved somewhere in the match state.
* Then we could pass a pointer to the saved state instead of the match state,
* and avoid duplicate computations.
@@ -1364,7 +1364,7 @@ size_t ZSTD_RowFindBestMatch(
#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offBasePtr) \
{ \
@@ -1374,7 +1374,7 @@ size_t ZSTD_RowFindBestMatch(
#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
@@ -1384,7 +1384,7 @@ size_t ZSTD_RowFindBestMatch(
#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
@@ -1485,7 +1485,7 @@ typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searc
* If a match is found its offset is stored in @p offsetPtr.
*/
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* ip,
const BYTE* iend,
size_t* offsetPtr,
@@ -1514,7 +1514,7 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth,
@@ -1537,7 +1537,7 @@ size_t ZSTD_compressBlock_lazy_generic(
const int isDMS = dictMode == ZSTD_dictMatchState;
const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
const int isDxS = isDMS || isDDS;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
@@ -1590,7 +1590,7 @@ size_t ZSTD_compressBlock_lazy_generic(
&& repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -1642,7 +1642,7 @@ size_t ZSTD_compressBlock_lazy_generic(
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -1678,7 +1678,7 @@ size_t ZSTD_compressBlock_lazy_generic(
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -1740,7 +1740,7 @@ size_t ZSTD_compressBlock_lazy_generic(
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase - dictIndexDelta + repIndex :
base + repIndex;
- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
@@ -1782,42 +1782,42 @@ size_t ZSTD_compressBlock_lazy_generic(
#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_greedy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
@@ -1826,42 +1826,42 @@ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_lazy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
@@ -1870,42 +1870,42 @@ size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_lazy2_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
@@ -1914,14 +1914,14 @@ size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
@@ -1935,7 +1935,7 @@ size_t ZSTD_compressBlock_btlazy2_dictMatchState(
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth)
@@ -1986,7 +1986,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const U32 repIndex = (U32)(curr+1 - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
@@ -2027,7 +2027,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
@@ -2059,7 +2059,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
@@ -2113,7 +2113,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const U32 repIndex = repCurrent - offset_2;
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
@@ -2139,14 +2139,14 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
}
size_t ZSTD_compressBlock_greedy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
@@ -2155,7 +2155,7 @@ size_t ZSTD_compressBlock_greedy_extDict_row(
#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
@@ -2163,7 +2163,7 @@ size_t ZSTD_compressBlock_lazy_extDict(
}
size_t ZSTD_compressBlock_lazy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
@@ -2173,7 +2173,7 @@ size_t ZSTD_compressBlock_lazy_extDict_row(
#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
@@ -2181,7 +2181,7 @@ size_t ZSTD_compressBlock_lazy2_extDict(
}
size_t ZSTD_compressBlock_lazy2_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
@@ -2190,7 +2190,7 @@ size_t ZSTD_compressBlock_lazy2_extDict_row(
#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
diff --git a/lib/compress/zstd_lazy.h b/lib/compress/zstd_lazy.h
index 3635813bddf..bd8dc49e64c 100644
--- a/lib/compress/zstd_lazy.h
+++ b/lib/compress/zstd_lazy.h
@@ -11,10 +11,6 @@
#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "zstd_compress_internal.h"
/**
@@ -31,38 +27,38 @@ extern "C" {
|| !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
|| !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
|| !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
-void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip);
-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
#endif
#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy
@@ -86,28 +82,28 @@ size_t ZSTD_compressBlock_greedy_extDict_row(
#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy
@@ -131,28 +127,28 @@ size_t ZSTD_compressBlock_lazy_extDict_row(
#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2
@@ -176,13 +172,13 @@ size_t ZSTD_compressBlock_lazy2_extDict_row(
#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2
@@ -194,9 +190,4 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL
#endif
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_LAZY_H */
diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c
index 17c069fe1d7..070551cad81 100644
--- a/lib/compress/zstd_ldm.c
+++ b/lib/compress/zstd_ldm.c
@@ -16,7 +16,7 @@
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"
-#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_BUCKET_SIZE_LOG 4
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
@@ -133,21 +133,35 @@ static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
}
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
- ZSTD_compressionParameters const* cParams)
+ const ZSTD_compressionParameters* cParams)
{
params->windowLog = cParams->windowLog;
ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
- if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
- if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (params->hashRateLog == 0) {
+ if (params->hashLog > 0) {
+ /* if params->hashLog is set, derive hashRateLog from it */
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ if (params->windowLog > params->hashLog) {
+ params->hashRateLog = params->windowLog - params->hashLog;
+ }
+ } else {
+ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
+ /* mapping from [fast, rate7] to [btultra2, rate4] */
+ params->hashRateLog = 7 - (cParams->strategy/3);
+ }
+ }
if (params->hashLog == 0) {
- params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
- assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX);
}
- if (params->hashRateLog == 0) {
- params->hashRateLog = params->windowLog < params->hashLog
- ? 0
- : params->windowLog - params->hashLog;
+ if (params->minMatchLength == 0) {
+ params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (cParams->strategy >= ZSTD_btultra)
+ params->minMatchLength /= 2;
+ }
+ if (params->bucketSizeLog==0) {
+ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
+ params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX);
}
params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
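The rewritten defaults above are now driven by the compression strategy rather than fixed constants. As a worked example (hedged: it assumes the usual strategy numbering ZSTD_fast=1 .. ZSTD_btultra2=9, BOUNDED(lo,v,hi) clamping v into [lo,hi], ZSTD_HASHLOG_MIN/MAX of 6/30, and ZSTD_LDM_BUCKETSIZELOG_MAX of 8), a caller with windowLog=27, strategy=ZSTD_btultra2 and every LDM field left at 0 would get:

    hashRateLog    = 7 - (9/3)                                           = 4
    hashLog        = BOUNDED(ZSTD_HASHLOG_MIN, 27 - 4, ZSTD_HASHLOG_MAX) = 23
    minMatchLength = 64 / 2                                              = 32  (strategy >= ZSTD_btultra)
    bucketSizeLog  = BOUNDED(4, 9, 8) = 8, then MIN(8, hashLog)          = 8

whereas the removed code always produced bucketSizeLog=3, minMatchLength=64 and hashLog=MAX(ZSTD_HASHLOG_MIN, windowLog-7), regardless of strategy.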
@@ -170,22 +184,22 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
/** ZSTD_ldm_getBucket() :
* Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
- ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+ const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog)
{
- return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+ return ldmState->hashTable + (hash << bucketSizeLog);
}
/** ZSTD_ldm_insertEntry() :
* Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
size_t const hash, const ldmEntry_t entry,
- ldmParams_t const ldmParams)
+ U32 const bucketSizeLog)
{
BYTE* const pOffset = ldmState->bucketOffsets + hash;
unsigned const offset = *pOffset;
- *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
- *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
+ *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1));
}
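Passing just bucketSizeLog into the two helpers above also makes the bucket layout easier to read: each hash owns a ring of (1 << bucketSizeLog) ldmEntry_t slots starting at hashTable + (hash << bucketSizeLog), and bucketOffsets[hash] remembers which slot is overwritten next. A self-contained sketch of the same ring insertion, outside any zstd API:

    #include <stddef.h>

    /* Hypothetical types and names; the logic mirrors ZSTD_ldm_insertEntry(). */
    typedef struct { unsigned offset; unsigned checksum; } EntrySketch;

    static void ringInsert(EntrySketch* table, unsigned char* nextSlot,
                           size_t hash, EntrySketch e, unsigned bucketSizeLog)
    {
        EntrySketch* const bucket = table + (hash << bucketSizeLog);  /* start of this hash's ring */
        unsigned const slot = nextSlot[hash];                         /* slot to replace next      */
        bucket[slot] = e;
        nextSlot[hash] = (unsigned char)((slot + 1) & ((1u << bucketSizeLog) - 1));  /* wrap around */
    }

Raising LDM_BUCKET_SIZE_LOG from 3 to 4 therefore doubles the number of candidates retained per hash before old entries start being overwritten.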
@@ -234,7 +248,7 @@ static size_t ZSTD_ldm_countBackwardsMatch_2segments(
*
* The tables for the other strategies are filled within their
* block compressors. */
-static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms,
void const* end)
{
const BYTE* const iend = (const BYTE*)end;
@@ -273,7 +287,8 @@ void ZSTD_ldm_fillHashTable(
const BYTE* iend, ldmParams_t const* params)
{
U32 const minMatchLength = params->minMatchLength;
- U32 const hBits = params->hashLog - params->bucketSizeLog;
+ U32 const bucketSizeLog = params->bucketSizeLog;
+ U32 const hBits = params->hashLog - bucketSizeLog;
BYTE const* const base = ldmState->window.base;
BYTE const* const istart = ip;
ldmRollingHashState_t hashState;
@@ -288,7 +303,7 @@ void ZSTD_ldm_fillHashTable(
unsigned n;
numSplits = 0;
- hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits);
for (n = 0; n < numSplits; n++) {
if (ip + splits[n] >= istart + minMatchLength) {
@@ -299,7 +314,7 @@ void ZSTD_ldm_fillHashTable(
entry.offset = (U32)(split - base);
entry.checksum = (U32)(xxhash >> 32);
- ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog);
}
}
@@ -313,7 +328,7 @@ void ZSTD_ldm_fillHashTable(
* Sets cctx->nextToUpdate to a position corresponding closer to anchor
* if it is far way
* (after a long match, only update tables a limited amount). */
-static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor)
{
U32 const curr = (U32)(anchor - ms->window.base);
if (curr > ms->nextToUpdate + 1024) {
@@ -325,7 +340,7 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_ldm_generateSequences_internal(
- ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmState_t* ldmState, RawSeqStore_t* rawSeqStore,
ldmParams_t const* params, void const* src, size_t srcSize)
{
/* LDM parameters */
@@ -379,7 +394,7 @@ size_t ZSTD_ldm_generateSequences_internal(
candidates[n].split = split;
candidates[n].hash = hash;
candidates[n].checksum = (U32)(xxhash >> 32);
- candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog);
PREFETCH_L1(candidates[n].bucket);
}
@@ -402,7 +417,7 @@ size_t ZSTD_ldm_generateSequences_internal(
* the previous one, we merely register it in the hash table and
* move on */
if (split < anchor) {
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
continue;
}
@@ -449,7 +464,7 @@ size_t ZSTD_ldm_generateSequences_internal(
/* No match found -- insert an entry into the hash table
* and process the next candidate match */
if (bestEntry == NULL) {
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
continue;
}
@@ -470,7 +485,7 @@ size_t ZSTD_ldm_generateSequences_internal(
/* Insert the current entry into the hash table --- it must be
* done after the previous block to avoid clobbering bestEntry */
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
anchor = split + forwardMatchLength;
@@ -509,7 +524,7 @@ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
}
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmState_t* ldmState, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize)
{
U32 const maxDist = 1U << params->windowLog;
@@ -586,7 +601,7 @@ size_t ZSTD_ldm_generateSequences(
}
void
-ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
+ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
@@ -622,7 +637,7 @@ ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const min
* Returns the current sequence to handle, or if the rest of the block should
* be literals, it returns a sequence with offset == 0.
*/
-static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore,
U32 const remaining, U32 const minMatch)
{
rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
@@ -646,7 +661,7 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
return sequence;
}
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) {
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
@@ -663,14 +678,14 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
}
}
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_paramSwitch_e useRowMatchFinder,
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
- ZSTD_blockCompressor const blockCompressor =
+ ZSTD_BlockCompressor_f const blockCompressor =
ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
diff --git a/lib/compress/zstd_ldm.h b/lib/compress/zstd_ldm.h
index f147021d296..42736231aa8 100644
--- a/lib/compress/zstd_ldm.h
+++ b/lib/compress/zstd_ldm.h
@@ -11,10 +11,6 @@
#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
#include "../zstd.h" /* ZSTD_CCtx, size_t */
@@ -43,7 +39,7 @@ void ZSTD_ldm_fillHashTable(
* sequences.
*/
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmState_t* ldms, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize);
/**
@@ -64,9 +60,9 @@ size_t ZSTD_ldm_generateSequences(
* two. We handle that case correctly, and update `rawSeqStore` appropriately.
* NOTE: This function does not return any errors.
*/
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_paramSwitch_e useRowMatchFinder,
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize);
/**
@@ -76,7 +72,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
* Avoids emitting matches less than `minMatch` bytes.
* Must be called for data that is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize,
U32 const minMatch);
/* ZSTD_ldm_skipRawSeqStoreBytes():
@@ -84,7 +80,7 @@ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
* Not to be used in conjunction with ZSTD_ldm_skipSequences().
* Must be called for data with is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes);
/** ZSTD_ldm_getTableSize() :
* Estimate the space needed for long distance matching tables or 0 if LDM is
@@ -110,8 +106,4 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams);
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_FAST_H */
diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c
index e63073e5a4f..3d7171b755b 100644
--- a/lib/compress/zstd_opt.c
+++ b/lib/compress/zstd_opt.c
@@ -408,7 +408,7 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
Assumption : always within prefix (i.e. not within extDict) */
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
+U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_MatchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip)
{
@@ -440,7 +440,7 @@ U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_insertBt1(
- const ZSTD_matchState_t* ms,
+ const ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
U32 const target,
U32 const mls, const int extDict)
@@ -560,7 +560,7 @@ U32 ZSTD_insertBt1(
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_updateTree_internal(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
const U32 mls, const ZSTD_dictMode_e dictMode)
{
@@ -580,7 +580,7 @@ void ZSTD_updateTree_internal(
ms->nextToUpdate = target;
}
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}
@@ -589,7 +589,7 @@ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32
ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip, const BYTE* const iLimit,
const ZSTD_dictMode_e dictMode,
@@ -625,7 +625,7 @@ ZSTD_insertBtAndGetAllMatches (
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;
- const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
const ZSTD_compressionParameters* const dmsCParams =
dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
@@ -664,13 +664,13 @@ ZSTD_insertBtAndGetAllMatches (
assert(curr >= windowLow);
if ( dictMode == ZSTD_extDict
&& ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
- & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ & (ZSTD_index_overlap_check(dictLimit, repIndex)) )
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
}
if (dictMode == ZSTD_dictMatchState
&& ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
- & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (ZSTD_index_overlap_check(dictLimit, repIndex)) )
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
} }
@@ -819,7 +819,7 @@ ZSTD_insertBtAndGetAllMatches (
typedef U32 (*ZSTD_getAllMatchesFn)(
ZSTD_match_t*,
- ZSTD_matchState_t*,
+ ZSTD_MatchState_t*,
U32*,
const BYTE*,
const BYTE*,
@@ -831,7 +831,7 @@ FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_btGetAllMatches_internal(
ZSTD_match_t* matches,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
U32* nextToUpdate3,
const BYTE* ip,
const BYTE* const iHighLimit,
@@ -854,7 +854,7 @@ U32 ZSTD_btGetAllMatches_internal(
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
ZSTD_match_t* matches, \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
U32* nextToUpdate3, \
const BYTE* ip, \
const BYTE* const iHighLimit, \
@@ -886,7 +886,7 @@ GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
}
static ZSTD_getAllMatchesFn
-ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
+ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t const* ms, ZSTD_dictMode_e const dictMode)
{
ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
@@ -905,7 +905,7 @@ ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const di
/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
- rawSeqStore_t seqStore; /* External match candidates store for this block */
+ RawSeqStore_t seqStore; /* External match candidates store for this block */
U32 startPosInBlock; /* Start position of the current match candidate */
U32 endPosInBlock; /* End position of the current match candidate */
U32 offset; /* Offset of the match candidate */
@@ -915,7 +915,7 @@ typedef struct {
* Moves forward in @rawSeqStore by @nbBytes,
* which will update the fields 'pos' and 'posInSequence'.
*/
-static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
+static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes)
{
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
@@ -972,7 +972,7 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock
return;
}
- /* Matches may be < MINMATCH by this process. In that case, we will reject them
+ /* Matches may be < minMatch by this process. In that case, we will reject them
when we are deciding whether or not to add the ldm */
optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
@@ -994,7 +994,8 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock
* into 'matches'. Maintains the correct ordering of 'matches'.
*/
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
- const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
+ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 minMatch)
{
U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
/* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */
@@ -1003,7 +1004,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
/* Ensure that current block position is not outside of the match */
if (currPosInBlock < optLdm->startPosInBlock
|| currPosInBlock >= optLdm->endPosInBlock
- || candidateMatchLength < MINMATCH) {
+ || candidateMatchLength < minMatch) {
return;
}
@@ -1023,7 +1024,8 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
static void
ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
ZSTD_match_t* matches, U32* nbMatches,
- U32 currPosInBlock, U32 remainingBytes)
+ U32 currPosInBlock, U32 remainingBytes,
+ U32 minMatch)
{
if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
return;
@@ -1040,7 +1042,7 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
}
ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
}
- ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch);
}
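Threading minMatch through the two hunks above means an LDM candidate is now checked against the parser's effective minimum match length rather than the global MINMATCH constant, so a candidate that was trimmed below what the optimal parser prices never enters the matches table. A hedged sketch of the resulting acceptance rule (hypothetical helper, not part of zstd_opt.c):

    /* Mirrors the guard in ZSTD_optLdm_maybeAddMatch() after this change. */
    static int optLdmCandidateUsable(unsigned candidateMatchLength,
                                     unsigned currPosInBlock,
                                     unsigned startPosInBlock,
                                     unsigned endPosInBlock,
                                     unsigned minMatch)
    {
        if (currPosInBlock <  startPosInBlock) return 0;  /* candidate not reached yet   */
        if (currPosInBlock >= endPosInBlock)   return 0;  /* candidate already passed    */
        return candidateMatchLength >= minMatch;          /* previously: >= MINMATCH (3) */
    }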
@@ -1072,8 +1074,8 @@ listStats(const U32* table, int lastEltID)
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t
-ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
- seqStore_t* seqStore,
+ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms,
+ SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const int optLevel,
@@ -1122,7 +1124,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 const ll0 = !litlen;
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
- (U32)(ip-istart), (U32)(iend-ip));
+ (U32)(ip-istart), (U32)(iend-ip),
+ minMatch);
if (!nbMatches) {
DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart));
ip++;
@@ -1197,7 +1200,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
for (cur = 1; cur <= last_pos; cur++) {
const BYTE* const inr = ip + cur;
assert(cur <= ZSTD_OPT_NUM);
- DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur);
+ DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur);
/* Fix current position with one literal if cheaper */
{ U32 const litlen = opt[cur-1].litlen + 1;
@@ -1207,8 +1210,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
assert(price < 1000000000); /* overflow check */
if (price <= opt[cur].price) {
ZSTD_optimal_t const prevMatch = opt[cur];
- DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ DEBUGLOG(7, "cPos:%i==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
opt[cur] = opt[cur-1];
opt[cur].litlen = litlen;
@@ -1227,34 +1230,34 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
&& (with1literal < opt[cur+1].price) ) {
/* update offset history - before it disappears */
U32 const prev = cur - prevMatch.mlen;
- repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0);
+ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0);
assert(cur >= prevMatch.mlen);
DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !",
ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals),
newReps.rep[0], newReps.rep[1], newReps.rep[2] );
opt[cur+1] = prevMatch; /* mlen & offbase */
- ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(repcodes_t));
+ ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t));
opt[cur+1].litlen = 1;
opt[cur+1].price = with1literal;
if (last_pos < cur+1) last_pos = cur+1;
}
}
} else {
- DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f)",
- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
+ DEBUGLOG(7, "cPos:%i==rPos:%u : literal would cost more (%.2f>%.2f)",
+ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
}
}
/* Offset history is not updated during match comparison.
* Do it here, now that the match is selected and confirmed.
*/
- ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t));
assert(cur >= opt[cur].mlen);
if (opt[cur].litlen == 0) {
/* just finished a match => alter offset history */
U32 const prev = cur - opt[cur].mlen;
- repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
- ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t));
}
/* last match must start at a minimum distance of 8 from oend */
@@ -1276,7 +1279,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 matchNb;
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
- (U32)(inr-istart), (U32)(iend-inr));
+ (U32)(inr-istart), (U32)(iend-inr),
+ minMatch);
if (!nbMatches) {
DEBUGLOG(7, "rPos:%u : no match found", cur);
@@ -1284,8 +1288,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
}
{ U32 const longestML = matches[nbMatches-1].len;
- DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of longest ML=%u",
- inr-istart, cur, nbMatches, longestML);
+ DEBUGLOG(7, "cPos:%i==rPos:%u, found %u matches, of longest ML=%u",
+ (int)(inr-istart), cur, nbMatches, longestML);
if ( (longestML > sufficient_len)
|| (cur + longestML >= ZSTD_OPT_NUM)
@@ -1353,10 +1357,10 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
/* Update offset history */
if (lastStretch.litlen == 0) {
/* finishing on a match : update offset history */
- repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
- ZSTD_memcpy(rep, &reps, sizeof(repcodes_t));
+ Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(Repcodes_t));
} else {
- ZSTD_memcpy(rep, lastStretch.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(rep, lastStretch.rep, sizeof(Repcodes_t));
assert(cur >= lastStretch.litlen);
cur -= lastStretch.litlen;
}
@@ -1411,8 +1415,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 const mlen = opt[storePos].mlen;
U32 const offBase = opt[storePos].off;
U32 const advance = llen + mlen;
- DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
- anchor - istart, (unsigned)llen, (unsigned)mlen);
+ DEBUGLOG(6, "considering seq starting at %i, llen=%u, mlen=%u",
+ (int)(anchor - istart), (unsigned)llen, (unsigned)mlen);
if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
assert(storePos == storeEnd); /* must be last sequence */
@@ -1440,7 +1444,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
static size_t ZSTD_compressBlock_opt0(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
@@ -1449,7 +1453,7 @@ static size_t ZSTD_compressBlock_opt0(
#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
static size_t ZSTD_compressBlock_opt2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
@@ -1458,7 +1462,7 @@ static size_t ZSTD_compressBlock_opt2(
#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
@@ -1477,8 +1481,8 @@ size_t ZSTD_compressBlock_btopt(
*/
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
- seqStore_t* seqStore,
+void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms,
+ SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
@@ -1503,7 +1507,7 @@ void ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
}
size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
@@ -1511,7 +1515,7 @@ size_t ZSTD_compressBlock_btultra(
}
size_t ZSTD_compressBlock_btultra2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
U32 const curr = (U32)((const BYTE*)src - ms->window.base);
@@ -1541,14 +1545,14 @@ size_t ZSTD_compressBlock_btultra2(
#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
@@ -1557,14 +1561,14 @@ size_t ZSTD_compressBlock_btopt_extDict(
#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btultra_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
diff --git a/lib/compress/zstd_opt.h b/lib/compress/zstd_opt.h
index d4e71131572..756c7b1d0c5 100644
--- a/lib/compress/zstd_opt.h
+++ b/lib/compress/zstd_opt.h
@@ -11,28 +11,24 @@
#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "zstd_compress_internal.h"
#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
|| !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
|| !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
/* used in ZSTD_loadDictionaryContent() */
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend);
#endif
#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btopt_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
@@ -46,20 +42,20 @@ size_t ZSTD_compressBlock_btopt_extDict(
#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
* and is only specific for the first block (no prefix) */
size_t ZSTD_compressBlock_btultra2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra
@@ -73,8 +69,4 @@ size_t ZSTD_compressBlock_btultra2(
#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL
#endif
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_OPT_H */
diff --git a/lib/compress/zstd_preSplit.c b/lib/compress/zstd_preSplit.c
new file mode 100644
index 00000000000..d820c20ac24
--- /dev/null
+++ b/lib/compress/zstd_preSplit.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "../common/compiler.h" /* ZSTD_ALIGNOF */
+#include "../common/mem.h" /* S64 */
+#include "../common/zstd_deps.h" /* ZSTD_memset */
+#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */
+#include "hist.h" /* HIST_add */
+#include "zstd_preSplit.h"
+
+
+#define BLOCKSIZE_MIN 3500
+#define THRESHOLD_PENALTY_RATE 16
+#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2)
+#define THRESHOLD_PENALTY 3
+
+#define HASHLENGTH 2
+#define HASHLOG_MAX 10
+#define HASHTABLESIZE (1 << HASHLOG_MAX)
+#define HASHMASK (HASHTABLESIZE - 1)
+#define KNUTH 0x9e3779b9
+
+/* for hashLog > 8, hash 2 bytes.
+ * for hashLog == 8, just take the byte, no hashing.
+ * The speed of this method relies on compile-time constant propagation */
+FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog)
+{
+ assert(hashLog >= 8);
+ if (hashLog == 8) return (U32)((const BYTE*)p)[0];
+ assert(hashLog <= HASHLOG_MAX);
+ return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog);
+}
+
+
+typedef struct {
+ unsigned events[HASHTABLESIZE];
+ size_t nbEvents;
+} Fingerprint;
+typedef struct {
+ Fingerprint pastEvents;
+ Fingerprint newEvents;
+} FPStats;
+
+static void initStats(FPStats* fpstats)
+{
+ ZSTD_memset(fpstats, 0, sizeof(FPStats));
+}
+
+FORCE_INLINE_TEMPLATE void
+addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
+{
+ const char* p = (const char*)src;
+ size_t limit = srcSize - HASHLENGTH + 1;
+ size_t n;
+ assert(srcSize >= HASHLENGTH);
+ for (n = 0; n < limit; n+=samplingRate) {
+ fp->events[hash2(p+n, hashLog)]++;
+ }
+ fp->nbEvents += limit/samplingRate;
+}
+
+FORCE_INLINE_TEMPLATE void
+recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
+{
+ ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog));
+ fp->nbEvents = 0;
+ addEvents_generic(fp, src, srcSize, samplingRate, hashLog);
+}
+
+typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize);
+
+#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate
+
+#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize) \
+ static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \
+ { \
+ recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \
+ }
+
+ZSTD_GEN_RECORD_FINGERPRINT(1, 10)
+ZSTD_GEN_RECORD_FINGERPRINT(5, 10)
+ZSTD_GEN_RECORD_FINGERPRINT(11, 9)
+ZSTD_GEN_RECORD_FINGERPRINT(43, 8)
+
+
+static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? -s64 : s64); }
+
+static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog)
+{
+ U64 distance = 0;
+ size_t n;
+ assert(hashLog <= HASHLOG_MAX);
+ for (n = 0; n < ((size_t)1 << hashLog); n++) {
+ distance +=
+ abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents);
+ }
+ return distance;
+}
+
+/* Compare newEvents with pastEvents
+ * return 1 when considered "too different"
+ */
+static int compareFingerprints(const Fingerprint* ref,
+ const Fingerprint* newfp,
+ int penalty,
+ unsigned hashLog)
+{
+ assert(ref->nbEvents > 0);
+ assert(newfp->nbEvents > 0);
+ { U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents;
+ U64 deviation = fpDistance(ref, newfp, hashLog);
+ U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE;
+ return deviation >= threshold;
+ }
+}
+
+static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ acc->events[n] += newfp->events[n];
+ }
+ acc->nbEvents += newfp->nbEvents;
+}
+
+static void flushEvents(FPStats* fpstats)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ fpstats->pastEvents.events[n] = fpstats->newEvents.events[n];
+ }
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents;
+ ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents));
+}
+
+static void removeEvents(Fingerprint* acc, const Fingerprint* slice)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ assert(acc->events[n] >= slice->events[n]);
+ acc->events[n] -= slice->events[n];
+ }
+ acc->nbEvents -= slice->nbEvents;
+}
+
+#define CHUNKSIZE (8 << 10)
+static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize)
+{
+ static const RecordEvents_f records_fs[] = {
+ FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1)
+ };
+ static const unsigned hashParams[] = { 8, 9, 10, 10 };
+ const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]);
+ FPStats* const fpstats = (FPStats*)workspace;
+ const char* p = (const char*)blockStart;
+ int penalty = THRESHOLD_PENALTY;
+ size_t pos = 0;
+ assert(blockSize == (128 << 10));
+ assert(workspace != NULL);
+ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
+ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
+ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
+
+ initStats(fpstats);
+ record_f(&fpstats->pastEvents, p, CHUNKSIZE);
+ for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) {
+ record_f(&fpstats->newEvents, p + pos, CHUNKSIZE);
+ if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) {
+ return pos;
+ } else {
+ mergeEvents(&fpstats->pastEvents, &fpstats->newEvents);
+ if (penalty > 0) penalty--;
+ }
+ }
+ assert(pos == blockSize);
+ return blockSize;
+ (void)flushEvents; (void)removeEvents;
+}
+
+/* ZSTD_splitBlock_fromBorders(): very fast strategy :
+ * compare fingerprint from beginning and end of the block,
+ * derive from their difference whether it's preferable to split in the middle,
+ * then repeat the process a second time, for a finer-grained decision.
+ * A third pass did not bring improvements, so I stopped at 2.
+ * Benefits are good enough for a cheap heuristic.
+ * More accurate splitting saves more, but its speed impact is also more perceptible.
+ * For better accuracy, use the more elaborate variant *_byChunks.
+ */
+static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize,
+ void* workspace, size_t wkspSize)
+{
+#define SEGMENT_SIZE 512
+ FPStats* const fpstats = (FPStats*)workspace;
+ Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned));
+ assert(blockSize == (128 << 10));
+ assert(workspace != NULL);
+ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
+ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
+ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
+
+ initStats(fpstats);
+ HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE);
+ HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE);
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE;
+ if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8))
+ return blockSize;
+
+ HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE);
+ middleEvents->nbEvents = SEGMENT_SIZE;
+ { U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8);
+ U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8);
+ U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3;
+ if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance)
+ return 64 KB;
+ return (distFromBegin > distFromEnd) ? 32 KB : 96 KB;
+ }
+}
+
+size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize)
+{
+ DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level);
+ assert(0<=level && level<=4);
+ if (level == 0)
+ return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize);
+ /* level >= 1*/
+ return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize);
+}
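
The split decision above reduces to comparing two byte-value histograms that may cover different numbers of samples. fpDistance() cross-multiplies each bucket by the other histogram's total count, which compares the two distributions in normalized form without any division, and compareFingerprints() flags the new chunk as "too different" once that distance reaches (THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE of the product of the two totals. The following standalone sketch reproduces the same arithmetic on toy 4-bucket histograms with penalty 0; it is purely illustrative and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>   /* llabs */

    #define NB_BUCKETS 4
    #define THRESHOLD_PENALTY_RATE 16
    #define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2)

    /* same cross-multiplied L1 distance as fpDistance(), on tiny histograms */
    static uint64_t toyDistance(const unsigned* e1, uint64_t n1, const unsigned* e2, uint64_t n2)
    {
        uint64_t distance = 0;
        size_t b;
        for (b = 0; b < NB_BUCKETS; b++) {
            int64_t const d = (int64_t)e1[b] * (int64_t)n2 - (int64_t)e2[b] * (int64_t)n1;
            distance += (uint64_t)llabs(d);
        }
        return distance;
    }

    int main(void)
    {
        /* two chunks with the same number of events but clearly different distributions */
        unsigned const past[NB_BUCKETS] = { 90,  5,  3,  2 };   /* 100 events */
        unsigned const next[NB_BUCKETS] = { 20, 30, 30, 20 };   /* 100 events */
        uint64_t const dist      = toyDistance(past, 100, next, 100);                       /* = 14000 */
        uint64_t const threshold = 100 * 100 * THRESHOLD_BASE / THRESHOLD_PENALTY_RATE;     /* = 8750  */
        printf("distance=%llu threshold=%llu => %s\n",
               (unsigned long long)dist, (unsigned long long)threshold,
               dist >= threshold ? "split here" : "keep accumulating");
        return 0;
    }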
diff --git a/lib/compress/zstd_preSplit.h b/lib/compress/zstd_preSplit.h
new file mode 100644
index 00000000000..b89a200dccd
--- /dev/null
+++ b/lib/compress/zstd_preSplit.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PRESPLIT_H
+#define ZSTD_PRESPLIT_H
+
+#include <stddef.h>   /* size_t */
+
+#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208
+
+/* ZSTD_splitBlock():
+ * @level must be a value between 0 and 4.
+ * higher levels spend more energy to detect block boundaries.
+ * @workspace must be aligned for size_t.
+ * @wkspSize must be >= ZSTD_SLIPBLOCK_WORKSPACESIZE
+ * note:
+ * For the time being, this function only accepts full 128 KB blocks.
+ * Therefore, @blockSize must be == 128 KB.
+ * While this could be extended to smaller sizes in the future,
+ * it is not yet clear if this would be useful. TBD.
+ */
+size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize);
+
+#endif /* ZSTD_PRESPLIT_H */
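
A usage sketch for the new entry point, under the constraints documented above (a full 128 KB block, a workspace of at least ZSTD_SLIPBLOCK_WORKSPACESIZE bytes); the wrapper below is illustrative only and not part of the patch:

    #include <stdlib.h>        /* malloc, free */
    #include "zstd_preSplit.h"

    /* Returns the proposed size of the first sub-block, or blockSize when no split is proposed. */
    static size_t proposeSplit(const void* block, int level /* 0..4 */)
    {
        size_t const blockSize = (size_t)128 * 1024;                 /* currently the only accepted size */
        void* const wksp = malloc(ZSTD_SLIPBLOCK_WORKSPACESIZE);     /* malloc alignment is sufficient */
        size_t splitPos;
        if (wksp == NULL) return blockSize;                          /* on allocation failure, simply don't split */
        splitPos = ZSTD_splitBlock(block, blockSize, level, wksp, ZSTD_SLIPBLOCK_WORKSPACESIZE);
        free(wksp);
        return splitPos;
    }

Per the implementation above, level 0 maps to the cheap borders heuristic, while levels 1 to 4 use the chunk-based scan with progressively denser sampling.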
diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c
index 86ccce31849..0f1fe6d7469 100644
--- a/lib/compress/zstdmt_compress.c
+++ b/lib/compress/zstdmt_compress.c
@@ -90,9 +90,9 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
typedef struct buffer_s {
void* start;
size_t capacity;
-} buffer_t;
+} Buffer;
-static const buffer_t g_nullBuffer = { NULL, 0 };
+static const Buffer g_nullBuffer = { NULL, 0 };
typedef struct ZSTDMT_bufferPool_s {
ZSTD_pthread_mutex_t poolMutex;
@@ -100,7 +100,7 @@ typedef struct ZSTDMT_bufferPool_s {
unsigned totalBuffers;
unsigned nbBuffers;
ZSTD_customMem cMem;
- buffer_t* buffers;
+ Buffer* buffers;
} ZSTDMT_bufferPool;
static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
@@ -128,7 +128,7 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_cu
ZSTD_customFree(bufPool, cMem);
return NULL;
}
- bufPool->buffers = (buffer_t*)ZSTD_customCalloc(maxNbBuffers * sizeof(buffer_t), cMem);
+ bufPool->buffers = (Buffer*)ZSTD_customCalloc(maxNbBuffers * sizeof(Buffer), cMem);
if (bufPool->buffers==NULL) {
ZSTDMT_freeBufferPool(bufPool);
return NULL;
@@ -144,7 +144,7 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_cu
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
size_t const poolSize = sizeof(*bufPool);
- size_t const arraySize = bufPool->totalBuffers * sizeof(buffer_t);
+ size_t const arraySize = bufPool->totalBuffers * sizeof(Buffer);
unsigned u;
size_t totalBufferSize = 0;
ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
@@ -189,13 +189,13 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool,
* assumption : bufPool must be valid
* @return : a buffer, with start pointer and size
* note: allocation may fail, in this case, start==NULL and size==0 */
-static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
+static Buffer ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
size_t const bSize = bufPool->bufferSize;
DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
if (bufPool->nbBuffers) { /* try to use an existing buffer */
- buffer_t const buf = bufPool->buffers[--(bufPool->nbBuffers)];
+ Buffer const buf = bufPool->buffers[--(bufPool->nbBuffers)];
size_t const availBufferSize = buf.capacity;
bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer;
if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
@@ -212,7 +212,7 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
/* create new buffer */
DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
- { buffer_t buffer;
+ { Buffer buffer;
void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
buffer.start = start; /* note : start can be NULL if malloc fails ! */
buffer.capacity = (start==NULL) ? 0 : bSize;
@@ -231,12 +231,12 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
* @return : a buffer that is at least the buffer pool buffer size.
* If a reallocation happens, the data in the input buffer is copied.
*/
-static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
+static Buffer ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, Buffer buffer)
{
size_t const bSize = bufPool->bufferSize;
if (buffer.capacity < bSize) {
void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
- buffer_t newBuffer;
+ Buffer newBuffer;
newBuffer.start = start;
newBuffer.capacity = start == NULL ? 0 : bSize;
if (start != NULL) {
@@ -252,7 +252,7 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
#endif
/* store buffer for later re-use, up to pool capacity */
-static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
+static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, Buffer buf)
{
DEBUGLOG(5, "ZSTDMT_releaseBuffer");
if (buf.start == NULL) return; /* compatible with release on NULL */
@@ -290,23 +290,23 @@ static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
return ZSTDMT_sizeof_bufferPool(seqPool);
}
-static rawSeqStore_t bufferToSeq(buffer_t buffer)
+static RawSeqStore_t bufferToSeq(Buffer buffer)
{
- rawSeqStore_t seq = kNullRawSeqStore;
+ RawSeqStore_t seq = kNullRawSeqStore;
seq.seq = (rawSeq*)buffer.start;
seq.capacity = buffer.capacity / sizeof(rawSeq);
return seq;
}
-static buffer_t seqToBuffer(rawSeqStore_t seq)
+static Buffer seqToBuffer(RawSeqStore_t seq)
{
- buffer_t buffer;
+ Buffer buffer;
buffer.start = seq.seq;
buffer.capacity = seq.capacity * sizeof(rawSeq);
return buffer;
}
-static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
+static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
{
if (seqPool->bufferSize == 0) {
return kNullRawSeqStore;
@@ -315,13 +315,13 @@ static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
}
#if ZSTD_RESIZE_SEQPOOL
-static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+static RawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq)
{
return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
}
#endif
-static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq)
{
ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
}
@@ -466,7 +466,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
typedef struct {
void const* start;
size_t size;
-} range_t;
+} Range;
typedef struct {
/* All variables in the struct are protected by mutex. */
@@ -482,10 +482,10 @@ typedef struct {
ZSTD_pthread_mutex_t ldmWindowMutex;
ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
-} serialState_t;
+} SerialState;
static int
-ZSTDMT_serialState_reset(serialState_t* serialState,
+ZSTDMT_serialState_reset(SerialState* serialState,
ZSTDMT_seqPool* seqPool,
ZSTD_CCtx_params params,
size_t jobSize,
@@ -555,7 +555,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
return 0;
}
-static int ZSTDMT_serialState_init(serialState_t* serialState)
+static int ZSTDMT_serialState_init(SerialState* serialState)
{
int initError = 0;
ZSTD_memset(serialState, 0, sizeof(*serialState));
@@ -566,7 +566,7 @@ static int ZSTDMT_serialState_init(serialState_t* serialState)
return initError;
}
-static void ZSTDMT_serialState_free(serialState_t* serialState)
+static void ZSTDMT_serialState_free(SerialState* serialState)
{
ZSTD_customMem cMem = serialState->params.customMem;
ZSTD_pthread_mutex_destroy(&serialState->mutex);
@@ -577,9 +577,10 @@ static void ZSTDMT_serialState_free(serialState_t* serialState)
ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
}
-static void ZSTDMT_serialState_update(serialState_t* serialState,
- ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
- range_t src, unsigned jobID)
+static void
+ZSTDMT_serialState_genSequences(SerialState* serialState,
+ RawSeqStore_t* seqStore,
+ Range src, unsigned jobID)
{
/* Wait for our turn */
ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
@@ -592,12 +593,13 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
/* It is now our turn, do any processing necessary */
if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
size_t error;
- assert(seqStore.seq != NULL && seqStore.pos == 0 &&
- seqStore.size == 0 && seqStore.capacity > 0);
+ DEBUGLOG(6, "ZSTDMT_serialState_genSequences: LDM update");
+ assert(seqStore->seq != NULL && seqStore->pos == 0 &&
+ seqStore->size == 0 && seqStore->capacity > 0);
assert(src.size <= serialState->params.jobSize);
ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
error = ZSTD_ldm_generateSequences(
- &serialState->ldmState, &seqStore,
+ &serialState->ldmState, seqStore,
&serialState->params.ldmParams, src.start, src.size);
/* We provide a large enough buffer to never fail. */
assert(!ZSTD_isError(error)); (void)error;
@@ -616,14 +618,22 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
serialState->nextJobID++;
ZSTD_pthread_cond_broadcast(&serialState->cond);
ZSTD_pthread_mutex_unlock(&serialState->mutex);
+}
- if (seqStore.size > 0) {
- ZSTD_referenceExternalSequences(jobCCtx, seqStore.seq, seqStore.size);
- assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
+static void
+ZSTDMT_serialState_applySequences(const SerialState* serialState, /* just for an assert() check */
+ ZSTD_CCtx* jobCCtx,
+ const RawSeqStore_t* seqStore)
+{
+ if (seqStore->size > 0) {
+ DEBUGLOG(5, "ZSTDMT_serialState_applySequences: uploading %u external sequences", (unsigned)seqStore->size);
+ assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); (void)serialState;
+ assert(jobCCtx);
+ ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size);
}
}
-static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
+static void ZSTDMT_serialState_ensureFinished(SerialState* serialState,
unsigned jobID, size_t cSize)
{
ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
@@ -647,28 +657,28 @@ static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
/* ===== Worker thread ===== */
/* ------------------------------------------ */
-static const range_t kNullRange = { NULL, 0 };
+static const Range kNullRange = { NULL, 0 };
typedef struct {
- size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
- size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
- ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */
- ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */
- ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */
- ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */
- ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */
- serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */
- buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
- range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */
- range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */
- unsigned jobID; /* set by mtctx, then read by worker => no barrier */
- unsigned firstJob; /* set by mtctx, then read by worker => no barrier */
- unsigned lastJob; /* set by mtctx, then read by worker => no barrier */
- ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */
- const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */
- unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */
- size_t dstFlushed; /* used only by mtctx */
- unsigned frameChecksumNeeded; /* used only by mtctx */
+ size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
+ size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
+ ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */
+ ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */
+ ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */
+ ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */
+ ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */
+ SerialState* serial; /* Thread-safe - used by mtctx and (all) workers */
+ Buffer dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
+ Range prefix; /* set by mtctx, then read by worker & mtctx => no barrier */
+ Range src; /* set by mtctx, then read by worker & mtctx => no barrier */
+ unsigned jobID; /* set by mtctx, then read by worker => no barrier */
+ unsigned firstJob; /* set by mtctx, then read by worker => no barrier */
+ unsigned lastJob; /* set by mtctx, then read by worker => no barrier */
+ ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */
+ const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */
+ unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */
+ size_t dstFlushed; /* used only by mtctx */
+ unsigned frameChecksumNeeded; /* used only by mtctx */
} ZSTDMT_jobDescription;
#define JOB_ERROR(e) \
@@ -685,10 +695,11 @@ static void ZSTDMT_compressionJob(void* jobDescription)
ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */
ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
- rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
- buffer_t dstBuff = job->dstBuff;
+ RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
+ Buffer dstBuff = job->dstBuff;
size_t lastCBlockSize = 0;
+ DEBUGLOG(5, "ZSTDMT_compressionJob: job %u", job->jobID);
/* resources */
if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
@@ -710,11 +721,15 @@ static void ZSTDMT_compressionJob(void* jobDescription)
/* init */
+
+ /* Perform serial step as early as possible */
+ ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID);
+
if (job->cdict) {
size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
assert(job->firstJob); /* only allowed for first job */
if (ZSTD_isError(initError)) JOB_ERROR(initError);
- } else { /* srcStart points at reloaded section */
+ } else {
U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
{ size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
@@ -723,16 +738,17 @@ static void ZSTDMT_compressionJob(void* jobDescription)
size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
if (ZSTD_isError(err)) JOB_ERROR(err);
}
+ DEBUGLOG(6, "ZSTDMT_compressionJob: job %u: loading prefix of size %zu", job->jobID, job->prefix.size);
{ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
- job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+ job->prefix.start, job->prefix.size, ZSTD_dct_rawContent,
ZSTD_dtlm_fast,
NULL, /*cdict*/
&jobParams, pledgedSrcSize);
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} }
- /* Perform serial step as early as possible, but after CCtx initialization */
- ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
+ /* External Sequences can only be applied after CCtx initialization */
+ ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore);
if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */
size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
@@ -741,7 +757,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
ZSTD_invalidateRepCodes(cctx);
}
- /* compress */
+ /* compress the entire job in smaller chunks, for better granularity */
{ size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
const BYTE* ip = (const BYTE*) job->src.start;
@@ -809,10 +825,10 @@ static void ZSTDMT_compressionJob(void* jobDescription)
/* ------------------------------------------ */
typedef struct {
- range_t prefix; /* read-only non-owned prefix buffer */
- buffer_t buffer;
+ Range prefix; /* read-only non-owned prefix buffer */
+ Buffer buffer;
size_t filled;
-} inBuff_t;
+} InBuff_t;
typedef struct {
BYTE* buffer; /* The round input buffer. All jobs get references
@@ -826,9 +842,9 @@ typedef struct {
* the inBuff is sent to the worker thread.
* pos <= capacity.
*/
-} roundBuff_t;
+} RoundBuff_t;
-static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
+static const RoundBuff_t kNullRoundBuff = {NULL, 0, 0};
#define RSYNC_LENGTH 32
/* Don't create chunks smaller than the zstd block size.
@@ -845,7 +861,7 @@ typedef struct {
U64 hash;
U64 hitMask;
U64 primePower;
-} rsyncState_t;
+} RSyncState_t;
struct ZSTDMT_CCtx_s {
POOL_ctx* factory;
@@ -857,10 +873,10 @@ struct ZSTDMT_CCtx_s {
size_t targetSectionSize;
size_t targetPrefixSize;
int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
- inBuff_t inBuff;
- roundBuff_t roundBuff;
- serialState_t serial;
- rsyncState_t rsync;
+ InBuff_t inBuff;
+ RoundBuff_t roundBuff;
+ SerialState serial;
+ RSyncState_t rsync;
unsigned jobIDMask;
unsigned doneJobID;
unsigned nextJobID;
@@ -1245,13 +1261,11 @@ size_t ZSTDMT_initCStream_internal(
/* init */
if (params.nbWorkers != mtctx->params.nbWorkers)
- FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");
+ FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, (unsigned)params.nbWorkers) , "");
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
- DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
-
if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */
ZSTDMT_waitForAllJobsCompleted(mtctx);
ZSTDMT_releaseAllJobResources(mtctx);
@@ -1260,15 +1274,14 @@ size_t ZSTDMT_initCStream_internal(
mtctx->params = params;
mtctx->frameContentSize = pledgedSrcSize;
+ ZSTD_freeCDict(mtctx->cdictLocal);
if (dict) {
- ZSTD_freeCDict(mtctx->cdictLocal);
mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
params.cParams, mtctx->cMem);
mtctx->cdict = mtctx->cdictLocal;
if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
} else {
- ZSTD_freeCDict(mtctx->cdictLocal);
mtctx->cdictLocal = NULL;
mtctx->cdict = cdict;
}
@@ -1334,9 +1347,32 @@ size_t ZSTDMT_initCStream_internal(
mtctx->allJobsCompleted = 0;
mtctx->consumed = 0;
mtctx->produced = 0;
+
+ /* update dictionary */
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ mtctx->cdictLocal = NULL;
+ mtctx->cdict = NULL;
+ if (dict) {
+ if (dictContentType == ZSTD_dct_rawContent) {
+ mtctx->inBuff.prefix.start = (const BYTE*)dict;
+ mtctx->inBuff.prefix.size = dictSize;
+ } else {
+ /* note : a loadPrefix becomes an internal CDict */
+ mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType,
+ params.cParams, mtctx->cMem);
+ mtctx->cdict = mtctx->cdictLocal;
+ if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
+ }
+ } else {
+ mtctx->cdict = cdict;
+ }
+
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
dict, dictSize, dictContentType))
return ERROR(memory_allocation);
+
+
return 0;
}
@@ -1403,7 +1439,7 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZS
mtctx->roundBuff.pos += srcSize;
mtctx->inBuff.buffer = g_nullBuffer;
mtctx->inBuff.filled = 0;
- /* Set the prefix */
+ /* Set the prefix for next job */
if (!endFrame) {
size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
@@ -1540,12 +1576,17 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
* If the data of the first job is broken up into two segments, we cover both
* sections.
*/
-static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
+static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
{
unsigned const firstJobID = mtctx->doneJobID;
unsigned const lastJobID = mtctx->nextJobID;
unsigned jobID;
+ /* no need to check during first round */
+ size_t roundBuffCapacity = mtctx->roundBuff.capacity;
+ size_t nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize;
+ if (lastJobID < nbJobs1stRoundMin) return kNullRange;
+
for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
unsigned const wJobID = jobID & mtctx->jobIDMask;
size_t consumed;
@@ -1555,7 +1596,7 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
if (consumed < mtctx->jobs[wJobID].src.size) {
- range_t range = mtctx->jobs[wJobID].prefix;
+ Range range = mtctx->jobs[wJobID].prefix;
if (range.size == 0) {
/* Empty prefix */
range = mtctx->jobs[wJobID].src;
@@ -1571,7 +1612,7 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
/**
* Returns non-zero iff buffer and range overlap.
*/
-static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
+static int ZSTDMT_isOverlapped(Buffer buffer, Range range)
{
BYTE const* const bufferStart = (BYTE const*)buffer.start;
BYTE const* const rangeStart = (BYTE const*)range.start;
@@ -1591,10 +1632,10 @@ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
}
}
-static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
+static int ZSTDMT_doesOverlapWindow(Buffer buffer, ZSTD_window_t window)
{
- range_t extDict;
- range_t prefix;
+ Range extDict;
+ Range prefix;
DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
extDict.start = window.dictBase + window.lowLimit;
@@ -1613,7 +1654,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
|| ZSTDMT_isOverlapped(buffer, prefix);
}
-static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
+static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, Buffer buffer)
{
if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
@@ -1638,16 +1679,16 @@ static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
*/
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
{
- range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
+ Range const inUse = ZSTDMT_getInputDataInUse(mtctx);
size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
- size_t const target = mtctx->targetSectionSize;
- buffer_t buffer;
+ size_t const spaceNeeded = mtctx->targetSectionSize;
+ Buffer buffer;
DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
assert(mtctx->inBuff.buffer.start == NULL);
- assert(mtctx->roundBuff.capacity >= target);
+ assert(mtctx->roundBuff.capacity >= spaceNeeded);
- if (spaceLeft < target) {
+ if (spaceLeft < spaceNeeded) {
/* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
* Simply copy the prefix to the beginning in that case.
*/
@@ -1666,7 +1707,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
mtctx->roundBuff.pos = prefixSize;
}
buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
- buffer.capacity = target;
+ buffer.capacity = spaceNeeded;
if (ZSTDMT_isOverlapped(buffer, inUse)) {
DEBUGLOG(5, "Waiting for buffer...");
@@ -1693,7 +1734,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
typedef struct {
size_t toLoad; /* The number of bytes to load from the input. */
int flush; /* Boolean declaring if we must flush because we found a synchronization point. */
-} syncPoint_t;
+} SyncPoint;
/**
* Searches through the input for a synchronization point. If one is found, we
@@ -1701,14 +1742,14 @@ typedef struct {
* Otherwise, we will load as many bytes as possible and instruct the caller
* to continue as normal.
*/
-static syncPoint_t
+static SyncPoint
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
{
BYTE const* const istart = (BYTE const*)input.src + input.pos;
U64 const primePower = mtctx->rsync.primePower;
U64 const hitMask = mtctx->rsync.hitMask;
- syncPoint_t syncPoint;
+ SyncPoint syncPoint;
U64 hash;
BYTE const* prev;
size_t pos;
@@ -1840,7 +1881,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
}
if (mtctx->inBuff.buffer.start != NULL) {
- syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
+ SyncPoint const syncPoint = findSynchronizationPoint(mtctx, *input);
if (syncPoint.flush && endOp == ZSTD_e_continue) {
endOp = ZSTD_e_flush;
}
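
The replacement of ZSTDMT_serialState_update() by the genSequences()/applySequences() pair reorders the worker: the serialized LDM step now runs as early as possible, before the job's CCtx is initialized, and the generated sequences are attached only once that CCtx exists. A simplified outline of the resulting flow, using the functions introduced above (error handling, buffer management and flushing omitted; this is a sketch, not the actual worker):

    static void worker_outline(ZSTDMT_jobDescription* job)
    {
        ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
        RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);

        /* 1) serial step first: wait for this job's turn, update the LDM window,
         *    and generate external sequences for job->src */
        ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID);

        /* 2) initialize the job's CCtx, from the cdict (first job only)
         *    or from the rawContent prefix left by the previous job */

        /* 3) only now reference the generated sequences into the initialized CCtx */
        ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore);

        /* 4) compress job->src chunk by chunk, publishing consumed/cSize as before */
    }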
diff --git a/lib/compress/zstdmt_compress.h b/lib/compress/zstdmt_compress.h
index ed4dc0e99df..91b489b9cb4 100644
--- a/lib/compress/zstdmt_compress.h
+++ b/lib/compress/zstdmt_compress.h
@@ -11,10 +11,10 @@
#ifndef ZSTDMT_COMPRESS_H
#define ZSTDMT_COMPRESS_H
- #if defined (__cplusplus)
- extern "C" {
- #endif
-
+/* === Dependencies === */
+#include "../common/zstd_deps.h" /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
+#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
/* Note : This is an internal API.
* These APIs used to be exposed with ZSTDLIB_API,
@@ -25,12 +25,6 @@
* otherwise ZSTDMT_createCCtx*() will fail.
*/
-/* === Dependencies === */
-#include "../common/zstd_deps.h" /* size_t */
-#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
-#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
-
-
/* === Constants === */
#ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */
# define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256)
@@ -105,9 +99,4 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
*/
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTDMT_COMPRESS_H */
diff --git a/lib/decompress/huf_decompress_amd64.S b/lib/decompress/huf_decompress_amd64.S
index 78da291ee3c..656aada95b8 100644
--- a/lib/decompress/huf_decompress_amd64.S
+++ b/lib/decompress/huf_decompress_amd64.S
@@ -42,13 +42,11 @@
/* Calling convention:
*
- * %rdi contains the first argument: HUF_DecompressAsmArgs*.
+ * %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*.
* %rbp isn't maintained (no frame pointer).
* %rsp contains the stack pointer that grows down.
* No red-zone is assumed, only addresses >= %rsp are used.
* All register contents are preserved.
- *
- * TODO: Support Windows calling convention.
*/
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
@@ -137,7 +135,11 @@ HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
+#if defined(_WIN32)
+ movq %rcx, %rax
+#else
movq %rdi, %rax
+#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
@@ -391,7 +393,12 @@ HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
push %r14
push %r15
+ /* Read HUF_DecompressAsmArgs* args from %rax */
+#if defined(_WIN32)
+ movq %rcx, %rax
+#else
movq %rdi, %rax
+#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c
index 2f03cf7b0c7..9eb98327ef3 100644
--- a/lib/decompress/zstd_decompress.c
+++ b/lib/decompress/zstd_decompress.c
@@ -444,7 +444,7 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
** or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
const BYTE* ip = (const BYTE*)src;
size_t const minInputSize = ZSTD_startingInputLength(format);
@@ -484,8 +484,10 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
- zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
zfhPtr->frameType = ZSTD_skippableFrame;
+ zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START;
+ zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE;
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
return 0;
}
RETURN_ERROR(prefix_unknown, "");
@@ -554,7 +556,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize)
{
return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}
@@ -572,7 +574,7 @@ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
}
#endif
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
return ZSTD_CONTENTSIZE_ERROR;
if (zfh.frameType == ZSTD_skippableFrame) {
@@ -750,7 +752,7 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
/* Extract Frame Header */
{ size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format);
@@ -811,7 +813,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
/** ZSTD_decompressBound() :
* compatible with legacy mode
- * `src` must point to the start of a ZSTD frame or a skippeable frame
+ * `src` must point to the start of a ZSTD frame or a skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the maximum decompressed size of the compressed source
*/
@@ -843,7 +845,7 @@ size_t ZSTD_decompressionMargin(void const* src, size_t srcSize)
ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
size_t const compressedSize = frameSizeInfo.compressedSize;
unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), "");
if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
@@ -917,7 +919,7 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
return regenSize;
}
-static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming)
{
#if ZSTD_TRACE
if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) {
@@ -1057,7 +1059,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
/* Allow caller to get size read */
- DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %zi, consuming %zi bytes of input", op-ostart, ip - (const BYTE*)*srcPtr);
+ DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr));
*srcPtr = ip;
*srcSizePtr = remainingSrcSize;
return (size_t)(op-ostart);
@@ -1641,7 +1643,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
- ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 };
+ ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 };
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
@@ -1999,7 +2001,7 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize)
size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
{
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
RETURN_ERROR_IF(err>0, srcSize_wrong, "");
@@ -2094,6 +2096,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
+ assert(zds != NULL);
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
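
With the skippable-frame branch of ZSTD_getFrameHeader_advanced() now also filling dictID (the magic variant) and headerSize, a header probe on a skippable frame reports the full picture. A small self-contained check of that behaviour (illustrative test code, not part of the patch; ZSTD_getFrameHeader() belongs to the static-linking-only API):

    #define ZSTD_STATIC_LINKING_ONLY
    #include "zstd.h"   /* ZSTD_getFrameHeader, ZSTD_FrameHeader */

    static int checkSkippableHeader(void)
    {
        /* skippable magic 0x184D2A53 (variant 3) followed by a 4-byte payload size, both little-endian */
        unsigned char frame[12] = { 0x53, 0x2A, 0x4D, 0x18,  0x04, 0x00, 0x00, 0x00,  0, 0, 0, 0 };
        ZSTD_FrameHeader zfh;
        if (ZSTD_getFrameHeader(&zfh, frame, sizeof(frame)) != 0) return 0;
        return (zfh.frameType == ZSTD_skippableFrame)
            && (zfh.dictID == 3)                      /* magic variant nibble */
            && (zfh.headerSize == 8)                  /* ZSTD_SKIPPABLEHEADERSIZE */
            && (zfh.frameContentSize == 4);           /* size of the skipped payload */
    }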
diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c
index 76d7332e888..862785a49c6 100644
--- a/lib/decompress/zstd_decompress_block.c
+++ b/lib/decompress/zstd_decompress_block.c
@@ -139,7 +139,7 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+ SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3);
size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
switch(litEncType)
@@ -358,7 +358,7 @@ size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
* - start from default distributions, present in /lib/common/zstd_internal.h
* - generate tables normally, using ZSTD_buildFSETable()
* - printout the content of tables
- * - pretify output, report below, test with fuzzer to ensure it's correct */
+ * - prettify output, report below, test with fuzzer to ensure it's correct */
/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
[...]
    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, "");   /* minimum possible size: 1 byte for symbol encoding types */
RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */
- { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ { SymbolEncodingType_e const LLtype = (SymbolEncodingType_e)(*ip >> 6);
+ SymbolEncodingType_e const OFtype = (SymbolEncodingType_e)((*ip >> 4) & 3);
+ SymbolEncodingType_e const MLtype = (SymbolEncodingType_e)((*ip >> 2) & 3);
ip++;
/* Build DTables */
@@ -1935,12 +1935,6 @@ ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
#endif /* DYNAMIC_BMI2 */
-typedef size_t (*ZSTD_decompressSequences_t)(
- ZSTD_DCtx* dctx,
- void* dst, size_t maxDstSize,
- const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset);
-
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static size_t
ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
diff --git a/lib/decompress/zstd_decompress_internal.h b/lib/decompress/zstd_decompress_internal.h
index 83a7a0115fd..e4bffdbc6cf 100644
--- a/lib/decompress/zstd_decompress_internal.h
+++ b/lib/decompress/zstd_decompress_internal.h
@@ -136,7 +136,7 @@ struct ZSTD_DCtx_s
const void* virtualStart; /* virtual start of previous segment if it was just before current one */
const void* dictEnd; /* end of previous segment */
size_t expected;
- ZSTD_frameHeader fParams;
+ ZSTD_FrameHeader fParams;
U64 processedCSize;
U64 decodedSize;
blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
@@ -154,7 +154,7 @@ struct ZSTD_DCtx_s
size_t rleSize;
size_t staticSize;
int isFrameDecompression;
-#if DYNAMIC_BMI2 != 0
+#if DYNAMIC_BMI2
int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
#endif
@@ -211,11 +211,11 @@ struct ZSTD_DCtx_s
}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
-#if DYNAMIC_BMI2 != 0
- return dctx->bmi2;
+#if DYNAMIC_BMI2
+ return dctx->bmi2;
#else
(void)dctx;
- return 0;
+ return 0;
#endif
}
diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c
index 44f9029acd9..2ef33c73e5d 100644
--- a/lib/dictBuilder/cover.c
+++ b/lib/dictBuilder/cover.c
@@ -21,8 +21,17 @@
/*-*************************************
* Dependencies
***************************************/
+/* qsort_r is an extension. */
+#if defined(__linux) || defined(__linux__) || defined(linux) || defined(__gnu_linux__) || \
+ defined(__CYGWIN__) || defined(__MSYS__)
+#if !defined(_GNU_SOURCE) && !defined(__ANDROID__) /* NDK doesn't ship qsort_r(). */
+#define _GNU_SOURCE
+#endif
+#endif
+
#include <stdio.h>   /* fprintf */
-#include <stdlib.h>  /* malloc, free, qsort */
+#include <stdlib.h>  /* malloc, free, qsort_r */
+
#include <string.h>  /* memset */
#include <time.h>    /* clock */
@@ -232,8 +241,10 @@ typedef struct {
unsigned d;
} COVER_ctx_t;
-/* We need a global context for qsort... */
+#if !defined(_GNU_SOURCE) && !defined(__APPLE__) && !defined(_MSC_VER)
+/* C90 only offers qsort(), which requires a global variable to pass the comparison context. */
static COVER_ctx_t *g_coverCtx = NULL;
+#endif
/*-*************************************
* Helper functions
@@ -276,11 +287,15 @@ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
/**
* Same as COVER_cmp() except ties are broken by pointer value
- * NOTE: g_coverCtx must be set to call this function. A global is required because
- * qsort doesn't take an opaque pointer.
*/
-static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
- int result = COVER_cmp(g_coverCtx, lp, rp);
+#if (defined(_WIN32) && defined(_MSC_VER)) || defined(__APPLE__)
+static int WIN_CDECL COVER_strict_cmp(void* g_coverCtx, const void* lp, const void* rp) {
+#elif defined(_GNU_SOURCE)
+static int COVER_strict_cmp(const void *lp, const void *rp, void *g_coverCtx) {
+#else /* C90 fallback.*/
+static int COVER_strict_cmp(const void *lp, const void *rp) {
+#endif
+ int result = COVER_cmp((COVER_ctx_t*)g_coverCtx, lp, rp);
if (result == 0) {
result = lp < rp ? -1 : 1;
}
@@ -289,14 +304,50 @@ static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
/**
* Faster version for d <= 8.
*/
-static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) {
- int result = COVER_cmp8(g_coverCtx, lp, rp);
+#if (defined(_WIN32) && defined(_MSC_VER)) || defined(__APPLE__)
+static int WIN_CDECL COVER_strict_cmp8(void* g_coverCtx, const void* lp, const void* rp) {
+#elif defined(_GNU_SOURCE)
+static int COVER_strict_cmp8(const void *lp, const void *rp, void *g_coverCtx) {
+#else /* C90 fallback.*/
+static int COVER_strict_cmp8(const void *lp, const void *rp) {
+#endif
+ int result = COVER_cmp8((COVER_ctx_t*)g_coverCtx, lp, rp);
if (result == 0) {
result = lp < rp ? -1 : 1;
}
return result;
}
+/**
+ * Abstract away divergence of qsort_r() parameters.
+ * Hopefully when C11 becomes the norm, we will be able
+ * to clean it up.
+ */
+static void stableSort(COVER_ctx_t *ctx) {
+#if defined(__APPLE__)
+ qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ ctx,
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#elif defined(_GNU_SOURCE)
+ qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
+ ctx);
+#elif defined(_WIN32) && defined(_MSC_VER)
+ qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
+ ctx);
+#elif defined(__OpenBSD__)
+ g_coverCtx = ctx;
+ mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#else /* C90 fallback.*/
+ g_coverCtx = ctx;
+ /* TODO(cavalcanti): implement a reentrant qsort() when qsort_r() is not available. */
+ qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#endif
+}
+
/**
* Returns the first pointer in [first, last) whose element does not compare
* less than value. If no such element exists it returns last.
@@ -620,17 +671,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
for (i = 0; i < ctx->suffixSize; ++i) {
ctx->suffix[i] = i;
}
- /* qsort doesn't take an opaque pointer, so pass as a global.
- * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
- */
- g_coverCtx = ctx;
-#if defined(__OpenBSD__)
- mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
- (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
-#else
- qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
- (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
-#endif
+ stableSort(ctx);
}
DISPLAYLEVEL(2, "Computing frequencies\n");
/* For each dmer group (group of positions with the same first d bytes):
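
The #if ladder in stableSort() is needed because the reentrant sort takes its context in different positions on each platform: glibc's qsort_r() and MSVC's qsort_s() pass the comparator before the context, Apple/BSD's qsort_r() passes the context before the comparator, and the comparator's own parameter order flips accordingly. A minimal standalone illustration of the same pattern, sorting an index array through a user-supplied context (hypothetical demo, not part of the patch):

    #include <stdlib.h>

    typedef struct { const unsigned* keys; } SortCtx;   /* stand-in for COVER_ctx_t */

    #if defined(_GNU_SOURCE)
    static int cmpIdx(const void* a, const void* b, void* opaque)    /* glibc: context comes last */
    #else
    static int cmpIdx(void* opaque, const void* a, const void* b)    /* Apple/BSD and MSVC: context comes first */
    #endif
    {
        const SortCtx* const ctx = (const SortCtx*)opaque;
        unsigned const ka = ctx->keys[*(const unsigned*)a];
        unsigned const kb = ctx->keys[*(const unsigned*)b];
        return (ka > kb) - (ka < kb);
    }

    static void sortIndices(unsigned* idx, size_t nbIndices, SortCtx* ctx)
    {
    #if defined(_GNU_SOURCE)
        qsort_r(idx, nbIndices, sizeof(unsigned), cmpIdx, ctx);    /* comparator, then context */
    #elif defined(__APPLE__)
        qsort_r(idx, nbIndices, sizeof(unsigned), ctx, cmpIdx);    /* context, then comparator */
    #elif defined(_WIN32) && defined(_MSC_VER)
        qsort_s(idx, nbIndices, sizeof(unsigned), cmpIdx, ctx);    /* comparator, then context */
    #else
        /* C90: no reentrant qsort(); fall back to qsort() plus a file-scope context, as cover.c does */
        (void)idx; (void)nbIndices; (void)ctx; (void)cmpIdx;
    #endif
    }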
diff --git a/lib/dictBuilder/divsufsort.h b/lib/dictBuilder/divsufsort.h
index 5440994af15..3ed2b287ab1 100644
--- a/lib/dictBuilder/divsufsort.h
+++ b/lib/dictBuilder/divsufsort.h
@@ -27,11 +27,6 @@
#ifndef _DIVSUFSORT_H
#define _DIVSUFSORT_H 1
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
/*- Prototypes -*/
/**
@@ -59,9 +54,4 @@ divsufsort(const unsigned char *T, int *SA, int n, int openMP);
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
-
#endif /* _DIVSUFSORT_H */
diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c
index 82e999e80e3..d5e60a4da32 100644
--- a/lib/dictBuilder/zdict.c
+++ b/lib/dictBuilder/zdict.c
@@ -580,7 +580,7 @@ static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
if (cSize) { /* if == 0; block is not compressible */
- const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
+ const SeqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
/* literals stats */
{ const BYTE* bytePtr;
@@ -608,7 +608,7 @@ static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
}
if (nbSeq >= 2) { /* rep offsets */
- const seqDef* const seq = seqStorePtr->sequencesStart;
+ const SeqDef* const seq = seqStorePtr->sequencesStart;
U32 offset1 = seq[0].offBase - ZSTD_REP_NUM;
U32 offset2 = seq[1].offBase - ZSTD_REP_NUM;
if (offset1 >= MAXREPOFFSET) offset1 = 0;
diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c
index 6cf51234a24..ad3c9330ef9 100644
--- a/lib/legacy/zstd_v01.c
+++ b/lib/legacy/zstd_v01.c
@@ -1383,7 +1383,7 @@ typedef struct {
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
-} seqStore_t;
+} SeqStore_t;
typedef struct ZSTD_Cctx_s
@@ -1391,7 +1391,7 @@ typedef struct ZSTD_Cctx_s
const BYTE* base;
U32 current;
U32 nextUpdate;
- seqStore_t seqStore;
+ SeqStore_t seqStore;
#ifdef __AVX2__
__m256i hashTable[HASH_TABLESIZE>>3];
#else
diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c
index 6d39b6e5b2d..d1e00385a23 100644
--- a/lib/legacy/zstd_v02.c
+++ b/lib/legacy/zstd_v02.c
@@ -2722,7 +2722,7 @@ typedef struct {
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
-} seqStore_t;
+} SeqStore_t;
/* *************************************
diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c
index 47195f33741..7d82db6669f 100644
--- a/lib/legacy/zstd_v03.c
+++ b/lib/legacy/zstd_v03.c
@@ -2362,7 +2362,7 @@ typedef struct {
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
-} seqStore_t;
+} SeqStore_t;
/* *************************************
diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c
index 44a877bf139..7a3af4214f8 100644
--- a/lib/legacy/zstd_v05.c
+++ b/lib/legacy/zstd_v05.c
@@ -491,7 +491,7 @@ typedef struct {
U32 litLengthSum;
U32 litSum;
U32 offCodeSum;
-} seqStore_t;
+} SeqStore_t;
diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c
index 00d6ef79aa2..88a39e2a070 100644
--- a/lib/legacy/zstd_v06.c
+++ b/lib/legacy/zstd_v06.c
@@ -552,9 +552,9 @@ typedef struct {
U32 cachedLitLength;
const BYTE* cachedLiterals;
ZSTDv06_stats_t stats;
-} seqStore_t;
+} SeqStore_t;
-void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
+void ZSTDv06_seqToCodes(const SeqStore_t* seqStorePtr, size_t const nbSeq);
#endif /* ZSTDv06_CCOMMON_H_MODULE */
@@ -3919,6 +3919,10 @@ ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void)
if (zbd==NULL) return NULL;
memset(zbd, 0, sizeof(*zbd));
zbd->zd = ZSTDv06_createDCtx();
+ if (zbd->zd==NULL) {
+ ZBUFFv06_freeDCtx(zbd); /* avoid leaking the context */
+ return NULL;
+ }
zbd->stage = ZBUFFds_init;
return zbd;
}
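The ZBUFFv06_createDCtx() fix above is the standard checked-nested-allocation pattern: if the inner allocation fails, tear down the partially built object instead of leaking it. A standalone sketch with hypothetical Inner/Outer types (not zstd code):

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned char* workspace; } Inner;
typedef struct { Inner* inner; int stage; } Outer;

static Inner* Inner_create(void) {
    Inner* in = (Inner*)malloc(sizeof(Inner));
    if (in == NULL) return NULL;
    in->workspace = (unsigned char*)malloc(1 << 16);
    if (in->workspace == NULL) { free(in); return NULL; }
    return in;
}

static void Inner_free(Inner* in) {
    if (in == NULL) return;        /* tolerate NULL, like the zstd free functions */
    free(in->workspace);
    free(in);
}

static void Outer_free(Outer* out) {
    if (out == NULL) return;
    Inner_free(out->inner);        /* safe even if the inner object was never created */
    free(out);
}

static Outer* Outer_create(void) {
    Outer* out = (Outer*)malloc(sizeof(Outer));
    if (out == NULL) return NULL;
    memset(out, 0, sizeof(*out));
    out->inner = Inner_create();
    if (out->inner == NULL) {
        Outer_free(out);           /* avoid leaking the outer object */
        return NULL;
    }
    out->stage = 1;
    return out;
}

int main(void) {
    Outer* out = Outer_create();
    Outer_free(out);
    return 0;
}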
diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c
index 8778f079ca2..cdc56288cb2 100644
--- a/lib/legacy/zstd_v07.c
+++ b/lib/legacy/zstd_v07.c
@@ -2787,9 +2787,9 @@ typedef struct {
U32 cachedLitLength;
const BYTE* cachedLiterals;
ZSTDv07_stats_t stats;
-} seqStore_t;
+} SeqStore_t;
-void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
+void ZSTDv07_seqToCodes(const SeqStore_t* seqStorePtr, size_t const nbSeq);
/* custom memory allocation functions */
static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };
diff --git a/lib/libzstd.mk b/lib/libzstd.mk
index a308a6ef6c9..91bd4caf382 100644
--- a/lib/libzstd.mk
+++ b/lib/libzstd.mk
@@ -22,7 +22,7 @@ LIBZSTD_MK_INCLUDED := 1
# By default, library's directory is same as this included makefile
LIB_SRCDIR ?= $(dir $(realpath $(lastword $(MAKEFILE_LIST))))
-LIB_BINDIR ?= $(LIBSRC_DIR)
+LIB_BINDIR ?= $(LIB_SRCDIR)
# ZSTD_LIB_MINIFY is a helper variable that
# configures a bunch of other variables to space-optimized defaults.
@@ -206,15 +206,13 @@ endif
endif
CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
-UNAME := $(shell uname)
+UNAME := $(shell sh -c 'MSYSTEM="MSYS" uname')
ifndef BUILD_DIR
ifeq ($(UNAME), Darwin)
ifeq ($(shell md5 < /dev/null > /dev/null; echo $$?), 0)
HASH ?= md5
endif
-else ifeq ($(UNAME), FreeBSD)
- HASH ?= gmd5sum
else ifeq ($(UNAME), NetBSD)
HASH ?= md5 -n
else ifeq ($(UNAME), OpenBSD)
diff --git a/lib/libzstd.pc.in b/lib/libzstd.pc.in
index d5cc0270cea..d7b6c858220 100644
--- a/lib/libzstd.pc.in
+++ b/lib/libzstd.pc.in
@@ -11,6 +11,6 @@ Name: zstd
Description: fast lossless compression algorithm library
URL: https://facebook.github.io/zstd/
Version: @VERSION@
-Libs: -L${libdir} -lzstd
+Libs: -L${libdir} -lzstd @LIBS_MT@
Libs.private: @LIBS_PRIVATE@
-Cflags: -I${includedir}
+Cflags: -I${includedir} @LIBS_MT@
diff --git a/lib/zdict.h b/lib/zdict.h
index 2268f948a5d..599b793013b 100644
--- a/lib/zdict.h
+++ b/lib/zdict.h
@@ -8,16 +8,16 @@
* You may select, at your option, one of the above-listed licenses.
*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#ifndef ZSTD_ZDICT_H
#define ZSTD_ZDICT_H
+
/*====== Dependencies ======*/
#include <stddef.h>    /* size_t */
+#if defined (__cplusplus)
+extern "C" {
+#endif
/* ===== ZDICTLIB_API : control library symbols visibility ===== */
#ifndef ZDICTLIB_VISIBLE
@@ -248,7 +248,7 @@ typedef struct {
* is presumed that the most profitable content is at the end of the dictionary,
* since that is the cheapest to reference.
*
- * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN).
+ * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).
*
* @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),
* or an error code, which can be tested by ZDICT_isError().
@@ -271,11 +271,19 @@ ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictS
ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
+#if defined (__cplusplus)
+}
+#endif
+
#endif /* ZSTD_ZDICT_H */
#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC)
#define ZSTD_ZDICT_H_STATIC
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/* This can be overridden externally to hide static symbols. */
#ifndef ZDICTLIB_STATIC_API
# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
@@ -466,9 +474,8 @@ ZDICTLIB_STATIC_API
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
-
-#endif /* ZSTD_ZDICT_H_STATIC */
-
#if defined (__cplusplus)
}
#endif
+
+#endif /* ZSTD_ZDICT_H_STATIC */
diff --git a/lib/zstd.h b/lib/zstd.h
index 5d1fef8a6b4..b8c0644a7ec 100644
--- a/lib/zstd.h
+++ b/lib/zstd.h
@@ -7,17 +7,22 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
#ifndef ZSTD_H_235446
#define ZSTD_H_235446
+
/* ====== Dependencies ======*/
-#include <limits.h>   /* INT_MAX */
#include <stddef.h>    /* size_t */
+#include "zstd_errors.h" /* list of errors */
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#include <limits.h>   /* INT_MAX */
+#endif /* ZSTD_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
/* ===== ZSTDLIB_API : control library symbols visibility ===== */
#ifndef ZSTDLIB_VISIBLE
@@ -57,7 +62,7 @@ extern "C" {
#else
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
# define ZSTD_DEPRECATED(message) [[deprecated(message)]]
-# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
+# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) || defined(__IAR_SYSTEMS_ICC__)
# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message)))
# elif defined(__GNUC__) && (__GNUC__ >= 3)
# define ZSTD_DEPRECATED(message) __attribute__((deprecated))
@@ -106,7 +111,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 5
-#define ZSTD_VERSION_RELEASE 6
+#define ZSTD_VERSION_RELEASE 7
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/*! ZSTD_versionNumber() :
@@ -144,7 +149,7 @@ ZSTDLIB_API const char* ZSTD_versionString(void);
/***************************************
-* Simple API
+* Simple Core API
***************************************/
/*! ZSTD_compress() :
* Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
@@ -157,68 +162,80 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
int compressionLevel);
/*! ZSTD_decompress() :
- * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
- * `dstCapacity` is an upper bound of originalSize to regenerate.
- * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
- * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
- * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * Multiple compressed frames can be decompressed at once with this method.
+ * The result will be the concatenation of all decompressed frames, back to back.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().
+ * If maximum upper bound isn't known, prefer using streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
+
+/*====== Decompression helper functions ======*/
+
/*! ZSTD_getFrameContentSize() : requires v1.3.0+
- * `src` should point to the start of a ZSTD encoded frame.
- * `srcSize` must be at least as large as the frame header.
- * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- * @return : - decompressed size of `src` frame content, if known
- * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
- * note 1 : a 0 return value means the frame is valid but "empty".
- * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
- * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- * In which case, it's necessary to use streaming mode to decompress data.
- * Optionally, application can rely on some implicit limit,
- * as ZSTD_decompress() only needs an upper bound of decompressed size.
- * (For example, data could be necessarily cut into blocks <= 16 KB).
- * note 3 : decompressed size is always present when compression is completed using single-pass functions,
- * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
- * note 4 : decompressed size can be very large (64-bits value),
- * potentially larger than what local system can handle as a single memory segment.
- * In which case, it's necessary to use streaming mode to decompress data.
- * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- * Always ensure return value fits within application's authorized limits.
- * Each application can set its own limits.
- * note 6 : This function replaces ZSTD_getDecompressedSize() */
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * When invoking this method on a skippable frame, it will return 0.
+ * note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode).
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
-/*! ZSTD_getDecompressedSize() :
- * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+/*! ZSTD_getDecompressedSize() (obsolete):
+ * This function is now obsolete, in favor of ZSTD_getFrameContentSize().
* Both functions work the same way, but ZSTD_getDecompressedSize() blends
* "empty", "unknown" and "error" results to the same return value (0),
* while ZSTD_getFrameContentSize() gives them separate return values.
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
-ZSTDLIB_API
-unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+
* `src` should point to the start of a ZSTD frame or skippable frame.
* `srcSize` must be >= first frame size
* @return : the compressed size of the first frame starting at `src`,
* suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
- * or an error code if input is invalid */
+ * or an error code if input is invalid
+ * Note 1: this method is called _find*() because it's not enough to read the header,
+ * it may have to scan through the frame's content, to reach its end.
+ * Note 2: this method also works with Skippable Frames. In which case,
+ * it returns the size of the complete skippable frame,
+ * which is always equal to its content size + 8 bytes for headers. */
ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-/*====== Helper functions ======*/
-/* ZSTD_compressBound() :
+/*====== Compression helper functions ======*/
+
+/*! ZSTD_compressBound() :
* maximum compressed size in worst case single-pass scenario.
- * When invoking `ZSTD_compress()` or any other one-pass compression function,
+ * When invoking `ZSTD_compress()`, or any other one-pass compression function,
* it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
* as it eliminates one potential failure scenario,
* aka not enough room in dst buffer to write the compressed frame.
- * Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE .
+ * Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE .
* In which case, ZSTD_compressBound() will return an error code
* which can be tested using ZSTD_isError().
*
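A minimal single-pass round trip exercising the functions documented in this hunk (ZSTD_compressBound(), ZSTD_compress(), ZSTD_getFrameContentSize(), ZSTD_decompress()); an illustrative sketch, not part of the patch, with error handling abbreviated:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void) {
    const char src[] = "sample payload, sample payload, sample payload";
    size_t const srcSize = sizeof(src);

    /* dstCapacity >= ZSTD_compressBound(srcSize) removes the
     * "not enough room in dst buffer" failure scenario. */
    size_t const bound = ZSTD_compressBound(srcSize);
    void* const compressed = malloc(bound);
    if (compressed == NULL) return 1;
    size_t const cSize = ZSTD_compress(compressed, bound, src, srcSize, 3);
    if (ZSTD_isError(cSize)) {
        fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));
        free(compressed); return 1;
    }

    /* Single-pass compression always records the content size,
     * so ZSTD_getFrameContentSize() can size the output buffer exactly. */
    unsigned long long const rSize = ZSTD_getFrameContentSize(compressed, cSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN) {
        free(compressed); return 1;
    }
    void* const restored = malloc((size_t)rSize);
    if (restored == NULL) { free(compressed); return 1; }
    size_t const dSize = ZSTD_decompress(restored, (size_t)rSize, compressed, cSize);
    if (ZSTD_isError(dSize) || dSize != srcSize || memcmp(src, restored, srcSize) != 0) {
        free(compressed); free(restored); return 1;
    }
    printf("round-trip OK: %zu -> %zu -> %zu bytes\n", srcSize, cSize, dSize);
    free(compressed); free(restored);
    return 0;
}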
@@ -226,21 +243,25 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
* same as ZSTD_compressBound(), but as a macro.
* It can be used to produce constants, which can be useful for static allocation,
* for example to size a static array on stack.
- * Will produce constant value 0 if srcSize too large.
+ * Will produce constant value 0 if srcSize is too large.
*/
#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+
+
+/*====== Error helper functions ======*/
/* ZSTD_isError() :
* Most ZSTD_* functions returning a size_t value can be tested for error,
* using ZSTD_isError().
* @return 1 if error, 0 otherwise
*/
-ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
-ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
-ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
-ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
-ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
+ZSTDLIB_API unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
/***************************************
@@ -248,17 +269,17 @@ ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compres
***************************************/
/*= Compression context
* When compressing many times,
- * it is recommended to allocate a context just once,
+ * it is recommended to allocate a compression context just once,
* and reuse it for each successive compression operation.
- * This will make workload friendlier for system's memory.
+ * This will make the workload easier on the system's memory.
* Note : re-using context is just a speed / resource optimization.
* It doesn't change the compression ratio, which remains identical.
- * Note 2 : In multi-threaded environments,
- * use one different context per thread for parallel execution.
+ * Note 2: For parallel execution in multi-threaded environments,
+ * use one different context per thread.
*/
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
-ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */
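The reuse guidance above amounts to the following pattern, sketched here for illustration (not part of the patch): create one ZSTD_CCtx and feed it successive ZSTD_compressCCtx() calls.

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void) {
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return 1;

    const char* inputs[] = { "first buffer", "second buffer", "third buffer" };
    char dst[256];                 /* plenty for these tiny inputs */
    size_t i;
    for (i = 0; i < 3; i++) {
        size_t const cSize = ZSTD_compressCCtx(cctx, dst, sizeof(dst),
                                               inputs[i], strlen(inputs[i]), 3);
        if (ZSTD_isError(cSize)) {
            fprintf(stderr, "error: %s\n", ZSTD_getErrorName(cSize));
            break;
        }
        printf("input %u: %zu -> %zu bytes\n", (unsigned)i, strlen(inputs[i]), cSize);
    }
    ZSTD_freeCCtx(cctx);           /* accepts NULL, returns 0 in that case */
    return 0;
}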
/*! ZSTD_compressCCtx() :
* Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
@@ -266,7 +287,7 @@ ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer *
* this function compresses at the requested compression level,
* __ignoring any other advanced parameter__ .
* If any advanced parameter was set using the advanced API,
- * they will all be reset. Only `compressionLevel` remains.
+ * they will all be reset. Only @compressionLevel remains.
*/
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
@@ -392,7 +413,7 @@ typedef enum {
* Special: value 0 means "use default strategy". */
ZSTD_c_targetCBlockSize=130, /* v1.5.6+
- * Attempts to fit compressed block size into approximatively targetCBlockSize.
+ * Attempts to fit compressed block size into approximately targetCBlockSize.
* Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
* Note that it's not a guarantee, just a convergence target (default:0).
* No target when targetCBlockSize == 0.
@@ -488,7 +509,8 @@ typedef enum {
* ZSTD_c_stableOutBuffer
* ZSTD_c_blockDelimiters
* ZSTD_c_validateSequences
- * ZSTD_c_useBlockSplitter
+ * ZSTD_c_blockSplitterLevel
+ * ZSTD_c_splitAfterSequences
* ZSTD_c_useRowMatchFinder
* ZSTD_c_prefetchCDictTables
* ZSTD_c_enableSeqProducerFallback
@@ -515,7 +537,8 @@ typedef enum {
ZSTD_c_experimentalParam16=1013,
ZSTD_c_experimentalParam17=1014,
ZSTD_c_experimentalParam18=1015,
- ZSTD_c_experimentalParam19=1016
+ ZSTD_c_experimentalParam19=1016,
+ ZSTD_c_experimentalParam20=1017
} ZSTD_cParameter;
typedef struct {
@@ -855,7 +878,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
*
* A ZSTD_DStream object is required to track streaming operations.
* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
-* ZSTD_DStream objects can be reused multiple times.
+* ZSTD_DStream objects can be re-employed multiple times.
*
* Use ZSTD_initDStream() to start a new decompression operation.
* @return : recommended first input size
@@ -865,16 +888,21 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
* The function will update both `pos` fields.
* If `input.pos < input.size`, some input has not been consumed.
* It's up to the caller to present again remaining data.
+*
* The function tries to flush all data decoded immediately, respecting output buffer size.
* If `output.pos < output.size`, decoder has flushed everything it could.
-* But if `output.pos == output.size`, there might be some data left within internal buffers.,
+*
+* However, when `output.pos == output.size`, it's more difficult to know.
+* If @return > 0, the frame is not complete, meaning
+* either there is still some data left to flush within internal buffers,
+* or there is more input to read to complete the frame (or both).
* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
* @return : 0 when a frame is completely decoded and fully flushed,
* or an error code, which can be tested using ZSTD_isError(),
* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
* the return value is a suggested next input size (just a hint for better latency)
-* that will never request more than the remaining frame size.
+* that will never request more than the remaining content of the compressed frame.
* *******************************************************************************/
typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
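The return-value contract described above drives the canonical decompression loop: keep calling ZSTD_decompressStream() while it reports work remaining, refilling input when it is fully consumed. A sketch (not part of the patch, error paths abbreviated) that reads a compressed stream from stdin and writes the decoded bytes to stdout:

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

int main(void) {
    size_t const inCap  = ZSTD_DStreamInSize();    /* recommended buffer sizes */
    size_t const outCap = ZSTD_DStreamOutSize();
    void* const inBuf   = malloc(inCap);
    void* const outBuf  = malloc(outCap);
    ZSTD_DStream* const dstream = ZSTD_createDStream();
    size_t lastRet = 0;
    if (!inBuf || !outBuf || !dstream) return 1;
    (void)ZSTD_initDStream(dstream);               /* @return is the recommended first input size */

    for (;;) {
        size_t const readSize = fread(inBuf, 1, inCap, stdin);
        ZSTD_inBuffer input = { inBuf, readSize, 0 };
        if (readSize == 0) break;                  /* no more compressed input */
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
            if (ZSTD_isError(ret)) {
                fprintf(stderr, "error: %s\n", ZSTD_getErrorName(ret));
                return 1;
            }
            fwrite(outBuf, 1, output.pos, stdout);
            lastRet = ret;                         /* 0 == frame fully decoded and flushed */
        }
    }
    if (lastRet != 0) fprintf(stderr, "warning: input ended inside a frame\n");
    ZSTD_freeDStream(dstream);
    free(inBuf); free(outBuf);
    return 0;
}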
@@ -901,9 +929,10 @@ ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
* Function will update both input and output `pos` fields exposing current state via these fields:
* - `input.pos < input.size`, some input remaining and caller should provide remaining input
* on the next call.
- * - `output.pos < output.size`, decoder finished and flushed all remaining buffers.
- * - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,
- * call ZSTD_decompressStream() again to flush remaining data to output.
+ * - `output.pos < output.size`, decoder flushed internal output buffer.
+ * - `output.pos == output.size`, unflushed data potentially present in the internal buffers,
+ * check ZSTD_decompressStream() @return value,
+ * if > 0, invoke it again to flush remaining data to output.
* Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
*
* @return : 0 when a frame is completely decoded and fully flushed,
@@ -1181,6 +1210,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+#if defined (__cplusplus)
+}
+#endif
+
#endif /* ZSTD_H_235446 */
@@ -1196,6 +1229,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/* This can be overridden externally to hide static symbols. */
#ifndef ZSTDLIB_STATIC_API
# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
@@ -1307,7 +1344,7 @@ typedef struct {
*
* Note: This field is optional. ZSTD_generateSequences() will calculate the value of
* 'rep', but repeat offsets do not necessarily need to be calculated from an external
- * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * sequence provider perspective. For example, ZSTD_compressSequences() does not
* use this 'rep' field at all (as of now).
*/
} ZSTD_Sequence;
@@ -1412,14 +1449,15 @@ typedef enum {
} ZSTD_literalCompressionMode_e;
typedef enum {
- /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
- * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
- * or ZSTD_ps_disable allow for a force enable/disable the feature.
+ /* Note: This enum controls features which are conditionally beneficial.
+ * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto),
+ * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable forcibly enables/disables the feature.
*/
ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */
ZSTD_ps_enable = 1, /* Force-enable the feature */
ZSTD_ps_disable = 2 /* Do not use the feature */
-} ZSTD_paramSwitch_e;
+} ZSTD_ParamSwitch_e;
+#define ZSTD_paramSwitch_e ZSTD_ParamSwitch_e /* old name */
/***************************************
* Frame header and size functions
@@ -1464,34 +1502,36 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src,
ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
/*! ZSTD_frameHeaderSize() :
- * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.
* @return : size of the Frame Header,
* or an error code (if srcSize is too small) */
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
-typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e;
+#define ZSTD_frameType_e ZSTD_FrameType_e /* old name */
typedef struct {
unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
unsigned blockSizeMax;
- ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
unsigned headerSize;
- unsigned dictID;
+ unsigned dictID; /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */
unsigned checksumFlag;
unsigned _reserved1;
unsigned _reserved2;
-} ZSTD_frameHeader;
+} ZSTD_FrameHeader;
+#define ZSTD_frameHeader ZSTD_FrameHeader /* old name */
/*! ZSTD_getFrameHeader() :
- * decode Frame Header, or requires larger `srcSize`.
- * @return : 0, `zfhPtr` is correctly filled,
- * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * decode Frame Header into `zfhPtr`, or requires larger `srcSize`.
+ * @return : 0 => header is complete, `zfhPtr` is correctly filled,
+ * >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled,
* or an error code, which can be tested using ZSTD_isError() */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize);
/*! ZSTD_getFrameHeader_advanced() :
* same as ZSTD_getFrameHeader(),
* with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
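A small sketch of the "0 or wanted srcSize" contract documented above (illustrative only; it assumes the renamed ZSTD_FrameHeader from this patch, and these are static-linking-only symbols):

#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

static void probe(const void* src, size_t srcSize) {
    ZSTD_FrameHeader zfh;
    size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ZSTD_isError(ret)) {
        printf("not a zstd frame: %s\n", ZSTD_getErrorName(ret));
    } else if (ret > 0) {
        printf("need at least %zu bytes to decode the header\n", ret);
    } else {
        printf("windowSize=%llu contentSize=%llu checksum=%u\n",
               zfh.windowSize, zfh.frameContentSize, zfh.checksumFlag);
    }
}

int main(void) {
    char frame[128];
    size_t const cSize = ZSTD_compress(frame, sizeof(frame), "abc", 3, 1);
    if (ZSTD_isError(cSize)) return 1;
    probe(frame, 2);       /* too small: asks for more input, zfh not filled */
    probe(frame, cSize);   /* complete: header fields are filled */
    return 0;
}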
/*! ZSTD_decompressionMargin() :
* Zstd supports in-place decompression, where the input and output buffers overlap.
@@ -1539,9 +1579,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSi
))
typedef enum {
- ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
- ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
-} ZSTD_sequenceFormat_e;
+ ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */
+} ZSTD_SequenceFormat_e;
+#define ZSTD_sequenceFormat_e ZSTD_SequenceFormat_e /* old name */
/*! ZSTD_sequenceBound() :
* `srcSize` : size of the input buffer
@@ -1565,7 +1606,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
* @param zc The compression context to be used for ZSTD_compress2(). Set any
* compression parameters you need on this context.
* @param outSeqs The output sequences buffer of size @p outSeqsSize
- * @param outSeqsSize The size of the output sequences buffer.
+ * @param outSeqsCapacity The size of the output sequences buffer.
* ZSTD_sequenceBound(srcSize) is an upper bound on the number
* of sequences that can be generated.
* @param src The source buffer to generate sequences from of size @p srcSize.
@@ -1583,7 +1624,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
ZSTDLIB_STATIC_API size_t
ZSTD_generateSequences(ZSTD_CCtx* zc,
- ZSTD_Sequence* outSeqs, size_t outSeqsSize,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
const void* src, size_t srcSize);
/*! ZSTD_mergeBlockDelimiters() :
@@ -1603,7 +1644,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
* Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
* @src contains the entire input (not just the literals).
* If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
- * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
* The entire source is compressed into a single frame.
*
* The compression behavior changes based on cctx params. In particular:
@@ -1612,11 +1653,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
* the block size derived from the cctx, and sequences may be split. This is the default setting.
*
* If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
- * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
*
- * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
- * behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
- * specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
+ * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes
+ * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit
+ * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.
+ * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).
+ * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
+ *
+ * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) and then bail out and return an error.
*
* In addition to the two adjustable experimental params, there are other important cctx params.
* - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
@@ -1624,15 +1671,42 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
* - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
* is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
*
- * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
- * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
- * and cannot emit an RLE block that disagrees with the repcode history
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence.rep is effectively unused.
+ * Dev Note: Once the ability to ingest repcodes becomes available, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history.
+ * @return : final compressed size, or a ZSTD error code.
+ */
+ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequences(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
+
+
+/*! ZSTD_compressSequencesAndLiterals() :
+ * This is a variant of ZSTD_compressSequences() which,
+ * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),
+ * aka all the literals, already extracted and laid out into a single continuous buffer.
+ * This can be useful if the process generating the sequences also happens to generate the buffer of literals,
+ * thus skipping an extraction + caching stage.
+ * It's a speed optimization, useful when the right conditions are met,
+ * but it also features the following limitations:
+ * - Only supports explicit delimiter mode
+ * - Currently does not support Sequences validation (so input Sequences are trusted)
+ * - Not compatible with frame checksum, which must be disabled
+ * - If any block is incompressible, will fail and return an error
+ * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.
+ * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.
+ * @litBufCapacity must be at least 8 bytes larger than @litSize.
+ * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
* @return : final compressed size, or a ZSTD error code.
*/
ZSTDLIB_STATIC_API size_t
-ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
- const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
- const void* src, size_t srcSize);
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t nbSequences,
+ const void* literals, size_t litSize, size_t litBufCapacity,
+ size_t decompressedSize);
/*! ZSTD_writeSkippableFrame() :
@@ -1640,8 +1714,8 @@ ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
*
* Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
* ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
- * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
- * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used,
+ * so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
*
* Returns an error if destination buffer is not large enough, if the source size is not representable
* with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
@@ -1649,26 +1723,28 @@ ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
* @return : number of bytes written or a ZSTD error.
*/
ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, unsigned magicVariant);
+ const void* src, size_t srcSize,
+ unsigned magicVariant);
/*! ZSTD_readSkippableFrame() :
- * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ * Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.
*
- * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
- * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
- * in the magicVariant.
+ * The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.
+ * This can be NULL if the caller is not interested in the magicVariant.
*
* Returns an error if destination buffer is not large enough, or if the frame is not skippable.
*
* @return : number of bytes written or a ZSTD error.
*/
-ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
- const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
+ unsigned* magicVariant,
+ const void* src, size_t srcSize);
/*! ZSTD_isSkippableFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
*/
-ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
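Writing a skippable frame and reading it back, as a sketch of the three functions above (illustrative only; with this patch these are static-linking-only symbols, so ZSTD_STATIC_LINKING_ONLY must be defined):

#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void) {
    const char payload[] = "application metadata";
    char frame[128];
    char restored[128];
    unsigned variant = 0;

    /* the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + 7 */
    size_t const fSize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                  payload, sizeof(payload), 7);
    if (ZSTD_isError(fSize)) return 1;

    if (!ZSTD_isSkippableFrame(frame, fSize)) return 1;

    size_t const rSize = ZSTD_readSkippableFrame(restored, sizeof(restored),
                                                 &variant, frame, fSize);
    if (ZSTD_isError(rSize)) return 1;

    printf("skippable frame: %zu bytes total, variant %u, %zu payload bytes\n",
           fSize, variant, rSize);
    return (rSize == sizeof(payload)
            && memcmp(payload, restored, rSize) == 0
            && variant == 7) ? 0 : 1;
}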
@@ -1796,7 +1872,15 @@ static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
+
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
+#endif
ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic pop
+#endif
ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
@@ -1976,7 +2060,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* See the comments on that enum for an explanation of the feature. */
#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
-/* Controlled with ZSTD_paramSwitch_e enum.
+/* Controlled with ZSTD_ParamSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never compress literals.
* Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
@@ -2117,22 +2201,46 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
/* ZSTD_c_validateSequences
* Default is 0 == disabled. Set to 1 to enable sequence validation.
*
- * For use with sequence compression API: ZSTD_compressSequences().
- * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * For use with sequence compression API: ZSTD_compressSequences*().
+ * Designates whether or not provided sequences are validated within ZSTD_compressSequences*()
* during function execution.
*
- * Without validation, providing a sequence that does not conform to the zstd spec will cause
- * undefined behavior, and may produce a corrupted block.
+ * When Sequence validation is disabled (default), Sequences are compressed as-is,
+ * so they must be correct, otherwise it would result in a corruption error.
*
- * With validation enabled, if sequence is invalid (see doc/zstd_compression_format.md for
+ * Sequence validation adds some protection, by ensuring that all values respect boundary conditions.
+ * If a Sequence is detected invalid (see doc/zstd_compression_format.md for
* specifics regarding offset/matchlength requirements) then the function will bail out and
* return an error.
- *
*/
#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
-/* ZSTD_c_useBlockSplitter
- * Controlled with ZSTD_paramSwitch_e enum.
+/* ZSTD_c_blockSplitterLevel
+ * note: this parameter only influences the first splitter stage,
+ * which is active before producing the sequences.
+ * ZSTD_c_splitAfterSequences controls the next splitter stage,
+ * which is active after sequence production.
+ * Note that both can be combined.
+ * Allowed values are between 0 and ZSTD_BLOCKSPLITTER_LEVEL_MAX included.
+ * 0 means "auto", which will select a value depending on current ZSTD_c_strategy.
+ * 1 means no splitting.
+ * Then, values from 2 to 6 are sorted in increasing cpu load order.
+ *
+ * Note that currently the first block is never split,
+ * to ensure expansion guarantees in presence of incompressible data.
+ */
+#define ZSTD_BLOCKSPLITTER_LEVEL_MAX 6
+#define ZSTD_c_blockSplitterLevel ZSTD_c_experimentalParam20
+
+/* ZSTD_c_splitAfterSequences
+ * This is a stronger splitter algorithm,
+ * based on actual sequences previously produced by the selected parser.
+ * It's also slower, and as a consequence, mostly used for high compression levels.
+ * While the post-splitter does overlap with the pre-splitter,
+ * both can nonetheless be combined,
+ * notably with ZSTD_c_blockSplitterLevel at ZSTD_BLOCKSPLITTER_LEVEL_MAX,
+ * resulting in higher compression ratio than just one of them.
+ *
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never use block splitter.
* Set to ZSTD_ps_enable to always use block splitter.
@@ -2140,10 +2248,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
* block splitting based on the compression parameters.
*/
-#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13
+#define ZSTD_c_splitAfterSequences ZSTD_c_experimentalParam13
/* ZSTD_c_useRowMatchFinder
- * Controlled with ZSTD_paramSwitch_e enum.
+ * Controlled with ZSTD_ParamSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never use row-based matchfinder.
* Set to ZSTD_ps_enable to force usage of row-based matchfinder.
@@ -2175,7 +2283,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
/* ZSTD_c_prefetchCDictTables
- * Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto.
+ * Controlled with ZSTD_ParamSwitch_e enum. Default is ZSTD_ps_auto.
*
* In some situations, zstd uses CDict tables in-place rather than copying them
* into the working context. (See docs on ZSTD_dictAttachPref_e above for details).
@@ -2219,19 +2327,21 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper
* bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make
* compressBound() inaccurate). Only currently meant to be used for testing.
- *
*/
#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18
-/* ZSTD_c_searchForExternalRepcodes
- * This parameter affects how zstd parses external sequences, such as sequences
- * provided through the compressSequences() API or from an external block-level
- * sequence producer.
+/* ZSTD_c_repcodeResolution
+ * This parameter only has an effect if ZSTD_c_blockDelimiters is
+ * set to ZSTD_sf_explicitBlockDelimiters (may change in the future).
+ *
+ * This parameter affects how zstd parses external sequences,
+ * provided via the ZSTD_compressSequences*() API
+ * or from an external block-level sequence producer.
*
- * If set to ZSTD_ps_enable, the library will check for repeated offsets in
+ * If set to ZSTD_ps_enable, the library will check for repeated offsets within
* external sequences, even if those repcodes are not explicitly indicated in
* the "rep" field. Note that this is the only way to exploit repcode matches
- * while using compressSequences() or an external sequence producer, since zstd
+ * while using compressSequences*() or an external sequence producer, since zstd
* currently ignores the "rep" field of external sequences.
*
* If set to ZSTD_ps_disable, the library will not exploit repeated offsets in
@@ -2240,12 +2350,11 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* compression ratio.
*
* The default value is ZSTD_ps_auto, for which the library will enable/disable
- * based on compression level.
- *
- * Note: for now, this param only has an effect if ZSTD_c_blockDelimiters is
- * set to ZSTD_sf_explicitBlockDelimiters. That may change in the future.
+ * based on compression level (currently: level<10 disables, level>=10 enables).
*/
-#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19
+#define ZSTD_c_repcodeResolution ZSTD_c_experimentalParam19
+#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 /* older name */
+
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
@@ -2952,7 +3061,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
>0 : `srcSize` is too small, please provide at least result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,
such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
As a consequence, check that values remain within valid application range.
@@ -3082,8 +3191,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_
ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
-#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
-
#if defined (__cplusplus)
}
#endif
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
diff --git a/lib/zstd_errors.h b/lib/zstd_errors.h
index dc75eeebad9..8ebc95cbb2a 100644
--- a/lib/zstd_errors.h
+++ b/lib/zstd_errors.h
@@ -15,10 +15,6 @@
extern "C" {
#endif
-/*===== dependency =====*/
-#include <stddef.h>   /* size_t */
-
-
/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
#ifndef ZSTDERRORLIB_VISIBLE
/* Backwards compatibility with old macro name */
@@ -80,6 +76,7 @@ typedef enum {
ZSTD_error_tableLog_tooLarge = 44,
ZSTD_error_maxSymbolValue_tooLarge = 46,
ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_cannotProduce_uncompressedBlock = 49,
ZSTD_error_stabilityCondition_notRespected = 50,
ZSTD_error_stage_wrong = 60,
ZSTD_error_init_missing = 62,
@@ -100,10 +97,6 @@ typedef enum {
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;
-/*! ZSTD_getErrorCode() :
- convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
- which can be used to compare with enum list published above */
-ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
diff --git a/programs/Makefile b/programs/Makefile
index 4dcd84105bb..f4af5e98134 100644
--- a/programs/Makefile
+++ b/programs/Makefile
@@ -232,7 +232,7 @@ zstd-dll : zstd
.PHONY: zstd-pgo
zstd-pgo : LLVM_PROFDATA?=llvm-profdata
zstd-pgo : PROF_GENERATE_FLAGS=-fprofile-generate $(if $(findstring gcc,$(CC)),-fprofile-dir=.)
-zstd-pgo : PROF_USE_FLAGS=-fprofile-use $(if $(findstring gcc,$(CC)),-fprofile-dir=. -Werror=missing-profile -Wno-error=coverage-mismatch)
+zstd-pgo : PROF_USE_FLAGS=-fprofile-use $(if $(findstring gcc,$(CC)),-fprofile-dir=. -Wno-error=missing-profile -Wno-error=coverage-mismatch)
zstd-pgo :
$(MAKE) clean HASH_DIR=$(HASH_DIR)
$(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_GENERATE_FLAGS)"
@@ -345,7 +345,7 @@ include $(wildcard $(DEPFILES))
#-----------------------------------------------------------------------------
# make install is validated only for Linux, macOS, BSD, Hurd and Solaris targets
#-----------------------------------------------------------------------------
-ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT CYGWIN_NT))
+ifneq (,$(filter Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT% CYGWIN_NT%,$(UNAME)))
HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0)
EGREP_OPTIONS ?=
@@ -388,14 +388,14 @@ datarootdir ?= $(PREFIX)/share
mandir ?= $(datarootdir)/man
man1dir ?= $(mandir)/man1
-ifneq (,$(filter $(UNAME),OpenBSD FreeBSD NetBSD DragonFly SunOS))
+ifneq (,$(filter OpenBSD NetBSD DragonFly SunOS,$(UNAME)))
MANDIR ?= $(PREFIX)/man
MAN1DIR ?= $(MANDIR)/man1
else
MAN1DIR ?= $(man1dir)
endif
-ifneq (,$(filter $(UNAME),SunOS))
+ifneq (,$(filter SunOS,$(UNAME)))
INSTALL ?= ginstall
else
INSTALL ?= install
diff --git a/programs/README.md b/programs/README.md
index 1b9f47cbba9..43ef07a45be 100644
--- a/programs/README.md
+++ b/programs/README.md
@@ -129,89 +129,131 @@ CLI includes in-memory compression benchmark module for zstd.
The benchmark is conducted using given filenames. The files are read into memory and joined together.
It makes benchmark more precise as it eliminates I/O overhead.
Multiple filenames can be supplied, as multiple parameters, with wildcards,
-or names of directories can be used as parameters with `-r` option.
+or directory names can be used with `-r` option.
+If no file is provided, the benchmark will use procedurally generated "lorem ipsum" content.
The benchmark measures ratio, compressed size, compression and decompression speed.
One can select compression levels starting from `-b` and ending with `-e`.
The `-i` parameter selects minimal time used for each of tested levels.
+The benchmark can also be used to test specific parameters,
+such as number of threads (`-T#`), or advanced parameters (`--zstd=#`), or dictionary compression (`-D DICTIONARY`),
+and many other options available on the command line for regular compression and decompression.
+
### Usage of Command Line Interface
The full list of options can be obtained with `-h` or `-H` parameter:
```
-Usage :
- zstd [args] [FILE(s)] [-o file]
-
-FILE : a filename
- with no FILE, or when FILE is - , read standard input
-Arguments :
- -# : # compression level (1-19, default: 3)
- -d : decompression
- -D DICT: use DICT as Dictionary for compression or decompression
- -o file: result stored into `file` (only 1 output file)
- -f : overwrite output without prompting, also (de)compress links
---rm : remove source file(s) after successful de/compression
- -k : preserve source file(s) (default)
- -h/-H : display help/long help and exit
-
-Advanced arguments :
- -V : display Version number and exit
- -c : write to standard output (even if it is the console)
- -v : verbose mode; specify multiple times to increase verbosity
- -q : suppress warnings; specify twice to suppress errors too
---no-progress : do not display the progress counter
- -r : operate recursively on directories
---filelist FILE : read list of files to operate upon from FILE
---output-dir-flat DIR : processed files are stored into DIR
---output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure
---[no-]asyncio : use asynchronous IO (default: enabled)
---[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled). If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate).
--- : All arguments after "--" are treated as files
-
-Advanced compression arguments :
---ultra : enable levels beyond 19, up to 22 (requires more memory)
---long[=#]: enable long distance matching with given window log (default: 27)
---fast[=#]: switch to very fast compression levels (default: 1)
---adapt : dynamically adapt compression level to I/O conditions
---patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine
- -T# : spawns # compression threads (default: 1, 0==# cores)
- -B# : select size of each job (default: 0==automatic)
---single-thread : use a single thread for both I/O and compression (result slightly different than -T1)
---rsyncable : compress using a rsync-friendly method (-B sets block size)
---exclude-compressed: only compress files that are not already compressed
---stream-size=# : specify size of streaming input from `stdin`
---size-hint=# optimize compression parameters for streaming input of approximately this size
---target-compressed-block-size=# : generate compressed block of approximately targeted size
---no-dictID : don't write dictID into header (dictionary compression only)
---[no-]compress-literals : force (un)compressed literals
---format=zstd : compress files to the .zst format (default)
---format=gzip : compress files to the .gz format
---format=xz : compress files to the .xz format
---format=lzma : compress files to the .lzma format
---format=lz4 : compress files to the .lz4 format
-
-Advanced decompression arguments :
- -l : print information about zstd compressed files
---test : test compressed file integrity
- -M# : Set a memory usage limit for decompression
---[no-]sparse : sparse mode (default: disabled)
-
-Dictionary builder :
---train ## : create a dictionary from a training set of files
---train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] : use the cover algorithm with optional args
---train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] : use the fast cover algorithm with optional args
---train-legacy[=s=#] : use the legacy algorithm with selectivity (default: 9)
- -o DICT : DICT is dictionary name (default: dictionary)
---maxdict=# : limit dictionary to specified size (default: 112640)
---dictID=# : force dictionary ID to specified value (default: random)
-
-Benchmark arguments :
- -b# : benchmark file(s), using # compression level (default: 3)
- -e# : test all compression levels successively from -b# to -e# (default: 1)
- -i# : minimum evaluation time in seconds (default: 3s)
- -B# : cut file into independent chunks of size # (default: no chunking)
- -S : output one benchmark result per input file (default: consolidated result)
---priority=rt : set process priority to real-time
+*** Zstandard CLI (64-bit) v1.5.6, by Yann Collet ***
+
+Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided.
+
+Usage: zstd [OPTIONS...] [INPUT... | -] [-o OUTPUT]
+
+Options:
+ -o OUTPUT Write output to a single file, OUTPUT.
+ -k, --keep Preserve INPUT file(s). [Default]
+ --rm Remove INPUT file(s) after successful (de)compression.
+
+ -# Desired compression level, where `#` is a number between 1 and 19;
+ lower numbers provide faster compression, higher numbers yield
+ better compression ratios. [Default: 3]
+
+ -d, --decompress Perform decompression.
+ -D DICT Use DICT as the dictionary for compression or decompression.
+
+ -f, --force Disable input and output checks. Allows overwriting existing files,
+ receiving input from the console, printing output to STDOUT, and
+ operating on links, block devices, etc. Unrecognized formats will be
+ passed through as-is.
+
+ -h Display short usage and exit.
+ -H, --help Display full help and exit.
+ -V, --version Display the program version and exit.
+
+Advanced options:
+ -c, --stdout Write to STDOUT (even if it is a console) and keep the INPUT file(s).
+
+ -v, --verbose Enable verbose output; pass multiple times to increase verbosity.
+ -q, --quiet Suppress warnings; pass twice to suppress errors.
+ --trace LOG Log tracing information to LOG.
+
+ --[no-]progress Forcibly show/hide the progress counter. NOTE: Any (de)compressed
+ output to terminal will mix with progress counter text.
+
+ -r Operate recursively on directories.
+ --filelist LIST Read a list of files to operate on from LIST.
+ --output-dir-flat DIR Store processed files in DIR.
+ --output-dir-mirror DIR Store processed files in DIR, respecting original directory structure.
+ --[no-]asyncio Use asynchronous IO. [Default: Enabled]
+
+ --[no-]check Add XXH64 integrity checksums during compression. [Default: Add, Validate]
+ If `-d` is present, ignore/validate checksums during decompression.
+
+ -- Treat remaining arguments after `--` as files.
+
+Advanced compression options:
+ --ultra Enable levels beyond 19, up to 22; requires more memory.
+ --fast[=#] Switch to very fast compression levels. [Default: 1]
+ --adapt Dynamically adapt compression level to I/O conditions.
+ --long[=#] Enable long distance matching with window log #. [Default: 27]
+ --patch-from=REF Use REF as the reference point for Zstandard's diff engine.
+
+ -T# Spawn # compression threads. [Default: 1; pass 0 for core count.]
+ --single-thread Share a single thread for I/O and compression (slightly different than `-T1`).
+ --auto-threads={physical|logical}
+ Use physical/logical cores when using `-T0`. [Default: Physical]
+
+ -B# Set job size to #. [Default: 0 (automatic)]
+ --rsyncable Compress using a rsync-friendly method (`-B` sets block size).
+
+ --exclude-compressed Only compress files that are not already compressed.
+
+ --stream-size=# Specify size of streaming input from STDIN.
+ --size-hint=# Optimize compression parameters for streaming input of approximately size #.
+ --target-compressed-block-size=#
+ Generate compressed blocks of approximately size #.
+
+ --no-dictID Don't write `dictID` into the header (dictionary compression only).
+ --[no-]compress-literals Force (un)compressed literals.
+ --[no-]row-match-finder Explicitly enable/disable the fast, row-based matchfinder for
+ the 'greedy', 'lazy', and 'lazy2' strategies.
+
+ --format=zstd Compress files to the `.zst` format. [Default]
+ --[no-]mmap-dict Memory-map dictionary file rather than mallocing and loading all at once.
+ --format=gzip Compress files to the `.gz` format.
+ --format=xz Compress files to the `.xz` format.
+ --format=lzma Compress files to the `.lzma` format.
+ --format=lz4 Compress files to the `.lz4` format.
+
+Advanced decompression options:
+ -l Print information about Zstandard-compressed files.
+ --test Test compressed file integrity.
+ -M# Set the memory usage limit to # megabytes.
+ --[no-]sparse Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.]
+ --[no-]pass-through Pass through uncompressed files as-is. [Default: Disabled]
+
+Dictionary builder:
+ --train Create a dictionary from a training set of files.
+
+ --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]
+ Use the cover algorithm (with optional arguments).
+ --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]]
+ Use the fast cover algorithm (with optional arguments).
+
+ --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: 9]
+ -o NAME Use NAME as dictionary name. [Default: dictionary]
+ --maxdict=# Limit dictionary to specified size #. [Default: 112640]
+ --dictID=# Force dictionary ID to #. [Default: Random]
+
+Benchmark options:
+ -b# Perform benchmarking with compression level #. [Default: 3]
+ -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1]
+ -i# Set the minimum evaluation time to # seconds. [Default: 3]
+ -B# Cut file into independent chunks of size #. [Default: No chunking]
+ -S Output one benchmark result per input file. [Default: Consolidated result]
+ -D DICT Benchmark using dictionary DICT.
+ --priority=rt Set process priority to real-time.
```
### Passing parameters through Environment Variables
diff --git a/programs/benchfn.h b/programs/benchfn.h
index 1bd93d13519..3fc6e0d0455 100644
--- a/programs/benchfn.h
+++ b/programs/benchfn.h
@@ -15,17 +15,12 @@
* or detecting and returning an error
*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#ifndef BENCH_FN_H_23876
#define BENCH_FN_H_23876
/* === Dependencies === */
#include <stddef.h> /* size_t */
-
/* ==== Benchmark any function, iterated on a set of blocks ==== */
/* BMK_runTime_t: valid result return type */
@@ -175,9 +170,4 @@ typedef union {
} BMK_timedFnState_shell;
BMK_timedFnState_t* BMK_initStatic_timedFnState(void* buffer, size_t size, unsigned total_ms, unsigned run_ms);
-
#endif /* BENCH_FN_H_23876 */
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/programs/benchzstd.c b/programs/benchzstd.c
index 29ee595c174..f9274a5172c 100644
--- a/programs/benchzstd.c
+++ b/programs/benchzstd.c
@@ -139,52 +139,61 @@ static const size_t maxMemory = (sizeof(size_t) == 4)
return r; \
}
-/* replacement for snprintf(), which is not supported by C89
- * sprintf() would be the supported one, but it's labelled unsafe,
- * so some modern static analyzer will flag it as such, making it unusable.
- * formatString_u() replaces snprintf() for the specific case where there are only %u arguments */
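+/* uintSize() : @return the number of decimal digits required to print @value (always >= 1) */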
+static size_t uintSize(unsigned value)
+{
+ size_t size = 1;
+ while (value >= 10) {
+ size++;
+ value /= 10;
+ }
+ return size;
+}
+
+/* Note: presume @buffer is large enough */
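+/* Writes the decimal digits of @value into @buffer, most significant digit first; no trailing '\0' is added */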
+static void writeUint_varLen(char* buffer, size_t capacity, unsigned value)
+{
+ int endPos = (int)uintSize(value) - 1;
+ assert(uintSize(value) >= 1);
+ assert(uintSize(value) < capacity); (void)capacity;
+ while (endPos >= 0) {
+ char c = '0' + (char)(value % 10);
+ buffer[endPos--] = c;
+ value /= 10;
+ }
+}
+
+/* replacement for snprintf(), which is not supported by C89.
+ * sprintf() would be the supported one, but it's labelled unsafe:
+ * modern static analyzers will flag sprintf() as dangerous, making it unusable.
+ * formatString_u() replaces snprintf() for the specific case where there is only one %u argument */
static int formatString_u(char* buffer, size_t buffer_size, const char* formatString, unsigned int value)
{
+ size_t const valueSize = uintSize(value);
size_t written = 0;
int i;
- assert(value <= 100);
- for (i = 0; formatString[i] != '\0' && written < buffer_size - 1; ++i) {
+ for (i = 0; formatString[i] != '\0' && written < buffer_size - 1; i++) {
if (formatString[i] != '%') {
buffer[written++] = formatString[i];
continue;
}
- if (formatString[++i] == 'u') {
- /* Handle single digit */
- if (value < 10) {
- buffer[written++] = '0' + (char)value;
- } else if (value < 100) {
- /* Handle two digits */
- if (written >= buffer_size - 2) {
- return -1; /* buffer overflow */
- }
- buffer[written++] = '0' + (char)(value / 10);
- buffer[written++] = '0' + (char)(value % 10);
- } else { /* 100 */
- if (written >= buffer_size - 3) {
- return -1; /* buffer overflow */
- }
- buffer[written++] = '1';
- buffer[written++] = '0';
- buffer[written++] = '0';
- }
+ i++;
+ if (formatString[i] == 'u') {
+ if (written + valueSize >= buffer_size) abort(); /* buffer not large enough */
+ writeUint_varLen(buffer + written, buffer_size - written, value);
+ written += valueSize;
} else if (formatString[i] == '%') { /* Check for escaped percent sign */
buffer[written++] = '%';
} else {
- return -1; /* unsupported format */
+ abort(); /* unsupported format */
}
}
if (written < buffer_size) {
buffer[written] = '\0';
} else {
- buffer[0] = '\0'; /* Handle truncation */
+ abort(); /* buffer not large enough */
}
return (int)written;
@@ -624,7 +633,7 @@ static BMK_benchOutcome_t BMK_benchMemAdvancedNoAlloc(
}
{
- int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
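+ /* nb of fractional digits, chosen so the printed ratio keeps roughly 4 significant digits */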
+ int const ratioDigits = 1 + (ratio < 100.) + (ratio < 10.);
assert(cSize < UINT_MAX);
OUTPUTLEVEL(
2,
@@ -633,7 +642,7 @@ static BMK_benchOutcome_t BMK_benchMemAdvancedNoAlloc(
displayName,
(unsigned)srcSize,
(unsigned)cSize,
- ratioAccuracy,
+ ratioDigits,
ratio,
benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1,
(double)benchResult.cSpeed / MB_UNIT);
@@ -660,7 +669,7 @@ static BMK_benchOutcome_t BMK_benchMemAdvancedNoAlloc(
}
{
- int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
+ int const ratioDigits = 1 + (ratio < 100.) + (ratio < 10.);
OUTPUTLEVEL(
2,
"%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s, %6.1f MB/s\r",
@@ -668,7 +677,7 @@ static BMK_benchOutcome_t BMK_benchMemAdvancedNoAlloc(
displayName,
(unsigned)srcSize,
(unsigned)cSize,
- ratioAccuracy,
+ ratioDigits,
ratio,
benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1,
(double)benchResult.cSpeed / MB_UNIT,
@@ -919,12 +928,13 @@ BMK_benchOutcome_t BMK_benchMem(
&adv);
}
-static BMK_benchOutcome_t BMK_benchCLevel(
+/* @return: 0 on success, !0 if error */
+static int BMK_benchCLevels(
const void* srcBuffer,
size_t benchedSize,
const size_t* fileSizes,
unsigned nbFiles,
- int cLevel,
+ int startCLevel, int endCLevel,
const ZSTD_compressionParameters* comprParams,
const void* dictBuffer,
size_t dictBufferSize,
@@ -932,12 +942,22 @@ static BMK_benchOutcome_t BMK_benchCLevel(
const char* displayName,
BMK_advancedParams_t const* const adv)
{
+ int level;
const char* pch = strrchr(displayName, '\\'); /* Windows */
if (!pch)
pch = strrchr(displayName, '/'); /* Linux */
if (pch)
displayName = pch + 1;
+ if (endCLevel > ZSTD_maxCLevel()) {
+ DISPLAYLEVEL(1, "Invalid Compression Level \n");
+ return 15;
+ }
+ if (endCLevel < startCLevel) {
+ DISPLAYLEVEL(1, "Invalid Compression Level Range \n");
+ return 15;
+ }
+
if (adv->realTime) {
DISPLAYLEVEL(2, "Note : switching to real-time priority \n");
SET_REALTIME_PRIORITY;
@@ -951,25 +971,29 @@ static BMK_benchOutcome_t BMK_benchCLevel(
adv->nbSeconds,
(unsigned)(adv->blockSize >> 10));
- return BMK_benchMemAdvanced(
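+ /* benchmark every level in [startCLevel, endCLevel], stopping at the first error */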
+ for (level = startCLevel; level <= endCLevel; level++) {
+ BMK_benchOutcome_t res = BMK_benchMemAdvanced(
srcBuffer,
benchedSize,
NULL,
0,
fileSizes,
nbFiles,
- cLevel,
+ level,
comprParams,
dictBuffer,
dictBufferSize,
displayLevel,
displayName,
adv);
+ if (!BMK_isSuccessful_benchOutcome(res)) return 1;
+ }
+ return 0;
}
int BMK_syntheticTest(
- int cLevel,
double compressibility,
+ int startingCLevel, int endCLevel,
const ZSTD_compressionParameters* compressionParams,
int displayLevel,
const BMK_advancedParams_t* adv)
@@ -977,18 +1001,11 @@ int BMK_syntheticTest(
char nameBuff[20] = { 0 };
const char* name = nameBuff;
size_t const benchedSize = adv->blockSize ? adv->blockSize : 10000000;
- void* srcBuffer;
- BMK_benchOutcome_t res;
-
- if (cLevel > ZSTD_maxCLevel()) {
- DISPLAYLEVEL(1, "Invalid Compression Level");
- return 15;
- }
/* Memory allocation */
- srcBuffer = malloc(benchedSize);
+ void* const srcBuffer = malloc(benchedSize);
if (!srcBuffer) {
- DISPLAYLEVEL(1, "allocation error : not enough memory");
+ DISPLAYLEVEL(1, "allocation error : not enough memory \n");
return 16;
}
@@ -1006,23 +1023,21 @@ int BMK_syntheticTest(
}
/* Bench */
- res = BMK_benchCLevel(
- srcBuffer,
- benchedSize,
- &benchedSize /* ? */,
- 1 /* ? */,
- cLevel,
- compressionParams,
- NULL,
- 0, /* dictionary */
- displayLevel,
- name,
- adv);
-
- /* clean up */
- free(srcBuffer);
-
- return !BMK_isSuccessful_benchOutcome(res);
+ { int res = BMK_benchCLevels(
+ srcBuffer,
+ benchedSize,
+ &benchedSize,
+ 1,
+ startingCLevel, endCLevel,
+ compressionParams,
+ NULL,
+ 0, /* dictionary */
+ displayLevel,
+ name,
+ adv);
+ free(srcBuffer);
+ return res;
+ }
}
static size_t BMK_findMaxMem(U64 requiredMem)
@@ -1058,11 +1073,12 @@ static int BMK_loadFiles(
size_t pos = 0, totalSize = 0;
unsigned n;
for (n = 0; n < nbFiles; n++) {
+ const char* const filename = fileNamesTable[n];
U64 fileSize = UTIL_getFileSize(
- fileNamesTable[n]); /* last file may be shortened */
- if (UTIL_isDirectory(fileNamesTable[n])) {
+ filename); /* last file may be shortened */
+ if (UTIL_isDirectory(filename)) {
DISPLAYLEVEL(
- 2, "Ignoring %s directory... \n", fileNamesTable[n]);
+ 2, "Ignoring %s directory... \n", filename);
fileSizes[n] = 0;
continue;
}
@@ -1070,25 +1086,29 @@ static int BMK_loadFiles(
DISPLAYLEVEL(
2,
"Cannot evaluate size of %s, ignoring ... \n",
- fileNamesTable[n]);
+ filename);
fileSizes[n] = 0;
continue;
}
- {
- FILE* const f = fopen(fileNamesTable[n], "rb");
- if (f == NULL)
+ if (fileSize > bufferSize - pos) {
+ /* buffer too small - limit quantity loaded */
+ fileSize = bufferSize - pos;
+ nbFiles = n; /* stop after this file */
+ }
+
+ { FILE* const f = fopen(filename, "rb");
+ if (f == NULL) {
RETURN_ERROR_INT(
- 10, "impossible to open file %s", fileNamesTable[n]);
- OUTPUTLEVEL(2, "Loading %s... \r", fileNamesTable[n]);
- if (fileSize > bufferSize - pos)
- fileSize = bufferSize - pos,
- nbFiles = n; /* buffer too small - stop after this file */
- {
- size_t const readSize =
+ 10, "cannot open file %s", filename);
+ }
+ OUTPUTLEVEL(2, "Loading %s... \r", filename);
+ { size_t const readSize =
fread(((char*)buffer) + pos, 1, (size_t)fileSize, f);
- if (readSize != (size_t)fileSize)
+ if (readSize != (size_t)fileSize) {
+ fclose(f);
RETURN_ERROR_INT(
- 11, "could not read %s", fileNamesTable[n]);
+ 11, "invalid read %s", filename);
+ }
pos += readSize;
}
fileSizes[n] = (size_t)fileSize;
@@ -1106,7 +1126,7 @@ int BMK_benchFilesAdvanced(
const char* const* fileNamesTable,
unsigned nbFiles,
const char* dictFileName,
- int cLevel,
+ int startCLevel, int endCLevel,
const ZSTD_compressionParameters* compressionParams,
int displayLevel,
const BMK_advancedParams_t* adv)
@@ -1116,7 +1136,7 @@ int BMK_benchFilesAdvanced(
void* dictBuffer = NULL;
size_t dictBufferSize = 0;
size_t* fileSizes = NULL;
- BMK_benchOutcome_t res;
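+ /* res: function error code; initialized to error, only cleared when benchmarking completes successfully */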
+ int res = 1;
U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
if (!nbFiles) {
@@ -1124,7 +1144,7 @@ int BMK_benchFilesAdvanced(
return 13;
}
- if (cLevel > ZSTD_maxCLevel()) {
+ if (endCLevel > ZSTD_maxCLevel()) {
DISPLAYLEVEL(1, "Invalid Compression Level");
return 14;
}
@@ -1178,7 +1198,6 @@ int BMK_benchFilesAdvanced(
1 /*?*/,
displayLevel);
if (errorCode) {
- res = BMK_benchOutcome_error();
goto _cleanUp;
}
}
@@ -1210,7 +1229,6 @@ int BMK_benchFilesAdvanced(
nbFiles,
displayLevel);
if (errorCode) {
- res = BMK_benchOutcome_error();
goto _cleanUp;
}
}
@@ -1219,15 +1237,14 @@ int BMK_benchFilesAdvanced(
{
char mfName[20] = { 0 };
formatString_u(mfName, sizeof(mfName), " %u files", nbFiles);
- {
- const char* const displayName =
+ { const char* const displayName =
(nbFiles > 1) ? mfName : fileNamesTable[0];
- res = BMK_benchCLevel(
+ res = BMK_benchCLevels(
srcBuffer,
benchedSize,
fileSizes,
nbFiles,
- cLevel,
+ startCLevel, endCLevel,
compressionParams,
dictBuffer,
dictBufferSize,
@@ -1241,7 +1258,7 @@ int BMK_benchFilesAdvanced(
free(srcBuffer);
free(dictBuffer);
free(fileSizes);
- return !BMK_isSuccessful_benchOutcome(res);
+ return res;
}
int BMK_benchFiles(
@@ -1257,7 +1274,7 @@ int BMK_benchFiles(
fileNamesTable,
nbFiles,
dictFileName,
- cLevel,
+ cLevel, cLevel,
compressionParams,
displayLevel,
&adv);
diff --git a/programs/benchzstd.h b/programs/benchzstd.h
index ad3088cd43b..4fd0e5a8af4 100644
--- a/programs/benchzstd.h
+++ b/programs/benchzstd.h
@@ -14,10 +14,6 @@
* and display progress result and final summary
*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#ifndef BENCH_ZSTD_H_3242387
#define BENCH_ZSTD_H_3242387
@@ -26,7 +22,6 @@ extern "C" {
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
#include "../lib/zstd.h" /* ZSTD_compressionParameters */
-
/* === Constants === */
#define MB_UNIT 1000000
@@ -109,7 +104,7 @@ typedef struct {
int ldmHashLog;
int ldmBucketSizeLog;
int ldmHashRateLog;
- ZSTD_paramSwitch_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
int useRowMatchFinder; /* use row-based matchfinder if possible */
} BMK_advancedParams_t;
@@ -122,12 +117,13 @@ BMK_advancedParams_t BMK_initAdvancedParams(void);
int BMK_benchFilesAdvanced(
const char* const * fileNamesTable, unsigned nbFiles,
const char* dictFileName,
- int cLevel, const ZSTD_compressionParameters* compressionParams,
+ int startCLevel, int endCLevel,
+ const ZSTD_compressionParameters* compressionParams,
int displayLevel, const BMK_advancedParams_t* adv);
/*! BMK_syntheticTest() -- called from zstdcli */
-/* Generates a sample with datagen, using compressibility argument */
-/* @cLevel - compression level to benchmark, errors if invalid
+/* Generates a sample with datagen, using @compressibility argument
+ * @cLevel - compression level to benchmark, errors if invalid
* @compressibility - determines compressibility of sample, range [0.0 - 1.0]
* if @compressibility < 0.0, uses the lorem ipsum generator
* @compressionParams - basic compression Parameters
@@ -135,7 +131,8 @@ int BMK_benchFilesAdvanced(
* @adv - see advanced_Params_t
* @return: 0 on success, !0 on error
*/
-int BMK_syntheticTest(int cLevel, double compressibility,
+int BMK_syntheticTest(double compressibility,
+ int startingCLevel, int endCLevel,
const ZSTD_compressionParameters* compressionParams,
int displayLevel, const BMK_advancedParams_t* adv);
@@ -192,7 +189,3 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
#endif /* BENCH_ZSTD_H_3242387 */
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/programs/datagen.h b/programs/datagen.h
index ca72700063f..461fb716c43 100644
--- a/programs/datagen.h
+++ b/programs/datagen.h
@@ -14,6 +14,10 @@
#include <stddef.h> /* size_t */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
void RDG_genStdout(unsigned long long size, double matchProba, double litProba, unsigned seed);
void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed);
/*!RDG_genBuffer
@@ -27,4 +31,8 @@ void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba
Same as RDG_genBuffer, but generates data into stdout
*/
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
+
#endif
diff --git a/programs/dibio.c b/programs/dibio.c
index 26ebe5ca1d6..7ba22d15b97 100644
--- a/programs/dibio.c
+++ b/programs/dibio.c
@@ -298,7 +298,7 @@ static fileStats DiB_fileStats(const char** fileNamesTable, int nbFiles, size_t
fs.oneSampleTooLarge |= (fileSize > 2*SAMPLESIZE_MAX);
/* Limit to the first SAMPLESIZE_MAX (128kB) of the file */
- DISPLAYLEVEL(3, "Sample file '%s' is too large, limiting to %d KB",
+ DISPLAYLEVEL(3, "Sample file '%s' is too large, limiting to %d KB\n",
fileNamesTable[n], SAMPLESIZE_MAX / (1 KB));
}
fs.nbSamples += 1;
diff --git a/programs/fileio.c b/programs/fileio.c
index e3012a71667..0ecca40d2ab 100644
--- a/programs/fileio.c
+++ b/programs/fileio.c
@@ -423,7 +423,7 @@ void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode) {
void FIO_setLiteralCompressionMode(
FIO_prefs_t* const prefs,
- ZSTD_paramSwitch_e mode) {
+ ZSTD_ParamSwitch_e mode) {
prefs->literalCompressionMode = mode;
}
@@ -485,7 +485,7 @@ void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value) {
prefs->passThrough = (value != 0);
}
-void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_paramSwitch_e value)
+void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_ParamSwitch_e value)
{
prefs->mmapDict = value;
}
@@ -1100,11 +1100,12 @@ static void FIO_adjustParamsForPatchFromMode(FIO_prefs_t* const prefs,
FIO_setLdmFlag(prefs, 1);
}
if (cParams.strategy >= ZSTD_btopt) {
- DISPLAYLEVEL(3, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n");
- DISPLAYLEVEL(3, "- Use --single-thread mode in the zstd cli\n");
- DISPLAYLEVEL(3, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n");
- DISPLAYLEVEL(3, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX);
- DISPLAYLEVEL(3, "Also consider playing around with searchLog and hashLog\n");
+ DISPLAYLEVEL(4, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n");
+ DISPLAYLEVEL(4, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n");
+ DISPLAYLEVEL(4, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX);
+ DISPLAYLEVEL(4, "- Set a larger LDM hashLog (e.g. --zstd=ldmHashLog=%u)\n", ZSTD_LDM_HASHLOG_MAX);
+ DISPLAYLEVEL(4, "- Set a smaller LDM rateLog (e.g. --zstd=ldmHashRateLog=%u)\n", ZSTD_LDM_HASHRATELOG_MIN);
+ DISPLAYLEVEL(4, "Also consider playing around with searchLog and hashLog\n");
}
}
@@ -1494,7 +1495,7 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx,
int compressionLevel, U64* readsize)
{
cRess_t const ress = *ressPtr;
- IOJob_t *writeJob = AIO_WritePool_acquireJob(ressPtr->writeCtx);
+ IOJob_t* writeJob = AIO_WritePool_acquireJob(ressPtr->writeCtx);
U64 compressedfilesize = 0;
ZSTD_EndDirective directive = ZSTD_e_continue;
@@ -1526,8 +1527,7 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx,
CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, prefs->streamSrcSize) );
}
- {
- int windowLog;
+ { int windowLog;
UTIL_HumanReadableSize_t windowSize;
CHECK(ZSTD_CCtx_getParameter(ress.cctx, ZSTD_c_windowLog, &windowLog));
if (windowLog == 0) {
@@ -1542,7 +1542,6 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx,
windowSize = UTIL_makeHumanReadableSize(MAX(1ULL, MIN(1ULL << windowLog, pledgedSrcSize)));
DISPLAYLEVEL(4, "Decompression will require %.*f%s of memory\n", windowSize.precision, windowSize.value, windowSize.suffix);
}
- (void)srcFileName;
/* Main compression loop */
do {
@@ -2403,7 +2402,7 @@ FIO_zstdErrorHelp(const FIO_prefs_t* const prefs,
size_t err,
const char* srcFileName)
{
- ZSTD_frameHeader header;
+ ZSTD_FrameHeader header;
/* Help message only for one specific error */
if (ZSTD_getErrorCode(err) != ZSTD_error_frameParameter_windowTooLarge)
@@ -2439,12 +2438,14 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress,
U64 alreadyDecoded) /* for multi-frames streams */
{
U64 frameSize = 0;
- IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx);
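+ /* srcFName20: filename used for progress display, possibly reduced to its last 20 characters; srcFileName itself is left untouched */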
+ const char* srcFName20 = srcFileName;
+ IOJob_t* writeJob = AIO_WritePool_acquireJob(ress->writeCtx);
+ assert(writeJob);
/* display last 20 characters only when not --verbose */
{ size_t const srcFileLength = strlen(srcFileName);
if ((srcFileLength>20) && (g_display_prefs.displayLevel<3))
- srcFileName += srcFileLength-20;
+ srcFName20 += srcFileLength-20;
}
ZSTD_DCtx_reset(ress->dctx, ZSTD_reset_session_only);
@@ -2471,19 +2472,12 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress,
AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob);
frameSize += outBuff.pos;
if (fCtx->nbFilesTotal > 1) {
- size_t srcFileNameSize = strlen(srcFileName);
- if (srcFileNameSize > 18) {
- const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15;
- DISPLAYUPDATE_PROGRESS(
- "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... ",
- fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix);
- } else {
- DISPLAYUPDATE_PROGRESS("\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ",
- fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFileName, hrs.precision, hrs.value, hrs.suffix);
- }
+ DISPLAYUPDATE_PROGRESS(
+ "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ",
+ fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFName20, hrs.precision, hrs.value, hrs.suffix);
} else {
DISPLAYUPDATE_PROGRESS("\r%-20.20s : %.*f%s... ",
- srcFileName, hrs.precision, hrs.value, hrs.suffix);
+ srcFName20, hrs.precision, hrs.value, hrs.suffix);
}
AIO_ReadPool_consumeBytes(ress->readCtx, inBuff.pos);
@@ -3208,7 +3202,7 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile)
{ U32 const magicNumber = MEM_readLE32(headerBuffer);
/* Zstandard frame */
if (magicNumber == ZSTD_MAGICNUMBER) {
- ZSTD_frameHeader header;
+ ZSTD_FrameHeader header;
U64 const frameContentSize = ZSTD_getFrameContentSize(headerBuffer, numBytesRead);
if ( frameContentSize == ZSTD_CONTENTSIZE_ERROR
|| frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN ) {
diff --git a/programs/fileio.h b/programs/fileio.h
index 224d89525dc..cb53ef53781 100644
--- a/programs/fileio.h
+++ b/programs/fileio.h
@@ -17,11 +17,6 @@
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
#include "../lib/zstd.h" /* ZSTD_* */
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
/* *************************************
* Special i/o constants
**************************************/
@@ -95,7 +90,7 @@ void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint);
void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode);
void FIO_setLiteralCompressionMode(
FIO_prefs_t* const prefs,
- ZSTD_paramSwitch_e mode);
+ ZSTD_ParamSwitch_e mode);
void FIO_setProgressSetting(FIO_progressSetting_e progressSetting);
void FIO_setNotificationLevel(int level);
@@ -106,7 +101,7 @@ void FIO_setContentSize(FIO_prefs_t* const prefs, int value);
void FIO_displayCompressionParameters(const FIO_prefs_t* prefs);
void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value);
void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value);
-void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_paramSwitch_e value);
+void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_ParamSwitch_e value);
/* FIO_ctx_t functions */
void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value);
@@ -173,9 +168,4 @@ char const* FIO_zlibVersion(void);
char const* FIO_lz4Version(void);
char const* FIO_lzmaVersion(void);
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* FILEIO_H_23981798732 */
diff --git a/programs/fileio_asyncio.c b/programs/fileio_asyncio.c
index ae6db69e0a9..42a47201656 100644
--- a/programs/fileio_asyncio.c
+++ b/programs/fileio_asyncio.c
@@ -268,7 +268,7 @@ static void AIO_IOPool_destroy(IOPoolCtx_t* ctx) {
/* AIO_IOPool_acquireJob:
* Returns an available io job to be used for a future io. */
static IOJob_t* AIO_IOPool_acquireJob(IOPoolCtx_t* ctx) {
- IOJob_t *job;
+ IOJob_t* job;
assert(ctx->file != NULL || ctx->prefs->testMode);
AIO_IOPool_lockJobsMutex(ctx);
assert(ctx->availableJobsCount > 0);
diff --git a/programs/fileio_asyncio.h b/programs/fileio_asyncio.h
index feb25a3f9e9..d4980ef5b01 100644
--- a/programs/fileio_asyncio.h
+++ b/programs/fileio_asyncio.h
@@ -22,10 +22,6 @@
#ifndef ZSTD_FILEIO_ASYNCIO_H
#define ZSTD_FILEIO_ASYNCIO_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "../lib/common/mem.h" /* U32, U64 */
#include "fileio_types.h"
#include "platform.h"
@@ -196,8 +192,4 @@ FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t *ctx);
* Closes the current set file. Waits for all current enqueued tasks to complete and resets state. */
int AIO_ReadPool_closeFile(ReadPoolCtx_t *ctx);
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* ZSTD_FILEIO_ASYNCIO_H */
diff --git a/programs/fileio_common.h b/programs/fileio_common.h
index 55491b8e328..8aa70edea81 100644
--- a/programs/fileio_common.h
+++ b/programs/fileio_common.h
@@ -11,10 +11,6 @@
#ifndef ZSTD_FILEIO_COMMON_H
#define ZSTD_FILEIO_COMMON_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
#include "../lib/common/mem.h" /* U32, U64 */
#include "fileio_types.h"
#include "platform.h"
@@ -28,11 +24,14 @@ extern "C" {
#define GB *(1U<<30)
#undef MAX
#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#undef MIN /* in case it would be already defined */
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
extern FIO_display_prefs_t g_display_prefs;
-#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
-#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__)
+#define DISPLAY_F(f, ...) fprintf((f), __VA_ARGS__)
+#define DISPLAYOUT(...) DISPLAY_F(stdout, __VA_ARGS__)
+#define DISPLAY(...) DISPLAY_F(stderr, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...) { if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
extern UTIL_time_t g_displayClock;
@@ -56,10 +55,6 @@ extern UTIL_time_t g_displayClock;
#define DISPLAYUPDATE_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYUPDATE(1, __VA_ARGS__); }}
#define DISPLAY_SUMMARY(...) { if (SHOULD_DISPLAY_SUMMARY()) { DISPLAYLEVEL(1, __VA_ARGS__); } }
-#undef MIN /* in case it would be already defined */
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-
-
#define EXM_THROW(error, ...) \
{ \
DISPLAYLEVEL(1, "zstd: "); \
@@ -80,12 +75,16 @@ extern UTIL_time_t g_displayClock;
/* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW */
-#if defined(_MSC_VER) && _MSC_VER >= 1400
+#if defined(LIBC_NO_FSEEKO)
+/* Some older libc implementations don't include these functions (e.g. Bionic < 24) */
+# define LONG_SEEK fseek
+# define LONG_TELL ftell
+#elif defined(_MSC_VER) && _MSC_VER >= 1400
# define LONG_SEEK _fseeki64
# define LONG_TELL _ftelli64
#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
-# define LONG_SEEK fseeko
-# define LONG_TELL ftello
+# define LONG_SEEK fseeko
+# define LONG_TELL ftello
#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
# define LONG_SEEK fseeko64
# define LONG_TELL ftello64
@@ -119,7 +118,4 @@ extern UTIL_time_t g_displayClock;
# define LONG_TELL ftell
#endif
-#if defined (__cplusplus)
-}
-#endif
#endif /* ZSTD_FILEIO_COMMON_H */
diff --git a/programs/fileio_types.h b/programs/fileio_types.h
index 2994a60929f..23bda4168d8 100644
--- a/programs/fileio_types.h
+++ b/programs/fileio_types.h
@@ -53,7 +53,7 @@ typedef struct FIO_prefs_s {
size_t targetCBlockSize;
int srcSizeHint;
int testMode;
- ZSTD_paramSwitch_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
/* IO preferences */
int removeSrcFile;
@@ -69,7 +69,7 @@ typedef struct FIO_prefs_s {
int contentSize;
int allowBlockDevices;
int passThrough;
- ZSTD_paramSwitch_e mmapDict;
+ ZSTD_ParamSwitch_e mmapDict;
} FIO_prefs_t;
typedef enum {FIO_mallocDict, FIO_mmapDict} FIO_dictBufferType_t;
diff --git a/programs/platform.h b/programs/platform.h
index 4d2b9490e6d..e2cc1c3e65f 100644
--- a/programs/platform.h
+++ b/programs/platform.h
@@ -11,12 +11,6 @@
#ifndef PLATFORM_H_MODULE
#define PLATFORM_H_MODULE
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
/* **************************************
* Compiler Options
****************************************/
@@ -38,7 +32,7 @@ extern "C" {
#if defined __ia64 || defined _M_IA64 /* Intel Itanium */ \
|| defined __powerpc64__ || defined __ppc64__ || defined __PPC64__ /* POWER 64-bit */ \
|| (defined __sparc && (defined __sparcv9 || defined __sparc_v9__ || defined __arch64__)) || defined __sparc64__ /* SPARC 64-bit */ \
- || defined __x86_64__s || defined _M_X64 /* x86 64-bit */ \
+ || defined __x86_64__ || defined _M_X64 /* x86 64-bit */ \
|| defined __arm64__ || defined __aarch64__ || defined __ARM64_ARCH_8__ /* ARM 64-bit */ \
|| (defined __mips && (__mips == 64 || __mips == 4 || __mips == 3)) /* MIPS 64-bit */ \
|| defined _LP64 || defined __LP64__ /* NetBSD, OpenBSD */ || defined __64BIT__ /* AIX */ || defined _ADDR64 /* Cray */ \
@@ -144,10 +138,20 @@ extern "C" {
# include <io.h> /* _isatty */
# include <windows.h> /* DeviceIoControl, HANDLE, FSCTL_SET_SPARSE */
# include <stdio.h> /* FILE */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
static __inline int IS_CONSOLE(FILE* stdStream) {
DWORD dummy;
return _isatty(_fileno(stdStream)) && GetConsoleMode((HANDLE)_get_osfhandle(_fileno(stdStream)), &dummy);
}
+
+#if defined (__cplusplus)
+}
+#endif
+
#else
# define IS_CONSOLE(stdStream) 0
#endif
@@ -210,9 +214,4 @@ static __inline int IS_CONSOLE(FILE* stdStream) {
# endif
#endif
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* PLATFORM_H_MODULE */
diff --git a/programs/timefn.h b/programs/timefn.h
index b814ff8d8da..80f72e228a3 100644
--- a/programs/timefn.h
+++ b/programs/timefn.h
@@ -11,12 +11,6 @@
#ifndef TIME_FN_H_MODULE_287987
#define TIME_FN_H_MODULE_287987
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
/*-****************************************
* Types
******************************************/
@@ -62,9 +56,4 @@ PTime UTIL_clockSpanMicro(UTIL_time_t clockStart);
#define SEC_TO_MICRO ((PTime)1000000) /* nb of microseconds in a second */
-
-#if defined (__cplusplus)
-}
-#endif
-
#endif /* TIME_FN_H_MODULE_287987 */
diff --git a/programs/util.c b/programs/util.c
index 7f65f93731f..065a35855f2 100644
--- a/programs/util.c
+++ b/programs/util.c
@@ -8,11 +8,6 @@
* You may select, at your option, one of the above-listed licenses.
*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
/*-****************************************
* Dependencies
******************************************/
@@ -1646,7 +1641,3 @@ int UTIL_countLogicalCores(void)
{
return UTIL_countCores(1);
}
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/programs/util.h b/programs/util.h
index 571d3942198..d768e766091 100644
--- a/programs/util.h
+++ b/programs/util.h
@@ -11,25 +11,26 @@
#ifndef UTIL_H_MODULE
#define UTIL_H_MODULE
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
/*-****************************************
* Dependencies
******************************************/
#include "platform.h" /* PLATFORM_POSIX_VERSION, ZSTD_NANOSLEEP_SUPPORT, ZSTD_SETPRIORITY_SUPPORT */
#include <stddef.h> /* size_t, ptrdiff_t */
+#include <stdio.h> /* FILE */
#include <sys/types.h> /* stat, utime */
#include <sys/stat.h> /* stat, chmod */
#include "../lib/common/mem.h" /* U64 */
-
+#if !(defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__))
+#include
+#endif
/*-************************************************************
-* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW
+* Fix fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW
***************************************************************/
-#if defined(_MSC_VER) && (_MSC_VER >= 1400)
+#if defined(LIBC_NO_FSEEKO)
+/* Some older libc implementations don't include these functions (e.g. Bionic < 24) */
+# define UTIL_fseek fseek
+#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
# define UTIL_fseek _fseeki64
#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
# define UTIL_fseek fseeko
@@ -39,7 +40,6 @@ extern "C" {
# define UTIL_fseek fseek
#endif
-
/*-*************************************************
* Sleep & priority functions: Windows - Posix - others
***************************************************/
@@ -88,6 +88,10 @@ extern "C" {
#endif
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/*-****************************************
* Console log
******************************************/
@@ -118,7 +122,6 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, const
#define STRDUP(s) _strdup(s)
#else
#define PATH_SEP '/'
-#include
#define STRDUP(s) strdup(s)
#endif
diff --git a/programs/zstd.1 b/programs/zstd.1
index 2b5a98511f0..5f1519f3323 100644
--- a/programs/zstd.1
+++ b/programs/zstd.1
@@ -1,566 +1,392 @@
-.
-.TH "ZSTD" "1" "March 2024" "zstd 1.5.6" "User Commands"
-.
+.TH "ZSTD" "1" "October 2024" "zstd 1.5.6" "User Commands"
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
-.
.SH "SYNOPSIS"
-\fBzstd\fR [\fIOPTIONS\fR] [\-|\fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR]
-.
+.TS
+allbox;
+\fBzstd\fR [\fIOPTIONS\fR] [\- \fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR]
+.TE
.P
\fBzstdmt\fR is equivalent to \fBzstd \-T0\fR
-.
.P
\fBunzstd\fR is equivalent to \fBzstd \-d\fR
-.
.P
\fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR
-.
.SH "DESCRIPTION"
\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip\fR(1) and \fBxz\fR(1)\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core, which remains roughly stable at all compression settings\.
-.
.P
\fBzstd\fR command line syntax is generally similar to gzip, but features the following few differences:
-.
.IP "\(bu" 4
-Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\.
-.
+Source files are preserved by default\. It's possible to remove them automatically by using the \fB\-\-rm\fR command\.
.IP "\(bu" 4
When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default\. Use \fB\-q\fR to turn them off\.
-.
.IP "\(bu" 4
\fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\.
-.
.IP "\(bu" 4
-\fBzstd\fR does not accept input from console, though it does accept \fBstdin\fR when it\'s not the console\.
-.
+\fBzstd\fR does not accept input from console, though it does accept \fBstdin\fR when it's not the console\.
.IP "\(bu" 4
-\fBzstd\fR does not store the input\'s filename or attributes, only its contents\.
-.
+\fBzstd\fR does not store the input's filename or attributes, only its contents\.
.IP "" 0
-.
.P
\fBzstd\fR processes each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal: it will display an error message and skip the file\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\.
-.
.P
Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name:
-.
.IP "\(bu" 4
When compressing, the suffix \fB\.zst\fR is appended to the source filename to get the target filename\.
-.
.IP "\(bu" 4
When decompressing, the \fB\.zst\fR suffix is removed from the source filename to get the target filename
-.
.IP "" 0
-.
.SS "Concatenation with \.zst Files"
It is possible to concatenate multiple \fB\.zst\fR files\. \fBzstd\fR will decompress such agglomerated file as if it was a single \fB\.zst\fR file\.
-.
.SH "OPTIONS"
-.
.SS "Integer Suffixes and Special Values"
In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\.
-.
.TP
\fBKiB\fR
-Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\.
-.
+Multiply the integer by 1,024 (2\e^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\.
.TP
\fBMiB\fR
-Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\.
-.
+Multiply the integer by 1,048,576 (2\e^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\.
.SS "Operation Mode"
If multiple operation mode options are given, the last one takes effect\.
-.
.TP
\fB\-z\fR, \fB\-\-compress\fR
Compress\. This is the default operation mode when no operation mode option is specified and no other operation mode is implied from the command name (for example, \fBunzstd\fR implies \fB\-\-decompress\fR)\.
-.
.TP
\fB\-d\fR, \fB\-\-decompress\fR, \fB\-\-uncompress\fR
Decompress\.
-.
.TP
\fB\-t\fR, \fB\-\-test\fR
Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout > /dev/null\fR, decompressed data is discarded and checksummed for errors\. No files are created or removed\.
-.
.TP
\fB\-b#\fR
Benchmark file(s) using compression level \fI#\fR\. See \fIBENCHMARK\fR below for a description of this operation\.
-.
.TP
\fB\-\-train FILES\fR
Use \fIFILES\fR as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. See \fIDICTIONARY BUILDER\fR below for a description of this operation\.
-.
.TP
\fB\-l\fR, \fB\-\-list\fR
-Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command\'s output can be augmented with the \fB\-v\fR modifier\.
-.
+Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command's output can be augmented with the \fB\-v\fR modifier\.
.SS "Operation Modifiers"
-.
.IP "\(bu" 4
-\fB\-#\fR: selects \fB#\fR compression level [1\-19] (default: 3)\. Higher compression levels \fIgenerally\fR produce higher compression ratio at the expense of speed and memory\. A rough rule of thumb is that compression speed is expected to be divided by 2 every 2 levels\. Technically, each level is mapped to a set of advanced parameters (that can also be modified individually, see below)\. Because the compressor\'s behavior highly depends on the content to compress, there\'s no guarantee of a smooth progression from one level to another\.
-.
+\fB\-#\fR: selects \fB#\fR compression level [1\-19] (default: 3)\. Higher compression levels \fIgenerally\fR produce higher compression ratio at the expense of speed and memory\. A rough rule of thumb is that compression speed is expected to be divided by 2 every 2 levels\. Technically, each level is mapped to a set of advanced parameters (that can also be modified individually, see below)\. Because the compressor's behavior highly depends on the content to compress, there's no guarantee of a smooth progression from one level to another\.
.IP "\(bu" 4
\fB\-\-ultra\fR: unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\.
-.
.IP "\(bu" 4
\fB\-\-fast[=#]\fR: switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\.
-.
.IP "\(bu" 4
\fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\.
-.
.IP "\(bu" 4
\fB\-\-single\-thread\fR: Use a single thread for both I/O and compression\. As compression is serialized with I/O, this can be slightly slower\. Single\-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32\-bit systems\.
-.
.IP
Note 1: this mode is the only available one when multithread support is disabled\.
-.
.IP
Note 2: this mode is different from \fB\-T1\fR, which spawns 1 compression thread in parallel with I/O\. Final compressed result is also slightly different from \fB\-T1\fR\.
-.
.IP "\(bu" 4
\fB\-\-auto\-threads={physical,logical} (default: physical)\fR: When using a default amount of threads via \fB\-T0\fR, choose the default based on the number of detected physical or logical cores\.
-.
.IP "\(bu" 4
\fB\-\-adapt[=min=#,max=#]\fR: \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MiB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\.
-.
.IP
\fINote\fR: at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\.
-.
.IP "\(bu" 4
\fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\.
-.
.IP
Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\.
-.
.IP "\(bu" 4
\fB\-D DICT\fR: use \fBDICT\fR as Dictionary to compress or decompress FILE(s)
-.
.IP "\(bu" 4
-\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that \fIwindowSize\fR > \fIsrcSize\fR\.
-.
+\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd's diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that \fIwindowSize\fR > \fIsrcSize\fR\.
.IP
Note: cannot use both this and \fB\-D\fR together\.
-.
.IP
Note: \fB\-\-long\fR mode will be automatically activated if \fIchainLog\fR < \fIfileLog\fR (\fIfileLog\fR being the \fIwindowLog\fR required to cover the whole file)\. You can also manually force it\.
-.
.IP
-Note: for all levels, you can use \fB\-\-patch\-from\fR in \fB\-\-single\-thread\fR mode to improve compression ratio at the cost of speed\.
-.
+Note: up to level 15, you can use \fB\-\-patch\-from\fR in \fB\-\-single\-thread\fR mode to improve compression ratio marginally at the cost of speed\. Using \fB\-\-single\-thread\fR above level 15 will lead to lower compression ratios\.
.IP
Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR\.
-.
.IP "\(bu" 4
-\fB\-\-rsyncable\fR: \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and a potential impact to compression speed, perceptible at higher speeds, for example when combining \fB\-\-rsyncable\fR with many parallel worker threads\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\.
-.
+\fB\-\-rsyncable\fR: \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and a potential impact to compression speed, perceptible at higher speeds, for example when combining \fB\-\-rsyncable\fR with many parallel worker threads\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don't want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\.
.IP "\(bu" 4
\fB\-C\fR, \fB\-\-[no\-]check\fR: add integrity check computed from uncompressed data (default: enabled)
-.
.IP "\(bu" 4
\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \fB\-\-content\-size\fR (meaning that the original size will be placed in the header)\.
-.
.IP "\(bu" 4
-\fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\.
-.
+\fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won't be able to check if it's correct\.
.IP "\(bu" 4
\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, \fBzstd\fR uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i\.e\. you can increase or decrease it)\.
-.
.IP
This is also used during compression when using with \fB\-\-patch\-from=\fR\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MiB)\.
-.
.IP
Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GiB\. zstd will load training samples up to the memory limit and ignore the rest\.
-.
.IP "\(bu" 4
\fB\-\-stream\-size=#\fR: Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\.
-.
.IP "\(bu" 4
\fB\-\-size\-hint=#\fR: When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\.
-.
.IP "\(bu" 4
\fB\-\-target\-compressed\-block\-size=#\fR: Attempt to produce compressed blocks of approximately this size\. This will split larger blocks in order to approach this target\. This feature is notably useful for improved latency, when the receiver can leverage receiving early incomplete data\. This parameter defines a loose target: compressed blocks will target this size "on average", but individual blocks can still be larger or smaller\. Enabling this feature can decrease compression speed by up to ~10% at level 1\. Higher levels will see smaller relative speed regression, becoming invisible at higher settings\.
-.
.IP "\(bu" 4
\fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. During decompression and when the output destination is stdout, pass\-through unrecognized formats as\-is\.
-.
.IP "\(bu" 4
\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console); keep original files (disable \fB\-\-rm\fR)\.
-.
.IP "\(bu" 4
\fB\-o FILE\fR: save result into \fBFILE\fR\. Note that this operation is in conflict with \fB\-c\fR\. If both operations are present on the command line, the last expressed one wins\.
-.
.IP "\(bu" 4
\fB\-\-[no\-]sparse\fR: enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\.
-.
.IP "\(bu" 4
\fB\-\-[no\-]pass\-through\fR enable / disable passing through uncompressed files as\-is\. During decompression when pass\-through is enabled, unrecognized formats will be copied as\-is from the input to the output\. By default, pass\-through will occur when the output destination is stdout and the force (\fB\-f\fR) option is set\.
-.
.IP "\(bu" 4
\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. This command is silently ignored if output is \fBstdout\fR\. If used in combination with \fB\-o\fR, triggers a confirmation prompt (which can be silenced with \fB\-f\fR), as this is a destructive operation\.
-.
.IP "\(bu" 4
\fB\-k\fR, \fB\-\-keep\fR: keep source file(s) after successful compression or decompression\. This is the default behavior\.
-.
.IP "\(bu" 4
\fB\-r\fR: operate recursively on directories\. It selects all files in the named directory and all its subdirectories\. This can be useful both to reduce command line typing, and to circumvent shell expansion limitations, when there are a lot of files and naming breaks the maximum size of a command line\.
-.
.IP "\(bu" 4
\fB\-\-filelist FILE\fR read a list of files to process as content from \fBFILE\fR\. Format is compatible with \fBls\fR output, with one file per line\.
-.
.IP "\(bu" 4
\fB\-\-output\-dir\-flat DIR\fR: resulting files are stored into target \fBDIR\fR directory, instead of same directory as origin file\. Be aware that this command can introduce name collision issues, if multiple files, from different directories, end up having the same name\. Collision resolution ensures first file with a given name will be present in \fBDIR\fR, while in combination with \fB\-f\fR, the last file will be present instead\.
-.
.IP "\(bu" 4
\fB\-\-output\-dir\-mirror DIR\fR: similar to \fB\-\-output\-dir\-flat\fR, the output files are stored underneath target \fBDIR\fR directory, but this option will replicate input directory hierarchy into output \fBDIR\fR\.
-.
.IP
If input directory contains "\.\.", the files in this directory will be ignored\. If input directory is an absolute directory (i\.e\. "/var/tmp/abc"), it will be stored into the "output\-dir/var/tmp/abc"\. If there are multiple input files or directories, name collision resolution will follow the same rules as \fB\-\-output\-dir\-flat\fR\.
-.
.IP "\(bu" 4
\fB\-\-format=FORMAT\fR: compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\.
-.
.IP "\(bu" 4
\fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR: display help/long help and exit
-.
.IP "\(bu" 4
\fB\-V\fR, \fB\-\-version\fR: display version number and immediately exit\. note that, since it exits, flags specified after \fB\-V\fR are effectively ignored\. Advanced: \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-qV\fR will only display the version number, suitable for machine reading\.
-.
.IP "\(bu" 4
\fB\-v\fR, \fB\-\-verbose\fR: verbose mode, display more information
-.
.IP "\(bu" 4
\fB\-q\fR, \fB\-\-quiet\fR: suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\.
-.
.IP "\(bu" 4
\fB\-\-no\-progress\fR: do not display the progress bar, but keep all other messages\.
-.
.IP "\(bu" 4
\fB\-\-show\-default\-cparams\fR: shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size\. If the provided file is not a regular file (e\.g\. a pipe), this flag will output the parameters used for inputs of unknown size\.
-.
.IP "\(bu" 4
\fB\-\-exclude\-compressed\fR: only compress files that are not already compressed\.
-.
.IP "\(bu" 4
\fB\-\-\fR: All arguments after \fB\-\-\fR are treated as files
-.
.IP "" 0
-.
.SS "gzip Operation Modifiers"
When invoked via a \fBgzip\fR symlink, \fBzstd\fR will support further options that intend to mimic the \fBgzip\fR behavior:
-.
.TP
\fB\-n\fR, \fB\-\-no\-name\fR
do not store the original filename and timestamps when compressing a file\. This is the default behavior and hence a no\-op\.
-.
.TP
\fB\-\-best\fR
alias to the option \fB\-9\fR\.
-.
.SS "Environment Variables"
Employing environment variables to set parameters has security implications\. Therefore, this avenue is intentionally limited\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the default compression level and number of threads to use during compression, respectively\.
-.
.P
\fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\.
-.
.P
\fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of (\fB1\fR), and is capped at ZSTDMT_NBWORKERS_MAX==200\. \fBzstd\fR must be compiled with multithread support for this variable to have any effect\.
-.
.P
They can both be overridden by corresponding command line arguments: \fB\-#\fR for compression level and \fB\-T#\fR for number of compression threads\.
-.
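A quick sketch of the precedence rules above (not part of the manual; the file name is a placeholder):

    #!/bin/sh
    printf 'some log data\n' > big.log
    ZSTD_CLEVEL=19 ZSTD_NBTHREADS=4 zstd -v big.log    # level 19, 4 threads, taken from the environment
    ZSTD_CLEVEL=19 zstd -v -7 -T2 -f big.log           # -7 and -T2 override the environment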
.SH "ADVANCED COMPRESSION OPTIONS"
\fBzstd\fR provides 22 predefined regular compression levels plus the fast levels\. A compression level is translated internally into multiple advanced parameters that control the behavior of the compressor (one can observe the result of this translation with \fB\-\-show\-default\-cparams\fR)\. These advanced parameters can be overridden using advanced compression options\.
-.
.SS "\-\-zstd[=options]:"
The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR:
-.
.TP
\fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR
Specify a strategy used by a match finder\.
-.
.IP
There are 9 strategies numbered from 1 to 9, from fastest to strongest: 1=\fBZSTD_fast\fR, 2=\fBZSTD_dfast\fR, 3=\fBZSTD_greedy\fR, 4=\fBZSTD_lazy\fR, 5=\fBZSTD_lazy2\fR, 6=\fBZSTD_btlazy2\fR, 7=\fBZSTD_btopt\fR, 8=\fBZSTD_btultra\fR, 9=\fBZSTD_btultra2\fR\.
-.
.TP
\fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR
Specify the maximum number of bits for a match distance\.
-.
.IP
A higher value increases the chance to find a match, which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\.
-.
.IP
Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\.
-.
.TP
\fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR
Specify the maximum number of bits for a hash table\.
-.
.IP
Bigger hash tables cause fewer collisions, which usually makes compression faster, but they require more memory during compression\.
-.
.IP
The minimum \fIhlog\fR is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB)\.
-.
.TP
\fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR
Specify the maximum number of bits for the secondary search structure, whose form depends on the selected \fBstrategy\fR\.
-.
.IP
A higher number of bits increases the chance to find a match, which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the \fBZSTD_fast\fR \fBstrategy\fR, which only has the primary hash table\.
-.
.IP
The minimum \fIclog\fR is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32\-bit platforms and 30 (1B entries / 4 GiB) on 64\-bit platforms\.
-.
.TP
\fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR
Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\.
-.
.IP
More searches increase the chance to find a match, which usually increases compression ratio but decreases compression speed\.
-.
.IP
-The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\.
-.
+The minimum \fIslog\fR is 1 and the maximum is 'windowLog' \- 1\.
.TP
\fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR
Specify the minimum searched length of a match in a hash table\.
-.
.IP
Larger search lengths usually decrease compression ratio but improve decompression speed\.
-.
.IP
The minimum \fImml\fR is 3 and the maximum is 7\.
-.
.TP
\fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR
The impact of this field varies depending on the selected strategy\.
-.
.IP
For \fBZSTD_btopt\fR, \fBZSTD_btultra\fR and \fBZSTD_btultra2\fR, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\.
-.
.IP
For \fBZSTD_fast\fR, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed: a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\.
-.
.IP
For all other strategies, this field has no impact\.
-.
.IP
The minimum \fItlen\fR is 0 and the maximum is 128 KiB\.
-.
.TP
\fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR
Determine \fBoverlapSize\fR, the amount of data reloaded from the previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\.
-.
.IP
The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default": \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\.
-.
.TP
\fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR
Specify the maximum size for a hash table used for long distance matching\.
-.
.IP
This option is ignored unless long distance matching is enabled\.
-.
.IP
Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\.
-.
.IP
The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: 20)\.
-.
.TP
\fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR
Specify the minimum searched length of a match for long distance matching\.
-.
.IP
This option is ignored unless long distance matching is enabled\.
-.
.IP
Larger or very small values usually decrease compression ratio\.
-.
.IP
The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 64)\.
-.
.TP
\fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR
Specify the size of each bucket for the hash table used for long distance matching\.
-.
.IP
This option is ignored unless long distance matching is enabled\.
-.
.IP
Larger bucket sizes improve collision resolution but decrease compression speed\.
-.
.IP
The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 3)\.
-.
.TP
\fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR
Specify the frequency of inserting entries into the long distance matching hash table\.
-.
.IP
This option is ignored unless long distance matching is enabled\.
-.
.IP
Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\.
-.
.IP
The default value is \fBwlog \- lhlog\fR\.
-.
.SS "Example"
The following parameters set advanced compression options to something similar to predefined level 19 for files bigger than 256 KB:
-.
.P
\fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6
-.
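For reference, a command-line sketch applying the parameter string above to a hypothetical file and comparing it with the predefined level:

    #!/bin/sh
    seq 1 200000 > big.bin
    zstd --zstd=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 big.bin -o big.bin.custom.zst
    zstd -19 big.bin -o big.bin.19.zst
    ls -l big.bin.custom.zst big.bin.19.zst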
.SS "\-B#:"
Specify the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Compression jobs are run in parallel, so this value indirectly impacts the number of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is larger\. Different job sizes will lead to non\-identical compressed frames\.
-.
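A short sketch of explicit job sizing with multithreaded compression (file name hypothetical):

    #!/bin/sh
    # Job sizes below the enforced minimum (512 KB or overlapSize) are raised transparently.
    seq 1 2000000 > big.txt
    zstd -T4 -B4M big.txt -o big.txt.zst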
.SH "DICTIONARY BUILDER"
-\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\.
-.
+\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It's possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\.
.TP
\fB\-\-train FILEs\fR
Use FILEs as a training set to create a dictionary\. The training set should ideally contain a lot of samples (> 100), and weigh typically 100x the target dictionary size (for example, ~10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\.
-.
.IP
Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files\. In the case where some samples happen to be large, only the first 128 KiB of these samples will be used for training\.
-.
.IP
\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional advanced parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default \fB\-\-train\fR is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\.
-.
.TP
\fB\-o FILE\fR
Dictionary saved into \fBFILE\fR (default name: dictionary)\.
-.
.TP
\fB\-\-maxdict=#\fR
-Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it\'s possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\.
-.
+Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it's possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\.
.TP
\fB\-#\fR
Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\.
-.
.TP
\fB\-B#\fR
Split input files into blocks of size # (default: no split)
-.
.TP
\fB\-M#\fR, \fB\-\-memory=#\fR
Limit the amount of sample data loaded for training (default: 2 GB)\. Note that the default (2 GB) is also the maximum\. This parameter can be useful in situations where the training set size is not well controlled and could be potentially very large\. Since speed of the training process is directly correlated to the size of the training sample set, a smaller sample set leads to faster training\.
-.
.IP
In situations where the training set is larger than maximum memory, the CLI will randomly select samples among the available ones, up to the maximum allowed memory budget\. This is meant to improve dictionary relevance by mitigating the potential impact of clustering, such as selecting only files from the beginning of a list sorted by modification date, or sorted by alphabetical order\. The randomization process is deterministic, so training of the same list of files with the same parameters will lead to the creation of the same dictionary\.
-.
.TP
\fB\-\-dictID=#\fR
-A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to provide an explicit number ID instead\. It\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\.
-.
+A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-byte random number ID\. It's possible to provide an explicit number ID instead\. It's up to the dictionary manager not to assign the same ID to two different dictionaries\. Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to the default of 4 bytes\.
.IP
-Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2^31, so they should not be used in public\.
-.
+Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\e^31, so they should not be used in public\.
.TP
\fB\-\-train\-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]\fR
Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\.
-.
.IP
Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\.
-.
.IP
Examples:
-.
.IP
\fBzstd \-\-train\-cover FILEs\fR
-.
.IP
\fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR
-.
.IP
\fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR
-.
.IP
\fBzstd \-\-train\-cover=k=50 FILEs\fR
-.
.IP
\fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR
-.
.IP
\fBzstd \-\-train\-cover=shrink FILEs\fR
-.
.IP
\fBzstd \-\-train\-cover=shrink=2 FILEs\fR
-.
.TP
\fB\-\-train\-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#]\fR
Same as cover but with extra parameters \fIf\fR and \fIaccel\fR, and a different default value of \fIsplit\fR\. If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\.
-.
.IP
\fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. Using a higher \fIf\fR reduces collision but takes longer\.
-.
.IP
Examples:
-.
.IP
\fBzstd \-\-train\-fastcover FILEs\fR
-.
.IP
\fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR
-.
.TP
\fB\-\-train\-legacy[=selectivity=#]\fR
Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its achievable maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\.
-.
.IP
Examples:
-.
.IP
\fBzstd \-\-train\-legacy FILEs\fR
-.
.IP
\fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR
-.
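Putting the dictionary-builder options above together, a minimal end-to-end sketch (directory and file names are hypothetical):

    #!/bin/sh
    zstd --train -r samples/ --maxdict=102400 --dictID=1000000 -o my.dict
    zstd -D my.dict message.json -o message.json.zst
    zstd -D my.dict -d message.json.zst -o roundtrip.json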
.SH "BENCHMARK"
-The \fBzstd\fR CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer\'s performance\. Note that the results are highly dependent on the content being compressed\.
-.
-.TP
-\fB\-b#\fR
-benchmark file(s) using compression level #
-.
-.TP
-\fB\-e#\fR
-benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive)
-.
-.TP
-\fB\-d\fR
-benchmark decompression speed only (requires providing an already zstd\-compressed content)
-.
-.TP
-\fB\-i#\fR
-minimum evaluation time, in seconds (default: 3s), benchmark mode only
-.
-.TP
-\fB\-B#\fR, \fB\-\-block\-size=#\fR
-cut file(s) into independent chunks of size # (default: no chunking)
-.
-.TP
-\fB\-\-priority=rt\fR
-set process priority to real\-time (Windows)
-.
+The \fBzstd\fR CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer's performance\. \fBzstd \-b [FILE(s)]\fR will benchmark \fBzstd\fR for both compression and decompression using default compression level\. Note that results are very dependent on the content being compressed\. It's possible to pass multiple files to the benchmark, and even a directory with \fB\-r DIRECTORY\fR\. When no \fBFILE\fR is provided, the benchmark will use a procedurally generated \fBlorem ipsum\fR text\.
+.IP "\(bu" 4
+\fB\-b#\fR: benchmark file(s) using compression level #
+.IP "\(bu" 4
+\fB\-e#\fR: benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive)
+.IP "\(bu" 4
+\fB\-d\fR: benchmark decompression speed only (requires providing zstd\-compressed content)
+.IP "\(bu" 4
+\fB\-i#\fR: minimum evaluation time, in seconds (default: 3s), benchmark mode only
+.IP "\(bu" 4
+\fB\-B#\fR, \fB\-\-block\-size=#\fR: cut file(s) into independent chunks of size # (default: no chunking)
+.IP "\(bu" 4
+\fB\-S\fR: output one benchmark result per input file (default: consolidated result)
+.IP "\(bu" 4
+\fB\-D dictionary\fR: benchmark using dictionary
+.IP "\(bu" 4
+\fB\-\-priority=rt\fR: set process priority to real\-time (Windows)
+.IP "" 0
+.P
+Beyond compression levels, benchmarking is also compatible with other parameters, such as number of threads (\fB\-T#\fR), advanced compression parameters (\fB\-\-zstd=###\fR), dictionary compression (\fB\-D dictionary\fR), or even disabling checksum verification for example\.
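A few benchmark invocations combining the flags above (a sketch; file and dictionary names are hypothetical, and the dictionary run relies on the -D benchmark support added by this patch):

    #!/bin/sh
    zstd -b1 -e19 -T4 big.tar          # levels 1..19, 4 worker threads
    zstd -b -d big.tar.zst             # decompression speed only, on compressed input
    zstd -b3 -D my.dict -r samples/    # dictionary benchmark over a directory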
.P
\fBOutput Format:\fR CompressionLevel#Filename: InputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed
-.
.P
-\fBMethodology:\fR For both compression and decompression speed, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\.
-.
+\fBMethodology:\fR For speed measurement, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\.
.SH "SEE ALSO"
\fBzstdgrep\fR(1), \fBzstdless\fR(1), \fBgzip\fR(1), \fBxz\fR(1)
-.
.P
-The \fIzstandard\fR format is specified in Y\. Collet, "Zstandard Compression and the \'application/zstd\' Media Type", https://www\.ietf\.org/rfc/rfc8878\.txt, Internet RFC 8878 (February 2021)\.
-.
+The \fIzstandard\fR format is specified in Y\. Collet, "Zstandard Compression and the 'application/zstd' Media Type", https://www\.ietf\.org/rfc/rfc8878\.txt, Internet RFC 8878 (February 2021)\.
.SH "BUGS"
Report bugs at: https://github\.com/facebook/zstd/issues
-.
.SH "AUTHOR"
Yann Collet
diff --git a/programs/zstd.1.md b/programs/zstd.1.md
index fcbfb457301..e5c1b7fd215 100644
--- a/programs/zstd.1.md
+++ b/programs/zstd.1.md
@@ -161,6 +161,10 @@ the last one takes effect.
Note: If `windowLog` is set to larger than 27, `--long=windowLog` or
`--memory=windowSize` needs to be passed to the decompressor.
+* `--max`:
+ set advanced parameters to maximum compression.
+ Warning: this setting is very slow and uses a lot of resources.
+ It's inappropriate for 32-bit mode and therefore disabled in this mode.
* `-D DICT`:
use `DICT` as Dictionary to compress or decompress FILE(s)
* `--patch-from FILE`:
@@ -174,8 +178,10 @@ the last one takes effect.
(_fileLog_ being the _windowLog_ required to cover the whole file). You
can also manually force it.
- Note: for all levels, you can use `--patch-from` in `--single-thread` mode
- to improve compression ratio at the cost of speed.
+ Note: up to level 15, you can use `--patch-from` in `--single-thread` mode
+ to improve compression ratio marginally at the cost of speed. Using
+ `--single-thread` above level 15 will lead to lower compression
+ ratios.
Note: for level 19, you can get increased compression ratio at the cost
of speed by specifying `--zstd=targetLength=` to be something large
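As a concrete illustration of the `--patch-from` note above, a hedged sketch (file names hypothetical; very large reference files may additionally require `--long=#`/`--memory=#` as documented above):

    #!/bin/sh
    zstd -19 --patch-from=v1.tar v2.tar -o v1_to_v2.zst
    zstd -d --patch-from=v1.tar v1_to_v2.zst -o v2.rebuilt.tar
    cmp v2.tar v2.rebuilt.tar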
@@ -341,7 +347,7 @@ If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a
`ZSTD_NBTHREADS` can be used to set the number of threads `zstd` will attempt to use during compression.
If the value of `ZSTD_NBTHREADS` is not a valid unsigned integer, it will be ignored with a warning message.
-`ZSTD_NBTHREADS` has a default value of (`1`), and is capped at ZSTDMT_NBWORKERS_MAX==200.
+`ZSTD_NBTHREADS` has a default value of `max(1, min(4, nbCores/4))`, and is capped at ZSTDMT_NBWORKERS_MAX==200.
`zstd` must be compiled with multithread support for this variable to have any effect.
They can both be overridden by corresponding command line arguments:
@@ -449,6 +455,17 @@ The list of available _options_:
Value 0 is special and means "default": _ovlog_ is automatically determined by `zstd`.
In which case, _ovlog_ will range from 6 to 9, depending on selected _strat_.
+- `ldmHashRateLog`=_lhrlog_, `lhrlog`=_lhrlog_:
+ Specify the frequency of inserting entries into the long distance matching
+ hash table.
+
+ This option is ignored unless long distance matching is enabled.
+
+ Larger values will improve compression speed. Deviating far from the
+ default value will likely result in a decrease in compression ratio.
+
+ The default value varies between 4 and 7, depending on `strategy`.
+
- `ldmHashLog`=_lhlog_, `lhlog`=_lhlog_:
Specify the maximum size for a hash table used for long distance matching.
@@ -457,7 +474,7 @@ The list of available _options_:
Bigger hash tables usually improve compression ratio at the expense of more
memory during compression and a decrease in compression speed.
- The minimum _lhlog_ is 6 and the maximum is 30 (default: 20).
+ The minimum _lhlog_ is 6 and the maximum is 30 (default: `windowLog - ldmHashRateLog`).
- `ldmMinMatch`=_lmml_, `lmml`=_lmml_:
Specify the minimum searched length of a match for long distance matching.
@@ -466,7 +483,7 @@ The list of available _options_:
Larger/very small values usually decrease compression ratio.
- The minimum _lmml_ is 4 and the maximum is 4096 (default: 64).
+ The minimum _lmml_ is 4 and the maximum is 4096 (default: 32 to 64, depending on `strategy`).
- `ldmBucketSizeLog`=_lblog_, `lblog`=_lblog_:
Specify the size of each bucket for the hash table used for long distance
@@ -477,18 +494,8 @@ The list of available _options_:
Larger bucket sizes improve collision resolution but decrease compression
speed.
- The minimum _lblog_ is 1 and the maximum is 8 (default: 3).
-
-- `ldmHashRateLog`=_lhrlog_, `lhrlog`=_lhrlog_:
- Specify the frequency of inserting entries into the long distance matching
- hash table.
+ The minimum _lblog_ is 1 and the maximum is 8 (default: 4 to 8, depending on `strategy`).
- This option is ignored unless long distance matching is enabled.
-
- Larger values will improve compression speed. Deviating far from the
- default value will likely result in a decrease in compression ratio.
-
- The default value is `wlog - lhlog`.
### Example
The following parameters sets advanced compression options to something
@@ -660,24 +667,36 @@ Compression of small files similar to the sample set will be greatly improved.
BENCHMARK
---------
The `zstd` CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer's performance.
-Note that the results are highly dependent on the content being compressed.
+`zstd -b [FILE(s)]` will benchmark `zstd` for both compression and decompression using default compression level.
+Note that results are very dependent on the content being compressed.
+
+It's possible to pass multiple files to the benchmark, and even a directory with `-r DIRECTORY`.
+When no `FILE` is provided, the benchmark will use a procedurally generated `lorem ipsum` text.
+
+Benchmarking will employ `max(1, min(4, nbCores/4))` worker threads by default in order to match the behavior of the normal CLI I/O.
* `-b#`:
benchmark file(s) using compression level #
* `-e#`:
benchmark file(s) using multiple compression levels, from `-b#` to `-e#` (inclusive)
* `-d`:
- benchmark decompression speed only (requires providing an already zstd-compressed content)
+ benchmark decompression speed only (requires providing zstd-compressed content)
* `-i#`:
minimum evaluation time, in seconds (default: 3s), benchmark mode only
* `-B#`, `--block-size=#`:
cut file(s) into independent chunks of size # (default: no chunking)
+* `-S`:
+ output one benchmark result per input file (default: consolidated result)
+* `-D dictionary`:
+ benchmark using dictionary
* `--priority=rt`:
set process priority to real-time (Windows)
+Beyond compression levels, benchmarking is also compatible with other parameters, such as number of threads (`-T#`), advanced compression parameters (`--zstd=###`), dictionary compression (`-D dictionary`), or even disabling checksum verification for example.
+
**Output Format:** CompressionLevel#Filename: InputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed
-**Methodology:** For both compression and decompression speed, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy.
+**Methodology:** For speed measurement, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy.
SEE ALSO
diff --git a/programs/zstdcli.c b/programs/zstdcli.c
index 9dd6b051a7b..83d9b881e50 100644
--- a/programs/zstdcli.c
+++ b/programs/zstdcli.c
@@ -8,22 +8,6 @@
* You may select, at your option, one of the above-listed licenses.
*/
-
-/*-************************************
-* Tuning parameters
-**************************************/
-#ifndef ZSTDCLI_CLEVEL_DEFAULT
-# define ZSTDCLI_CLEVEL_DEFAULT 3
-#endif
-
-#ifndef ZSTDCLI_CLEVEL_MAX
-# define ZSTDCLI_CLEVEL_MAX 19 /* without using --ultra */
-#endif
-
-#ifndef ZSTDCLI_NBTHREADS_DEFAULT
-# define ZSTDCLI_NBTHREADS_DEFAULT 1
-#endif
-
/*-************************************
* Dependencies
**************************************/
@@ -32,7 +16,6 @@
#include <stdlib.h> /* getenv */
#include <string.h> /* strcmp, strlen */
#include <stdio.h> /* fprintf(), stdin, stdout, stderr */
-#include <errno.h> /* errno */
#include <assert.h> /* assert */
#include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */
@@ -47,6 +30,23 @@
#endif
#include "../lib/zstd.h" /* ZSTD_VERSION_STRING, ZSTD_minCLevel, ZSTD_maxCLevel */
#include "fileio_asyncio.h"
+#include "fileio_common.h"
+
+/*-************************************
+* Tuning parameters
+**************************************/
+#ifndef ZSTDCLI_CLEVEL_DEFAULT
+# define ZSTDCLI_CLEVEL_DEFAULT 3
+#endif
+
+#ifndef ZSTDCLI_CLEVEL_MAX
+# define ZSTDCLI_CLEVEL_MAX 19 /* without using --ultra */
+#endif
+
+#ifndef ZSTDCLI_NBTHREADS_DEFAULT
+#define ZSTDCLI_NBTHREADS_DEFAULT MAX(1, MIN(4, UTIL_countLogicalCores() / 4))
+#endif
+
/*-************************************
@@ -100,9 +100,7 @@ typedef enum { cover, fastCover, legacy } dictType;
/*-************************************
* Display Macros
**************************************/
-#define DISPLAY_F(f, ...) fprintf((f), __VA_ARGS__)
-#define DISPLAYOUT(...) DISPLAY_F(stdout, __VA_ARGS__)
-#define DISPLAY(...) DISPLAY_F(stderr, __VA_ARGS__)
+#undef DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
static int g_displayLevel = DISPLAY_LEVEL_DEFAULT; /* 0 : no display, 1: errors, 2 : + result + interaction + warnings, 3 : + progression, 4 : + information */
@@ -311,6 +309,7 @@ static void usageAdvanced(const char* programName)
DISPLAYOUT(" -i# Set the minimum evaluation to time # seconds. [Default: 3]\n");
DISPLAYOUT(" -B# Cut file into independent chunks of size #. [Default: No chunking]\n");
DISPLAYOUT(" -S Output one benchmark result per input file. [Default: Consolidated result]\n");
+ DISPLAYOUT(" -D dictionary Benchmark using dictionary \n");
DISPLAYOUT(" --priority=rt Set process priority to real-time.\n");
#endif
@@ -636,12 +635,26 @@ static unsigned parseCompressionParameters(const char* stringPtr, ZSTD_compressi
return 0;
}
- DISPLAYLEVEL(4, "windowLog=%d, chainLog=%d, hashLog=%d, searchLog=%d \n", params->windowLog, params->chainLog, params->hashLog, params->searchLog);
- DISPLAYLEVEL(4, "minMatch=%d, targetLength=%d, strategy=%d \n", params->minMatch, params->targetLength, params->strategy);
if (stringPtr[0] != 0) return 0; /* check the end of string */
return 1;
}
+static void setMaxCompression(ZSTD_compressionParameters* params)
+{
+ params->windowLog = ZSTD_WINDOWLOG_MAX;
+ params->chainLog = ZSTD_CHAINLOG_MAX;
+ params->hashLog = ZSTD_HASHLOG_MAX;
+ params->searchLog = ZSTD_SEARCHLOG_MAX;
+ params->minMatch = ZSTD_MINMATCH_MIN;
+ params->targetLength = ZSTD_TARGETLENGTH_MAX;
+ params->strategy = ZSTD_STRATEGY_MAX;
+ g_overlapLog = ZSTD_OVERLAPLOG_MAX;
+ g_ldmHashLog = ZSTD_LDM_HASHLOG_MAX;
+ g_ldmHashRateLog = 0; /* automatically derived */
+ g_ldmMinMatch = 16; /* heuristic */
+ g_ldmBucketSizeLog = ZSTD_LDM_BUCKETSIZELOG_MAX;
+}
+
static void printVersion(void)
{
if (g_displayLevel < DISPLAY_LEVEL_DEFAULT) {
@@ -696,7 +709,7 @@ static void printDefaultCParams(const char* filename, const char* dictFileName,
unsigned long long fileSize = UTIL_getFileSize(filename);
const size_t dictSize = dictFileName != NULL ? (size_t)UTIL_getFileSize(dictFileName) : 0;
const ZSTD_compressionParameters cParams = ZSTD_getCParams(cLevel, fileSize, dictSize);
- if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%u bytes)\n", filename, (unsigned)fileSize);
+ if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%llu bytes)\n", filename, fileSize);
else DISPLAY("%s (src size unknown)\n", filename);
DISPLAY(" - windowLog : %u\n", cParams.windowLog);
DISPLAY(" - chainLog : %u\n", cParams.chainLog);
@@ -760,7 +773,7 @@ static int init_cLevel(void) {
}
#ifdef ZSTD_MULTITHREAD
-static unsigned init_nbThreads(void) {
+static unsigned default_nbThreads(void) {
const char* const env = getenv(ENV_NBTHREADS);
if (env != NULL) {
const char* ptr = env;
@@ -797,22 +810,22 @@ static unsigned init_nbThreads(void) {
CLEAN_RETURN(1); \
} } }
-#define NEXT_UINT32(val32) { \
- const char* __nb; \
- NEXT_FIELD(__nb); \
+#define NEXT_UINT32(val32) { \
+ const char* __nb; \
+ NEXT_FIELD(__nb); \
val32 = readU32FromChar(&__nb); \
- if(*__nb != 0) { \
+ if(*__nb != 0) { \
errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \
- } \
+ } \
}
-#define NEXT_TSIZE(valTsize) { \
- const char* __nb; \
- NEXT_FIELD(__nb); \
+#define NEXT_TSIZE(valTsize) { \
+ const char* __nb; \
+ NEXT_FIELD(__nb); \
valTsize = readSizeTFromChar(&__nb); \
- if(*__nb != 0) { \
+ if(*__nb != 0) { \
errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \
- } \
+ } \
}
typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode;
@@ -852,10 +865,10 @@ int main(int argCount, const char* argv[])
ultra=0,
contentSize=1,
removeSrcFile=0;
- ZSTD_paramSwitch_e mmapDict=ZSTD_ps_auto;
- ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_ps_auto;
+ ZSTD_ParamSwitch_e mmapDict=ZSTD_ps_auto;
+ ZSTD_ParamSwitch_e useRowMatchFinder = ZSTD_ps_auto;
FIO_compressionType_t cType = FIO_zstdCompression;
- unsigned nbWorkers = 0;
+ int nbWorkers = -1; /* -1 means unset */
double compressibility = -1.0; /* lorem ipsum generator */
unsigned bench_nbSeconds = 3; /* would be better if this value was synchronized from bench */
size_t blockSize = 0;
@@ -894,8 +907,7 @@ int main(int argCount, const char* argv[])
#ifndef ZSTD_NOBENCH
BMK_advancedParams_t benchParams = BMK_initAdvancedParams();
#endif
- ZSTD_paramSwitch_e literalCompressionMode = ZSTD_ps_auto;
-
+ ZSTD_ParamSwitch_e literalCompressionMode = ZSTD_ps_auto;
/* init */
checkLibVersion();
@@ -904,9 +916,6 @@ int main(int argCount, const char* argv[])
assert(argCount >= 1);
if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAYLEVEL(1, "zstd: allocation error \n"); exit(1); }
programName = lastNameFromPath(programName);
-#ifdef ZSTD_MULTITHREAD
- nbWorkers = init_nbThreads();
-#endif
/* preset behaviors */
if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=0, singleThread=0;
@@ -1014,6 +1023,16 @@ int main(int argCount, const char* argv[])
if (!strcmp(argument, "--fake-stderr-is-console")) { UTIL_fakeStderrIsConsole(); continue; }
if (!strcmp(argument, "--trace-file-stat")) { UTIL_traceFileStat(); continue; }
+ if (!strcmp(argument, "--max")) {
+ if (sizeof(void*)==4) {
+ DISPLAYLEVEL(2, "--max is incompatible with 32-bit mode \n");
+ badUsage(programName, originalArgument);
+ CLEAN_RETURN(1);
+ }
+ ultra=1; ldmFlag = 1; setMaxCompression(&compressionParams);
+ continue;
+ }
+
/* long commands with arguments */
#ifndef ZSTD_NODICT
if (longCommandWArg(&argument, "--train-cover")) {
@@ -1089,10 +1108,11 @@ int main(int argCount, const char* argv[])
#ifndef ZSTD_NOTRACE
if (longCommandWArg(&argument, "--trace")) { char const* traceFile; NEXT_FIELD(traceFile); TRACE_enable(traceFile); continue; }
#endif
- if (longCommandWArg(&argument, "--patch-from")) { NEXT_FIELD(patchFromDictFileName); continue; }
+ if (longCommandWArg(&argument, "--patch-from")) { NEXT_FIELD(patchFromDictFileName); ultra = 1; continue; }
if (longCommandWArg(&argument, "--long")) {
unsigned ldmWindowLog = 0;
ldmFlag = 1;
+ ultra = 1;
/* Parse optional window log */
if (*argument == '=') {
++argument;
@@ -1298,7 +1318,7 @@ int main(int argCount, const char* argv[])
DISPLAYLEVEL(3, WELCOME_MESSAGE);
#ifdef ZSTD_MULTITHREAD
- if ((operation==zom_decompress) && (!singleThread) && (nbWorkers > 1)) {
+ if ((operation==zom_decompress) && (nbWorkers > 1)) {
DISPLAYLEVEL(2, "Warning : decompression does not support multi-threading\n");
}
if ((nbWorkers==0) && (!singleThread)) {
@@ -1311,6 +1331,16 @@ int main(int argCount, const char* argv[])
DISPLAYLEVEL(3, "Note: %d physical core(s) detected \n", nbWorkers);
}
}
+ /* Resolve to default if nbWorkers is still unset */
+ if (nbWorkers == -1) {
+ if (operation == zom_decompress) {
+ nbWorkers = 1;
+ } else {
+ nbWorkers = default_nbThreads();
+ }
+ }
+ if (operation != zom_bench)
+ DISPLAYLEVEL(4, "Compressing with %u worker threads \n", nbWorkers);
#else
(void)singleThread; (void)nbWorkers; (void)defaultLogicalCores;
#endif
@@ -1391,28 +1421,31 @@ int main(int argCount, const char* argv[])
}
benchParams.literalCompressionMode = literalCompressionMode;
+ if (benchParams.mode == BMK_decodeOnly) cLevel = cLevelLast = 0;
if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
if (cLevelLast > ZSTD_maxCLevel()) cLevelLast = ZSTD_maxCLevel();
if (cLevelLast < cLevel) cLevelLast = cLevel;
- if (cLevelLast > cLevel)
- DISPLAYLEVEL(3, "Benchmarking levels from %d to %d\n", cLevel, cLevelLast);
+ DISPLAYLEVEL(3, "Benchmarking ");
+ if (filenames->tableSize > 1)
+ DISPLAYLEVEL(3, "%u files ", (unsigned)filenames->tableSize);
+ if (cLevelLast > cLevel) {
+ DISPLAYLEVEL(3, "from level %d to %d ", cLevel, cLevelLast);
+ } else {
+ DISPLAYLEVEL(3, "at level %d ", cLevel);
+ }
+ DISPLAYLEVEL(3, "using %i threads \n", nbWorkers);
if (filenames->tableSize > 0) {
if(separateFiles) {
unsigned i;
for(i = 0; i < filenames->tableSize; i++) {
- int c;
- DISPLAYLEVEL(3, "Benchmarking %s \n", filenames->fileNames[i]);
- for(c = cLevel; c <= cLevelLast; c++) {
- operationResult = BMK_benchFilesAdvanced(&filenames->fileNames[i], 1, dictFileName, c, &compressionParams, g_displayLevel, &benchParams);
- } }
+ operationResult = BMK_benchFilesAdvanced(&filenames->fileNames[i], 1, dictFileName, cLevel, cLevelLast, &compressionParams, g_displayLevel, &benchParams);
+ }
} else {
- for(; cLevel <= cLevelLast; cLevel++) {
- operationResult = BMK_benchFilesAdvanced(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, &compressionParams, g_displayLevel, &benchParams);
- } }
+ operationResult = BMK_benchFilesAdvanced(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, cLevelLast, &compressionParams, g_displayLevel, &benchParams);
+ }
} else {
- for(; cLevel <= cLevelLast; cLevel++) {
- operationResult = BMK_syntheticTest(cLevel, compressibility, &compressionParams, g_displayLevel, &benchParams);
- } }
+ operationResult = BMK_syntheticTest(compressibility, cLevel, cLevelLast, &compressionParams, g_displayLevel, &benchParams);
+ }
#else
(void)bench_nbSeconds; (void)blockSize; (void)setRealTimePrio; (void)separateFiles; (void)compressibility;
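For reference, a shell sketch of the CLI behavior introduced by the zstdcli.c changes above (requires a build that includes this patch; file names are hypothetical):

    #!/bin/sh
    zstd --max big.tar -o big.tar.max.zst    # maximum-effort parameters; 64-bit builds only
    zstd -b1 -e5 -S -D my.dict file1 file2   # per-file benchmark results with a dictionary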
diff --git a/tests/Makefile b/tests/Makefile
index ed3692a24e8..406c7f20b79 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -26,6 +26,9 @@ export ZSTD_LEGACY_SUPPORT
DEBUGLEVEL ?= 2
export DEBUGLEVEL # transmit value to sub-makefiles
+.PHONY: default
+default: fullbench
+
LIBZSTD_MK_DIR := ../lib
include $(LIBZSTD_MK_DIR)/libzstd.mk
@@ -78,9 +81,6 @@ FUZZERTEST ?= -T200s
ZSTDRTTEST = --test-large-data
DECODECORPUS_TESTTIME ?= -T30
-.PHONY: default
-default: fullbench
-
.PHONY: all
all: fullbench fuzzer zstreamtest paramgrill datagen decodecorpus roundTripCrash poolTests
@@ -148,13 +148,14 @@ fullbench32: CPPFLAGS += -m32
$(FULLBENCHS) : CPPFLAGS += $(MULTITHREAD_CPP) -Wno-deprecated-declarations
$(FULLBENCHS) : LDFLAGS += $(MULTITHREAD_LD)
$(FULLBENCHS) : DEBUGFLAGS = -DNDEBUG # turn off assert() for speed measurements
+$(FULLBENCHS) : DEBUGLEVEL = 0 # turn off assert() for speed measurements
$(FULLBENCHS) : $(ZSTD_FILES)
-$(FULLBENCHS) : $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c fullbench.c
+$(FULLBENCHS) : $(PRGDIR)/datagen.c $(PRGDIR)/lorem.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c fullbench.c
$(LINK.c) $^ -o $@$(EXT)
CLEAN += fullbench-lib
fullbench-lib : CPPFLAGS += -DXXH_NAMESPACE=ZSTD_
-fullbench-lib : $(PRGDIR)/datagen.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c $(LIB_SRCDIR)/libzstd.a fullbench.c
+fullbench-lib : $(PRGDIR)/datagen.c $(PRGDIR)/lorem.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/benchfn.c $(LIB_SRCDIR)/libzstd.a fullbench.c
$(LINK.c) $^ -o $@$(EXT)
# note : broken : requires symbols unavailable from dynamic library
@@ -224,9 +225,9 @@ roundTripCrash : $(ZSTD_OBJECTS) roundTripCrash.c
CLEAN += longmatch
longmatch : $(ZSTD_OBJECTS) longmatch.c
-CLEAN += bigdict
-bigdict: CFLAGS += $(MULTITHREAD)
-bigdict: $(ZSTDMT_OBJECTS) $(PRGDIR)/datagen.c bigdict.c
+CLEAN += largeDictionary
+largeDictionary: CFLAGS += $(MULTITHREAD)
+largeDictionary: $(ZSTDMT_OBJECTS) $(PRGDIR)/datagen.c largeDictionary.c
CLEAN += invalidDictionaries
invalidDictionaries : $(ZSTD_OBJECTS) invalidDictionaries.c
@@ -271,8 +272,8 @@ clean:
#----------------------------------------------------------------------------------
# valgrind tests validated only for some posix platforms
#----------------------------------------------------------------------------------
-UNAME := $(shell uname)
-ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS AIX CYGWIN_NT))
+UNAME := $(shell sh -c 'MSYSTEM="MSYS" uname')
+ifneq (,$(filter Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS AIX CYGWIN_NT%,$(UNAME)))
HOST_OS = POSIX
.PHONY: test-valgrind
@@ -300,10 +301,10 @@ endif
#-----------------------------------------------------------------------------
# make tests validated only for below targets
#-----------------------------------------------------------------------------
-ifneq (,$(filter $(HOST_OS),MSYS POSIX))
+ifneq (,$(filter MSYS POSIX,$(HOST_OS)))
DIFF:=diff
-ifneq (,$(filter $(UNAME),SunOS))
+ifneq (,$(filter SunOS,$(UNAME)))
DIFF:=gdiff
endif
@@ -311,12 +312,12 @@ endif
list:
@$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs
-.PHONY: shortest
-shortest: ZSTDRTTEST= # remove long tests
-shortest: test-zstd
-
.PHONY: check
-check: shortest
+check: ZSTDRTTEST= # remove long tests
+check: test-zstd
+ @echo "\n******************************"
+ @echo "All tests completed successfully"
+ @echo "******************************"
.PHONY: fuzztest
fuzztest: test-fuzzer test-zstream test-decodecorpus
@@ -326,6 +327,9 @@ test: test-zstd test-cli-tests test-fullbench test-fuzzer test-zstream test-inva
ifeq ($(QEMU_SYS),)
test: test-pool
endif
+ @echo "\n******************************"
+ @echo "All tests completed successfully"
+ @echo "******************************"
.PHONY: test32
test32: test-zstd32 test-fullbench32 test-fuzzer32 test-zstream32
@@ -391,8 +395,8 @@ test-zstream32: zstreamtest32
test-longmatch: longmatch
$(QEMU_SYS) ./longmatch
-test-bigdict: bigdict
- $(QEMU_SYS) ./bigdict
+test-largeDictionary: largeDictionary
+ $(QEMU_SYS) ./largeDictionary
test-invalidDictionaries: invalidDictionaries
$(QEMU_SYS) ./invalidDictionaries
diff --git a/tests/bigdict.c b/tests/bigdict.c
index ff2bb2d7032..748b60e79b1 100644
--- a/tests/bigdict.c
+++ b/tests/bigdict.c
@@ -70,12 +70,14 @@ int main(int argc, const char** argv)
char* buffer = (char*)malloc(bufferSize);
void* out = malloc(outSize);
void* roundtrip = malloc(dataSize);
+ int _exit_code = 0;
(void)argc;
(void)argv;
if (!buffer || !out || !roundtrip || !cctx || !dctx) {
fprintf(stderr, "Allocation failure\n");
- return 1;
+ _exit_code = 1;
+ goto cleanup;
}
if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 31)))
@@ -119,10 +121,13 @@ int main(int argc, const char** argv)
fprintf(stderr, "Success!\n");
+ goto cleanup;
+
+cleanup:
free(roundtrip);
free(out);
free(buffer);
- ZSTD_freeDCtx(dctx);
ZSTD_freeCCtx(cctx);
- return 0;
+ ZSTD_freeDCtx(dctx);
+ return _exit_code;
}
diff --git a/tests/cli-tests/common/platform.sh b/tests/cli-tests/common/platform.sh
index 6eb45eab99e..a07f229dc0b 100644
--- a/tests/cli-tests/common/platform.sh
+++ b/tests/cli-tests/common/platform.sh
@@ -18,7 +18,6 @@ esac
case "$UNAME" in
Darwin) MD5SUM="md5 -r" ;;
- FreeBSD) MD5SUM="gmd5sum" ;;
NetBSD) MD5SUM="md5 -n" ;;
OpenBSD) MD5SUM="md5" ;;
*) MD5SUM="md5sum" ;;
diff --git a/tests/cli-tests/compression/levels.sh b/tests/cli-tests/compression/levels.sh
index cc2700a3097..b8230f2a346 100755
--- a/tests/cli-tests/compression/levels.sh
+++ b/tests/cli-tests/compression/levels.sh
@@ -5,14 +5,27 @@ set -v
datagen > file
+# Retrieve the program's version information
+# Note: command echoing differs between macos and linux, so it's disabled below
+set +v
+version_info=$(zstd -V)
+set -v
+
# Compress with various levels and ensure that their sizes are ordered
zstd --fast=10 file -o file-f10.zst -q
zstd --fast=1 file -o file-f1.zst -q
zstd -1 file -o file-1.zst -q
zstd -19 file -o file-19.zst -q
+if echo "$version_info" | grep -q '32-bit'; then
+ # skip --max test: not enough address space
+ cp file-19.zst file-max.zst
+else
+ zstd --max file -o file-max.zst -q
+fi
-zstd -t file-f10.zst file-f1.zst file-1.zst file-19.zst
+zstd -t file-f10.zst file-f1.zst file-1.zst file-19.zst file-max.zst
+cmp_size -le file-max.zst file-19.zst
cmp_size -lt file-19.zst file-1.zst
cmp_size -lt file-1.zst file-f1.zst
cmp_size -lt file-f1.zst file-f10.zst
diff --git a/tests/cli-tests/compression/levels.sh.stderr.exact b/tests/cli-tests/compression/levels.sh.stderr.exact
index c8fb79c6896..fd7c076d277 100644
--- a/tests/cli-tests/compression/levels.sh.stderr.exact
+++ b/tests/cli-tests/compression/levels.sh.stderr.exact
@@ -1,15 +1,26 @@
datagen > file
+# Retrieve the program's version information
+# Note: command echoing differs between macos and linux, so it's disabled below
+set +v
+
# Compress with various levels and ensure that their sizes are ordered
zstd --fast=10 file -o file-f10.zst -q
zstd --fast=1 file -o file-f1.zst -q
zstd -1 file -o file-1.zst -q
zstd -19 file -o file-19.zst -q
+if echo "$version_info" | grep -q '32-bit'; then
+ # skip --max test: not enough address space
+ cp file-19.zst file-max.zst
+else
+ zstd --max file -o file-max.zst -q
+fi
-zstd -t file-f10.zst file-f1.zst file-1.zst file-19.zst
-4 files decompressed : 262148 bytes total
+zstd -t file-f10.zst file-f1.zst file-1.zst file-19.zst file-max.zst
+5 files decompressed : 327685 bytes total
+cmp_size -le file-max.zst file-19.zst
cmp_size -lt file-19.zst file-1.zst
cmp_size -lt file-1.zst file-f1.zst
cmp_size -lt file-f1.zst file-f10.zst
diff --git a/tests/decodecorpus.c b/tests/decodecorpus.c
index 1abc7df8b71..c53d83251f6 100644
--- a/tests/decodecorpus.c
+++ b/tests/decodecorpus.c
@@ -182,7 +182,7 @@ BYTE CONTENT_BUFFER[MAX_DECOMPRESSED_SIZE];
BYTE FRAME_BUFFER[MAX_DECOMPRESSED_SIZE * 2];
BYTE LITERAL_BUFFER[ZSTD_BLOCKSIZE_MAX];
-seqDef SEQUENCE_BUFFER[MAX_NB_SEQ];
+SeqDef SEQUENCE_BUFFER[MAX_NB_SEQ];
BYTE SEQUENCE_LITERAL_BUFFER[ZSTD_BLOCKSIZE_MAX]; /* storeSeq expects a place to copy literals to */
BYTE SEQUENCE_LLCODE[ZSTD_BLOCKSIZE_MAX];
BYTE SEQUENCE_MLCODE[ZSTD_BLOCKSIZE_MAX];
@@ -247,6 +247,12 @@ typedef enum {
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
+typedef enum {
+ lt_raw,
+ lt_rle,
+ lt_compressed,
+} literalType_e;
+
/*-*******************************************************
* Global variables (set from command line)
*********************************************************/
@@ -259,7 +265,11 @@ U32 g_maxBlockSize = ZSTD_BLOCKSIZE_MAX; /* <= 128 KB */
struct {
int contentSize; /* force the content size to be present */
-} opts; /* advanced options on generation */
+ blockType_e *blockType; /* force specific block type */
+ literalType_e *literalType; /* force specific literals type */
+ int frame_header_only; /* generate only frame header */
+ int no_magic; /* do not generate magic number */
+} opts;
/* Generate and write a random frame header */
static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
@@ -288,10 +298,19 @@ static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
{
/* Generate random content size */
+ int force_block_type = opts.blockType != NULL;
size_t highBit;
if (RAND(seed) & 7 && g_maxDecompressedSizeLog > 7) {
/* do content of at least 128 bytes */
highBit = 1ULL << RAND_range(seed, 7, g_maxDecompressedSizeLog);
+ } else if (force_block_type) {
+ if ((RAND(seed) & 3) || (*(opts.blockType) == bt_rle)) {
+ /* do small content */
+ highBit = 1ULL << RAND_range(seed, 0, MIN(7, 1U << g_maxDecompressedSizeLog));
+ } else {
+ /* 0 size frame */
+ highBit = 0;
+ }
} else if (RAND(seed) & 3) {
/* do small content */
highBit = 1ULL << RAND_range(seed, 0, MIN(7, 1U << g_maxDecompressedSizeLog));
@@ -324,8 +343,10 @@ static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
}
/* write out the header */
- MEM_writeLE32(op + pos, ZSTD_MAGICNUMBER);
- pos += 4;
+ if (!opts.no_magic) {
+ MEM_writeLE32(op + pos, ZSTD_MAGICNUMBER);
+ pos += 4;
+ }
{
/*
@@ -370,8 +391,10 @@ static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
/* Write a literal block in either raw or RLE form, return the literals size */
static size_t writeLiteralsBlockSimple(U32* seed, frame_t* frame, size_t contentSize)
{
+ int force_literal_type = opts.literalType != NULL;
+ int const type = (force_literal_type) ? *(opts.literalType) : RAND(seed) % 2;
+
BYTE* op = (BYTE*)frame->data;
- int const type = RAND(seed) % 2;
int const sizeFormatDesc = RAND(seed) % 8;
size_t litSize;
size_t maxLitSize = MIN(contentSize, g_maxBlockSize);
@@ -482,7 +505,7 @@ static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t con
size_t compressedSize = 0;
size_t maxLitSize = MIN(contentSize-3, g_maxBlockSize);
- symbolEncodingType_e hType;
+ SymbolEncodingType_e hType;
if (contentSize < 64) {
/* make sure we get reasonably-sized literals for compression */
@@ -619,15 +642,22 @@ static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t con
static size_t writeLiteralsBlock(U32* seed, frame_t* frame, size_t contentSize)
{
- /* only do compressed for larger segments to avoid compressibility issues */
- if (RAND(seed) & 7 && contentSize >= 64) {
+ int select_compressed = 0;
+ if (opts.literalType) {
+ select_compressed = *(opts.literalType) == lt_compressed;
+ } else {
+ /* only do compressed for larger segments to avoid compressibility issues */
+ select_compressed = RAND(seed) & 7 && contentSize >= 64;
+ }
+
+ if (select_compressed) {
return writeLiteralsBlockCompressed(seed, frame, contentSize);
} else {
return writeLiteralsBlockSimple(seed, frame, contentSize);
}
}
-static inline void initSeqStore(seqStore_t *seqStore) {
+static inline void initSeqStore(SeqStore_t *seqStore) {
seqStore->maxNbSeq = MAX_NB_SEQ;
seqStore->maxNbLit = ZSTD_BLOCKSIZE_MAX;
seqStore->sequencesStart = SEQUENCE_BUFFER;
@@ -641,7 +671,7 @@ static inline void initSeqStore(seqStore_t *seqStore) {
/* Randomly generate sequence commands */
static U32
-generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
+generateSequences(U32* seed, frame_t* frame, SeqStore_t* seqStore,
size_t contentSize, size_t literalsSize, dictInfo info)
{
/* The total length of all the matches */
@@ -802,7 +832,7 @@ static int isSymbolSubset(const BYTE* symbols, size_t len, const BYTE* set, BYTE
return 1;
}
-static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
+static size_t writeSequences(U32* seed, frame_t* frame, SeqStore_t* seqStorePtr,
size_t nbSeq)
{
/* This code is mostly copied from ZSTD_compressSequences in zstd_compress.c */
@@ -812,7 +842,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
FSE_CTable* CTable_OffsetBits = frame->stats.offcodeCTable;
FSE_CTable* CTable_MatchLength = frame->stats.matchlengthCTable;
U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
@@ -998,7 +1028,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
static size_t writeSequencesBlock(U32* seed, frame_t* frame, size_t contentSize,
size_t literalsSize, dictInfo info)
{
- seqStore_t seqStore;
+ SeqStore_t seqStore;
size_t numSequences;
@@ -1034,7 +1064,8 @@ static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize
static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
int lastBlock, dictInfo info)
{
- int const blockTypeDesc = RAND(seed) % 8;
+ int force_block_type = opts.blockType != NULL;
+ int const blockTypeDesc = (force_block_type) ? *(opts.blockType) : RAND(seed) % 8;
size_t blockSize;
int blockType;
@@ -1073,7 +1104,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
frame->data = op;
compressedSize = writeCompressedBlock(seed, frame, contentSize, info);
- if (compressedSize >= contentSize) { /* compressed block must be strictly smaller than uncompressed one */
+ if (compressedSize >= contentSize && !force_block_type) { /* compressed block must be strictly smaller than uncompressed one */
blockType = 0;
memcpy(op, frame->src, contentSize);
@@ -1244,7 +1275,11 @@ static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info)
DISPLAYLEVEL(3, "frame seed: %u\n", (unsigned)seed);
initFrame(fr);
+
writeFrameHeader(&seed, fr, info);
+ if (opts.frame_header_only)
+ return seed;
+
writeBlocks(&seed, fr, info);
writeChecksum(fr);
@@ -1772,6 +1807,9 @@ static void advancedUsage(const char* programName)
DISPLAY( " --max-block-size-log=# : max block size log, must be in range [2, 17]\n");
DISPLAY( " --max-content-size-log=# : max content size log, must be <= 20\n");
DISPLAY( " (this is ignored with gen-blocks)\n");
+ DISPLAY( " --block-type=# : force certain block type (raw=0, rle=1, compressed=2)\n");
+ DISPLAY( " --frame-header-only : dump only frame header\n");
+ DISPLAY( " --no-magic : do not add magic number\n");
}
/*! readU32FromChar() :
@@ -1893,6 +1931,18 @@ int main(int argc, char** argv)
U32 value = readU32FromChar(&argument);
g_maxDecompressedSizeLog =
MIN(MAX_DECOMPRESSED_SIZE_LOG, value);
+ } else if (longCommandWArg(&argument, "block-type=")) {
+ U32 value = readU32FromChar(&argument);
+ opts.blockType = malloc(sizeof(blockType_e));
+ *(opts.blockType) = value;
+ } else if (longCommandWArg(&argument, "literal-type=")) {
+ U32 value = readU32FromChar(&argument);
+ opts.literalType = malloc(sizeof(literalType_e));
+ *(opts.literalType) = value;
+ } else if (strcmp(argument, "frame-header-only") == 0) {
+ opts.frame_header_only = 1;
+ } else if (strcmp(argument, "no-magic") == 0) {
+ opts.no_magic = 1;
} else {
advancedUsage(argv[0]);
return 1;
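A usage sketch for the new generation controls parsed above (assuming decodecorpus's default in-memory round-trip mode; type values follow the usage text: raw=0, rle=1, compressed=2):

    #!/bin/sh
    ./decodecorpus --block-type=2 --literal-type=2
    ./decodecorpus --frame-header-only --no-magic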
@@ -1904,6 +1954,18 @@ int main(int argc, char** argv)
return 1;
} } } }   /* for (argNb=1; argNb<argc; argNb++) */
diff --git a/tests/fullbench.c b/tests/fullbench.c
--- a/tests/fullbench.c
+++ b/tests/fullbench.c
#include "util.h" /* Compiler options, UTIL_GetFileSize */
#include <stdlib.h> /* malloc */
#include <stdio.h> /* fprintf, fopen, ftello64 */
#include
-#include "timefn.h" /* UTIL_clockSpanNano, UTIL_getTime */
#include "mem.h" /* U32 */
+#include "compress/zstd_compress_internal.h"
#ifndef ZSTD_DLL_IMPORT
#include "zstd_internal.h" /* ZSTD_decodeSeqHeaders, ZSTD_blockHeaderSize, ZSTD_getcBlockSize, blockType_e, KB, MB */
#include "decompress/zstd_decompress_internal.h" /* ZSTD_DCtx struct */
@@ -33,10 +35,10 @@
#include "zstd.h" /* ZSTD_versionString */
#include "util.h" /* time functions */
#include "datagen.h"
+#include "lorem.h"
#include "benchfn.h" /* CustomBench */
#include "benchzstd.h" /* MB_UNIT */
-
/*_************************************
* Constants
**************************************/
@@ -51,7 +53,7 @@
#define DEFAULT_CLEVEL 1
-#define COMPRESSIBILITY_DEFAULT 0.50
+#define COMPRESSIBILITY_DEFAULT (-1.0)
static const size_t kSampleSizeDefault = 10000000;
#define TIMELOOP_NANOSEC (1*1000000000ULL) /* 1 second */
@@ -64,6 +66,7 @@ static const size_t kSampleSizeDefault = 10000000;
#define CONTROL(c) { if (!(c)) { abort(); } } /* like assert(), but cannot be disabled */
+
/*_************************************
* Benchmark Parameters
**************************************/
@@ -97,7 +100,6 @@ static size_t BMK_findMaxMem(U64 requiredMem)
*********************************************************/
static ZSTD_CCtx* g_zcc = NULL;
-
static size_t
local_ZSTD_compress(const void* src, size_t srcSize,
void* dst, size_t dstSize,
@@ -129,40 +131,88 @@ local_ZSTD_compress_freshCCtx(const void* src, size_t srcSize,
}
}
-static size_t g_cSize = 0;
+typedef struct {
+ void* prepBuffer;
+ size_t prepSize;
+ void* dst;
+ size_t dstCapacity;
+ size_t fixedOrigSize; /* optional, 0 means "no modification" */
+} PrepResult;
+#define PREPRESULT_INIT { NULL, 0, NULL, 0, 0 }
+
+static PrepResult prepDecompress(const void* src, size_t srcSize, int cLevel)
+{
+ size_t prepCapacity = ZSTD_compressBound(srcSize);
+ void* prepBuffer = malloc(prepCapacity);
+ size_t cSize = ZSTD_compress(prepBuffer, prepCapacity, src, srcSize, cLevel);
+ void* dst = malloc(srcSize);
+ PrepResult r = PREPRESULT_INIT;
+ assert(dst != NULL);
+ r.prepBuffer = prepBuffer;
+ r.prepSize = cSize;
+ r.dst = dst;
+ r.dstCapacity = srcSize;
+ return r;
+}
+
static size_t local_ZSTD_decompress(const void* src, size_t srcSize,
void* dst, size_t dstSize,
- void* buff2)
+ void* unused)
{
- (void)src; (void)srcSize;
- return ZSTD_decompress(dst, dstSize, buff2, g_cSize);
+ (void)unused;
+ return ZSTD_decompress(dst, dstSize, src, srcSize);
}
static ZSTD_DCtx* g_zdc = NULL; /* will be initialized within benchMem */
static size_t local_ZSTD_decompressDCtx(const void* src, size_t srcSize,
void* dst, size_t dstSize,
- void* buff2)
+ void* unused)
{
- (void)src; (void)srcSize;
- return ZSTD_decompressDCtx(g_zdc, dst, dstSize, buff2, g_cSize);
+ (void)unused;
+ return ZSTD_decompressDCtx(g_zdc, dst, dstSize, src, srcSize);
}
#ifndef ZSTD_DLL_IMPORT
-extern size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
- const void* src, size_t srcSize,
- void* dst, size_t dstCapacity);
-static size_t local_ZSTD_decodeLiteralsBlock(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
+static PrepResult prepLiterals(const void* src, size_t srcSize, int cLevel)
{
- (void)src; (void)srcSize; (void)dst; (void)dstSize;
- return ZSTD_decodeLiteralsBlock_wrapper(g_zdc, buff2, g_cSize, dst, dstSize);
+ PrepResult r = PREPRESULT_INIT;
+ size_t dstCapacity = srcSize;
+ void* dst = malloc(dstCapacity);
+ void* prepBuffer;
+ size_t prepSize = ZSTD_compress(dst, dstCapacity, src, srcSize, cLevel);
+ size_t frameHeaderSize = ZSTD_frameHeaderSize(dst, ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
+ CONTROL(!ZSTD_isError(frameHeaderSize));
+ /* check block is compressible, hence contains a literals section */
+ { blockProperties_t bp;
+ ZSTD_getcBlockSize((char*)dst+frameHeaderSize, dstCapacity, &bp); /* Get 1st block type */
+ if (bp.blockType != bt_compressed) {
+ DISPLAY("no compressed literals\n");
+ return r;
+ } }
+ { size_t const skippedSize = frameHeaderSize + ZSTD_blockHeaderSize;
+ prepSize -= skippedSize;
+ prepBuffer = malloc(prepSize);
+ CONTROL(prepBuffer != NULL);
+ memmove(prepBuffer, (char*)dst+skippedSize, prepSize);
+ }
+ ZSTD_decompressBegin(g_zdc);
+ r.prepBuffer = prepBuffer;
+ r.prepSize = prepSize;
+ r.dst = dst;
+ r.dstCapacity = dstCapacity;
+ r.fixedOrigSize = srcSize > 128 KB ? 128 KB : srcSize; /* speed relative to block */
+ return r;
}
-static size_t local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
+extern size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity);
+static size_t
+local_ZSTD_decodeLiteralsBlock(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* unused)
{
- int nbSeq;
- (void)src; (void)srcSize; (void)dst; (void)dstSize;
- return ZSTD_decodeSeqHeaders(g_zdc, &nbSeq, buff2, g_cSize);
+ (void)unused;
+ return ZSTD_decodeLiteralsBlock_wrapper(g_zdc, src, srcSize, dst, dstCapacity);
}
FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src, size_t srcSize)
@@ -170,7 +220,7 @@ FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{
BYTE const* istart = (BYTE const*)src;
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+ SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3);
if (litEncType == set_compressed) {
RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{
@@ -219,11 +269,59 @@ FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src
return 0;
}
-static size_t local_ZSTD_decodeLiteralsHeader(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
+static size_t
+local_ZSTD_decodeLiteralsHeader(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* unused)
+{
+ (void)dst; (void)dstCapacity; (void)unused;
+ return ZSTD_decodeLiteralsHeader(g_zdc, src, srcSize);
+}
+
+static PrepResult prepSequences1stBlock(const void* src, size_t srcSize, int cLevel)
+{
+ PrepResult r = PREPRESULT_INIT;
+ size_t const dstCapacity = srcSize;
+ void* dst = malloc(dstCapacity);
+ const BYTE* ip = dst;
+ const BYTE* iend;
+ { size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, cLevel);
+ CONTROL(cSize > ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
+ }
+ /* Skip frame Header */
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(dst, ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
+ CONTROL(!ZSTD_isError(frameHeaderSize));
+ ip += frameHeaderSize;
+ }
+ /* Find end of block */
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, dstCapacity, &bp); /* Get 1st block type */
+ if (bp.blockType != bt_compressed) {
+ DISPLAY("no compressed sequences\n");
+ return r;
+ }
+ iend = ip + ZSTD_blockHeaderSize + cBlockSize; /* End of first block */
+ }
+ ip += ZSTD_blockHeaderSize; /* skip block header */
+ ZSTD_decompressBegin(g_zdc);
+ CONTROL(iend > ip);
+ ip += ZSTD_decodeLiteralsBlock_wrapper(g_zdc, ip, (size_t)(iend-ip), dst, dstCapacity); /* skip literal segment */
+ r.prepSize = (size_t)(iend-ip);
+ r.prepBuffer = malloc(r.prepSize);
+ CONTROL(r.prepBuffer != NULL);
+ memmove(r.prepBuffer, ip, r.prepSize); /* copy rest of block (it starts with the Sequences Header) */
+ r.dst = dst;
+ r.dstCapacity = dstCapacity;
+ r.fixedOrigSize = srcSize > 128 KB ? 128 KB : srcSize; /* speed relative to block */
+ return r;
+}
+
+static size_t
+local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* unused)
{
- (void)dst, (void)dstSize, (void)src, (void)srcSize;
- return ZSTD_decodeLiteralsHeader(g_zdc, buff2, g_cSize);
+ int nbSeq;
+ (void)unused; (void)dst; (void)dstCapacity;
+ return ZSTD_decodeSeqHeaders(g_zdc, &nbSeq, src, srcSize);
}
+
#endif
static ZSTD_CStream* g_cstream= NULL;
@@ -346,23 +444,22 @@ static ZSTD_DStream* g_dstream= NULL;
static size_t
local_ZSTD_decompressStream(const void* src, size_t srcSize,
void* dst, size_t dstCapacity,
- void* buff2)
+ void* unused)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
- (void)src; (void)srcSize;
+ (void)unused;
ZSTD_initDStream(g_dstream);
buffOut.dst = dst;
buffOut.size = dstCapacity;
buffOut.pos = 0;
- buffIn.src = buff2;
- buffIn.size = g_cSize;
+ buffIn.src = src;
+ buffIn.size = srcSize;
buffIn.pos = 0;
ZSTD_decompressStream(g_dstream, &buffOut, &buffIn);
return buffOut.pos;
}
-#ifndef ZSTD_DLL_IMPORT
static size_t local_ZSTD_compressContinue(const void* src, size_t srcSize,
void* dst, size_t dstCapacity,
void* payload)
@@ -408,15 +505,15 @@ local_ZSTD_compressContinue_extDict(const void* src, size_t srcSize,
static size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize,
void* dst, size_t dstCapacity,
- void* buff2)
+ void* unused)
{
size_t regeneratedSize = 0;
- const BYTE* ip = (const BYTE*)buff2;
- const BYTE* const iend = ip + g_cSize;
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const iend = ip + srcSize;
BYTE* op = (BYTE*)dst;
size_t remainingCapacity = dstCapacity;
- (void)src; (void)srcSize; /* unused */
+ (void)unused;
ZSTD_decompressBegin(g_zdc);
while (ip < iend) {
size_t const iSize = ZSTD_nextSrcSizeToDecompress(g_zdc);
@@ -429,99 +526,300 @@ static size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize,
return regeneratedSize;
}
+
+static PrepResult prepSequences(const void* src, size_t srcSize, int cLevel)
+{
+ PrepResult r = PREPRESULT_INIT;
+ size_t const dstCapacity = ZSTD_compressBound(srcSize);
+ void* const dst = malloc(dstCapacity);
+ size_t const prepCapacity = dstCapacity * 4;
+ void* prepBuffer = malloc(prepCapacity);
+ void* sequencesStart = (char*)prepBuffer + 2*sizeof(unsigned);
+ ZSTD_Sequence* const seqs = sequencesStart;
+ size_t const seqsCapacity = prepCapacity / sizeof(ZSTD_Sequence);
+ size_t nbSeqs;
+ ZSTD_CCtx_reset(g_zcc, ZSTD_reset_session_and_parameters);
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_compressionLevel, cLevel);
+ nbSeqs = ZSTD_generateSequences(g_zcc, seqs, seqsCapacity, src, srcSize);
+ CONTROL(srcSize < UINT_MAX);
+ MEM_write32(prepBuffer, (U32)srcSize);
+ MEM_write32((char*)prepBuffer+4, (U32)nbSeqs);
+ memcpy(seqs + nbSeqs, src, srcSize);
+ r.prepBuffer = prepBuffer;
+ r.prepSize = 8 + sizeof(ZSTD_Sequence)*nbSeqs + srcSize;
+ r.dst = dst;
+ r.dstCapacity = dstCapacity;
+ return r;
+}
+
+static size_t local_compressSequences(const void* input, size_t inputSize,
+ void* dst, size_t dstCapacity,
+ void* payload)
+{
+ const char* ip = input;
+ size_t srcSize = MEM_read32(ip);
+ size_t nbSeqs = MEM_read32(ip+=4);
+ const ZSTD_Sequence* seqs = (const ZSTD_Sequence*)(const void*)(ip+=4);
+ const void* src = (ip+=nbSeqs * sizeof(ZSTD_Sequence));
+ ZSTD_CCtx_reset(g_zcc, ZSTD_reset_session_and_parameters);
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ assert(8 + nbSeqs * sizeof(ZSTD_Sequence) + srcSize == inputSize); (void)inputSize;
+ (void)payload;
+
+ return ZSTD_compressSequences(g_zcc, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
+}
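Not part of the patch: a stand-alone sketch of the packed single-buffer convention shared by prepSequences() above and local_compressSequences(): a 4-byte source size, a 4-byte sequence count, the sequence array, then the raw source, so the benched function can recover everything from its single (src, srcSize) input. SeqDemo is a hypothetical stand-in for ZSTD_Sequence, and plain memcpy replaces MEM_write32/MEM_read32.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* SeqDemo stands in for ZSTD_Sequence; only the layout convention matters here */
typedef struct { uint32_t offset, litLength, matchLength, rep; } SeqDemo;

int main(void)
{
    const SeqDemo seqs[2] = { { 1, 3, 4, 0 }, { 0, 2, 0, 0 } };
    const char src[] = "abcdefghi";
    uint32_t const srcSize = (uint32_t)(sizeof(src) - 1);
    uint32_t const nbSeqs = 2;
    size_t const packedSize = 8 + nbSeqs * sizeof(SeqDemo) + srcSize;
    char* const packed = (char*)malloc(packedSize);
    uint32_t rSrcSize, rNbSeqs;

    if (packed == NULL) return 1;
    /* pack: [U32 srcSize][U32 nbSeqs][sequence array][raw source] */
    memcpy(packed,     &srcSize, 4);   /* host-endian, like MEM_write32 on a little-endian target */
    memcpy(packed + 4, &nbSeqs,  4);
    memcpy(packed + 8, seqs, nbSeqs * sizeof(SeqDemo));
    memcpy(packed + 8 + nbSeqs * sizeof(SeqDemo), src, srcSize);

    /* unpack, mirroring what the benched function does with its single input buffer */
    memcpy(&rSrcSize, packed, 4);
    memcpy(&rNbSeqs,  packed + 4, 4);
    printf("srcSize=%u nbSeqs=%u packedSize=%u\n",
           rSrcSize, rNbSeqs, (unsigned)packedSize);

    free(packed);
    return 0;
}

prepSequencesAndLiterals() below follows the same idea but reserves a third header word (3*sizeof(unsigned)) before the sequence array.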
+
+static PrepResult prepSequencesAndLiterals(const void* src, size_t srcSize, int cLevel)
+{
+ PrepResult r = PREPRESULT_INIT;
+ size_t const dstCapacity = ZSTD_compressBound(srcSize);
+ void* const dst = malloc(dstCapacity);
+ size_t const prepCapacity = dstCapacity * 4;
+ void* prepBuffer = malloc(prepCapacity);
+ void* sequencesStart = (char*)prepBuffer + 3*sizeof(unsigned);
+ ZSTD_Sequence* const seqs = sequencesStart;
+ size_t const seqsCapacity = prepCapacity / sizeof(ZSTD_Sequence);
+ size_t nbSeqs;
+ ZSTD_CCtx_reset(g_zcc, ZSTD_reset_session_and_parameters);
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_compressionLevel, cLevel);
+ nbSeqs = ZSTD_generateSequences(g_zcc, seqs, seqsCapacity, src, srcSize);
+ CONTROL(srcSize < UINT_MAX);
+ MEM_write32(prepBuffer, (U32)srcSize);
+ MEM_write32((char*)prepBuffer+4, (U32)nbSeqs);
+ /* copy literals */
+ { char* const litStart = (char*)(seqs + nbSeqs);
+ size_t nbLiterals = 0;
+ const char* ip = src;
+ size_t n;
+ for (n=0; n<nbSeqs; n++) {
+ ZSTD_resetSeqStore(&g_zcc->seqStore);
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+# if 0 /* for tests */
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_repcodeResolution, ZSTD_ps_enable);
#endif
+ assert(8 + nbSeqs * sizeof(ZSTD_Sequence) == inputSize); (void)inputSize;
+ (void)dst; (void)dstCapacity;
+ (void)payload; (void)blockSize;
+
+ (void)ZSTD_convertBlockSequences(g_zcc, seqs, nbSeqs, 0);
+ return nbSeqs;
+}
+
+static size_t
+check_compressedSequences(const void* compressed, size_t cSize, const void* orig, size_t origSize)
+{
+ size_t decSize;
+ int diff;
+ void* decompressed = malloc(origSize);
+ if (decompressed == NULL) return 2;
+
+ decSize = ZSTD_decompress(decompressed, origSize, compressed, cSize);
+ if (decSize != origSize) { free(decompressed); DISPLAY("ZSTD_decompress failed (%u) ", (unsigned)decSize); return 1; }
+
+ diff = memcmp(decompressed, orig, origSize);
+ if (diff) { free(decompressed); return 1; }
+
+ free(decompressed);
+ return 0;
+}
+
+static size_t
+local_get1BlockSummary(const void* input, size_t inputSize,
+ void* dst, size_t dstCapacity,
+ void* payload)
+{
+ const char* ip = input;
+ size_t const blockSize = MEM_read32(ip);
+ size_t const nbSeqs = MEM_read32(ip+=4);
+ const ZSTD_Sequence* seqs = (const ZSTD_Sequence*)(const void*)(ip+=4);
+ ZSTD_CCtx_reset(g_zcc, ZSTD_reset_session_and_parameters);
+ ZSTD_resetSeqStore(&g_zcc->seqStore);
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ assert(8 + nbSeqs * sizeof(ZSTD_Sequence) == inputSize); (void)inputSize;
+ (void)dst; (void)dstCapacity;
+ (void)payload; (void)blockSize;
+
+ (void)ZSTD_get1BlockSummary(seqs, nbSeqs);
+ return nbSeqs;
+}
+
+static PrepResult prepCopy(const void* src, size_t srcSize, int cLevel)
+{
+ PrepResult r = PREPRESULT_INIT;
+ (void)cLevel;
+ r.prepSize = srcSize;
+ r.prepBuffer = malloc(srcSize);
+ CONTROL(r.prepBuffer != NULL);
+ memcpy(r.prepBuffer, src, srcSize);
+ r.dstCapacity = ZSTD_compressBound(srcSize);
+ r.dst = malloc(r.dstCapacity);
+ CONTROL(r.dst != NULL);
+ return r;
+}
+
+static PrepResult prepShorterDstCapacity(const void* src, size_t srcSize, int cLevel)
+{
+ PrepResult r = prepCopy(src, srcSize, cLevel);
+ assert(r.dstCapacity > 1);
+ r.dstCapacity -= 1;
+ return r;
+}
+
+/*_*******************************************************
+* List of Scenarios
+*********************************************************/
+/* if PrepFunction_f returns PrepResult.prepSize == 0, benchmarking is cancelled */
+typedef PrepResult (*PrepFunction_f)(const void* src, size_t srcSize, int cLevel);
+typedef size_t (*BenchedFunction_f)(const void* src, size_t srcSize, void* dst, size_t dstSize, void* opaque);
+/* must return 0, otherwise verification is considered failed */
+typedef size_t (*VerifFunction_f)(const void* processed, size_t procSize, const void* input, size_t inputSize);
+
+typedef struct {
+ const char* name;
+ PrepFunction_f preparation_f;
+ BenchedFunction_f benched_f;
+ VerifFunction_f verif_f; /* optional */
+} BenchScenario;
+
+static BenchScenario kScenarios[] = {
+ { "compress", NULL, local_ZSTD_compress, check_compressedSequences },
+ { "decompress", prepDecompress, local_ZSTD_decompress, NULL },
+ { "compress_freshCCtx", NULL, local_ZSTD_compress_freshCCtx, check_compressedSequences },
+ { "decompressDCtx", prepDecompress, local_ZSTD_decompressDCtx, NULL },
+ { "compressContinue", NULL, local_ZSTD_compressContinue, check_compressedSequences },
+ { "compressContinue_extDict", NULL, local_ZSTD_compressContinue_extDict, NULL },
+ { "decompressContinue", prepDecompress, local_ZSTD_decompressContinue, NULL },
+ { "compressStream", NULL, local_ZSTD_compressStream, check_compressedSequences },
+ { "compressStream_freshCCtx", NULL, local_ZSTD_compressStream_freshCCtx, check_compressedSequences },
+ { "decompressStream", prepDecompress, local_ZSTD_decompressStream, NULL },
+ { "compress2", NULL, local_ZSTD_compress2, check_compressedSequences },
+ { "compressStream2, end", NULL, local_ZSTD_compressStream2_end, check_compressedSequences },
+ { "compressStream2, end & short", prepShorterDstCapacity, local_ZSTD_compressStream2_end, check_compressedSequences },
+ { "compressStream2, continue", NULL, local_ZSTD_compressStream2_continue, check_compressedSequences },
+ { "compressStream2, -T2, continue", NULL, local_ZSTD_compress_generic_T2_continue, check_compressedSequences },
+ { "compressStream2, -T2, end", NULL, local_ZSTD_compress_generic_T2_end, check_compressedSequences },
+ { "compressSequences", prepSequences, local_compressSequences, check_compressedSequences },
+ { "compressSequencesAndLiterals", prepSequencesAndLiterals, local_compressSequencesAndLiterals, check_compressedSequences },
+ { "convertSequences (1st block)", prepConvertSequences, local_convertSequences, NULL },
+ { "get1BlockSummary (1st block)", prepConvertSequences, local_get1BlockSummary, NULL },
+#ifndef ZSTD_DLL_IMPORT
+ { "decodeLiteralsHeader (1st block)", prepLiterals, local_ZSTD_decodeLiteralsHeader, NULL },
+ { "decodeLiteralsBlock (1st block)", prepLiterals, local_ZSTD_decodeLiteralsBlock, NULL },
+ { "decodeSeqHeaders (1st block)", prepSequences1stBlock, local_ZSTD_decodeSeqHeaders, NULL },
+#endif
+};
+#define NB_SCENARIOS (sizeof(kScenarios) / sizeof(kScenarios[0]))
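Not part of the patch: a minimal, self-contained sketch of the table-driven dispatch that the new kScenarios[] table and the reworked benchMem() rely on. All names below (Prep, Scenario, prepCopyDemo, benchMemcpyDemo) are hypothetical stand-ins with no zstd dependency; the point is only that each entry pairs an optional preparation step with a benched function, and a NULL preparation falls back to a plain copy.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void* prepBuffer; size_t prepSize; void* dst; size_t dstCapacity; } Prep;
typedef Prep   (*prep_f)(const void* src, size_t srcSize);
typedef size_t (*bench_f)(const void* src, size_t srcSize, void* dst, size_t dstCapacity);
typedef struct { const char* name; prep_f prep; bench_f bench; } Scenario;

/* default preparation: benchmark directly on a copy of the input (error handling omitted) */
static Prep prepCopyDemo(const void* src, size_t srcSize)
{
    Prep p;
    p.prepBuffer = malloc(srcSize); memcpy(p.prepBuffer, src, srcSize);
    p.prepSize = srcSize;
    p.dst = malloc(srcSize); p.dstCapacity = srcSize;
    return p;
}

/* stand-in benched function: plain memcpy */
static size_t benchMemcpyDemo(const void* src, size_t srcSize, void* dst, size_t dstCapacity)
{
    (void)dstCapacity;
    memcpy(dst, src, srcSize);
    return srcSize;
}

static const Scenario demoScenarios[] = {
    { "memcpy", NULL, benchMemcpyDemo },   /* NULL prep => default prepCopyDemo */
};

int main(void)
{
    const char input[] = "sample input";
    size_t i;
    for (i = 0; i < sizeof(demoScenarios)/sizeof(demoScenarios[0]); i++) {
        prep_f const prep = demoScenarios[i].prep ? demoScenarios[i].prep : prepCopyDemo;
        Prep p = prep(input, sizeof(input));
        size_t const r = demoScenarios[i].bench(p.prepBuffer, p.prepSize, p.dst, p.dstCapacity);
        printf("%s -> %u bytes\n", demoScenarios[i].name, (unsigned)r);
        free(p.prepBuffer); free(p.dst);
    }
    return 0;
}

In the patch itself, prepCopy() plays the default role, and PrepResult additionally carries a fixedOrigSize override used by the per-block scenarios.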
/*_*******************************************************
-* Bench functions
+* Bench loop
*********************************************************/
-static int benchMem(unsigned benchNb,
- const void* src, size_t srcSize,
+static int benchMem(unsigned scenarioID,
+ const void* origSrc, size_t origSrcSize,
int cLevel, ZSTD_compressionParameters cparams)
{
- size_t dstBuffSize = ZSTD_compressBound(srcSize);
- BYTE* dstBuff;
- void* dstBuff2;
+ size_t dstCapacity = 0;
+ void* dst = NULL;
+ void* prepBuff = NULL;
+ size_t prepBuffSize = 0;
void* payload;
const char* benchName;
BMK_benchFn_t benchFunction;
+ PrepFunction_f prep_f;
+ VerifFunction_f verif_f;
int errorcode = 0;
- /* Selection */
- switch(benchNb)
- {
- case 1:
- benchFunction = local_ZSTD_compress; benchName = "compress";
- break;
- case 2:
- benchFunction = local_ZSTD_decompress; benchName = "decompress";
- break;
- case 3:
- benchFunction = local_ZSTD_compress_freshCCtx; benchName = "compress_freshCCtx";
- break;
- case 4:
- benchFunction = local_ZSTD_decompressDCtx; benchName = "decompressDCtx";
- break;
-#ifndef ZSTD_DLL_IMPORT
- case 11:
- benchFunction = local_ZSTD_compressContinue; benchName = "compressContinue";
- break;
- case 12:
- benchFunction = local_ZSTD_compressContinue_extDict; benchName = "compressContinue_extDict";
- break;
- case 13:
- benchFunction = local_ZSTD_decompressContinue; benchName = "decompressContinue";
- break;
- case 30:
- benchFunction = local_ZSTD_decodeLiteralsHeader; benchName = "decodeLiteralsHeader";
- break;
- case 31:
- benchFunction = local_ZSTD_decodeLiteralsBlock; benchName = "decodeLiteralsBlock";
- break;
- case 32:
- benchFunction = local_ZSTD_decodeSeqHeaders; benchName = "decodeSeqHeaders";
- break;
-#endif
- case 41:
- benchFunction = local_ZSTD_compressStream; benchName = "compressStream";
- break;
- case 42:
- benchFunction = local_ZSTD_decompressStream; benchName = "decompressStream";
- break;
- case 43:
- benchFunction = local_ZSTD_compressStream_freshCCtx; benchName = "compressStream_freshCCtx";
- break;
- case 50:
- benchFunction = local_ZSTD_compress2; benchName = "compress2";
- break;
- case 51:
- benchFunction = local_ZSTD_compressStream2_end; benchName = "compressStream2, end";
- break;
- case 52:
- benchFunction = local_ZSTD_compressStream2_end; benchName = "compressStream2, end & short";
- break;
- case 53:
- benchFunction = local_ZSTD_compressStream2_continue; benchName = "compressStream2, continue";
- break;
- case 61:
- benchFunction = local_ZSTD_compress_generic_T2_continue; benchName = "compress_generic, -T2, continue";
- break;
- case 62:
- benchFunction = local_ZSTD_compress_generic_T2_end; benchName = "compress_generic, -T2, end";
- break;
- default :
- return 0;
- }
+ if (scenarioID >= NB_SCENARIOS) return 0; /* scenario doesn't exist */
- /* Allocation */
- dstBuff = (BYTE*)malloc(dstBuffSize);
- dstBuff2 = malloc(dstBuffSize);
- if ((!dstBuff) || (!dstBuff2)) {
- DISPLAY("\nError: not enough memory!\n");
- free(dstBuff); free(dstBuff2);
- return 12;
- }
- payload = dstBuff2;
+ benchName = kScenarios[scenarioID].name;
+ benchFunction = kScenarios[scenarioID].benched_f;
+ prep_f = kScenarios[scenarioID].preparation_f;
+ verif_f = kScenarios[scenarioID].verif_f;
+ if (prep_f == NULL) prep_f = prepCopy; /* default */
+
+ /* Initialization */
if (g_zcc==NULL) g_zcc = ZSTD_createCCtx();
if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
if (g_cstream==NULL) g_cstream = ZSTD_createCStream();
@@ -538,7 +836,7 @@ static int benchMem(unsigned benchNb,
ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_searchLog, (int)cparams.searchLog);
ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_minMatch, (int)cparams.minMatch);
ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_targetLength, (int)cparams.targetLength);
- ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_strategy, cparams.strategy);
+ ZSTD_CCtx_setParameter(g_zcc, ZSTD_c_strategy, (int)cparams.strategy);
ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_compressionLevel, cLevel);
ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_windowLog, (int)cparams.windowLog);
@@ -547,114 +845,26 @@ static int benchMem(unsigned benchNb,
ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_searchLog, (int)cparams.searchLog);
ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_minMatch, (int)cparams.minMatch);
ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_targetLength, (int)cparams.targetLength);
- ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_strategy, cparams.strategy);
+ ZSTD_CCtx_setParameter(g_cstream, ZSTD_c_strategy, (int)cparams.strategy);
/* Preparation */
- switch(benchNb)
- {
- case 1:
- payload = &cparams;
- break;
- case 2:
- g_cSize = ZSTD_compress(dstBuff2, dstBuffSize, src, srcSize, cLevel);
- break;
- case 3:
- payload = &cparams;
- break;
- case 4:
- g_cSize = ZSTD_compress(dstBuff2, dstBuffSize, src, srcSize, cLevel);
- break;
-#ifndef ZSTD_DLL_IMPORT
- case 11:
- payload = &cparams;
- break;
- case 12:
- payload = &cparams;
- break;
- case 13 :
- g_cSize = ZSTD_compress(dstBuff2, dstBuffSize, src, srcSize, cLevel);
- break;
- case 30: /* ZSTD_decodeLiteralsHeader */
- /* fall-through */
- case 31: /* ZSTD_decodeLiteralsBlock : starts literals block in dstBuff2 */
- { size_t frameHeaderSize;
- g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, cLevel);
- frameHeaderSize = ZSTD_frameHeaderSize(dstBuff, ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
- CONTROL(!ZSTD_isError(frameHeaderSize));
- /* check block is compressible, hence contains a literals section */
- { blockProperties_t bp;
- ZSTD_getcBlockSize(dstBuff+frameHeaderSize, dstBuffSize, &bp); /* Get 1st block type */
- if (bp.blockType != bt_compressed) {
- DISPLAY("ZSTD_decodeLiteralsBlock : impossible to test on this sample (not compressible)\n");
- goto _cleanOut;
- } }
- { size_t const skippedSize = frameHeaderSize + ZSTD_blockHeaderSize;
- memcpy(dstBuff2, dstBuff+skippedSize, g_cSize-skippedSize);
- }
- srcSize = srcSize > 128 KB ? 128 KB : srcSize; /* speed relative to block */
- ZSTD_decompressBegin(g_zdc);
- break;
- }
- case 32: /* ZSTD_decodeSeqHeaders */
- { blockProperties_t bp;
- const BYTE* ip = dstBuff;
- const BYTE* iend;
- { size_t const cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, cLevel);
- CONTROL(cSize > ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
- }
- /* Skip frame Header */
- { size_t const frameHeaderSize = ZSTD_frameHeaderSize(dstBuff, ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
- CONTROL(!ZSTD_isError(frameHeaderSize));
- ip += frameHeaderSize;
- }
- /* Find end of block */
- { size_t const cBlockSize = ZSTD_getcBlockSize(ip, dstBuffSize, &bp); /* Get 1st block type */
- if (bp.blockType != bt_compressed) {
- DISPLAY("ZSTD_decodeSeqHeaders : impossible to test on this sample (not compressible)\n");
- goto _cleanOut;
- }
- iend = ip + ZSTD_blockHeaderSize + cBlockSize; /* End of first block */
- }
- ip += ZSTD_blockHeaderSize; /* skip block header */
- ZSTD_decompressBegin(g_zdc);
- CONTROL(iend > ip);
- ip += ZSTD_decodeLiteralsBlock_wrapper(g_zdc, ip, (size_t)(iend-ip), dstBuff, dstBuffSize); /* skip literal segment */
- g_cSize = (size_t)(iend-ip);
- memcpy(dstBuff2, ip, g_cSize); /* copy rest of block (it starts by SeqHeader) */
- srcSize = srcSize > 128 KB ? 128 KB : srcSize; /* speed relative to block */
- break;
- }
-#else
- case 31:
- goto _cleanOut;
-#endif
- case 41 :
- payload = &cparams;
- break;
- case 42 :
- g_cSize = ZSTD_compress(payload, dstBuffSize, src, srcSize, cLevel);
- break;
- case 43 :
- payload = &cparams;
- break;
-
- case 52 :
- /* compressStream2, short dstCapacity */
- dstBuffSize--;
- break;
-
- /* test functions */
- /* convention: test functions have ID > 100 */
-
- default : ;
+ payload = &cparams;
+ { PrepResult pr = prep_f(origSrc, origSrcSize, cLevel);
+ dst = pr.dst;
+ dstCapacity = pr.dstCapacity;
+ prepBuff = pr.prepBuffer;
+ prepBuffSize = pr.prepSize;
+ if (pr.fixedOrigSize) origSrcSize = pr.fixedOrigSize;
}
+ if (prepBuffSize==0) goto _cleanOut; /* failed preparation */
/* warming up dstBuff */
- { size_t i; for (i=0; i<dstBuffSize; i++) dstBuff[i]=(BYTE)i; }
#include
@@ -76,7 +77,7 @@ static char* generatePseudoRandomString(char* str, size_t size, FUZZ_dataProduce
static size_t decodeSequences(void* dst, size_t nbSequences,
size_t literalsSize,
const void* dict, size_t dictSize,
- ZSTD_sequenceFormat_e mode)
+ ZSTD_SequenceFormat_e mode)
{
const uint8_t* litPtr = literalsBuffer;
const uint8_t* const litBegin = literalsBuffer;
@@ -127,7 +128,7 @@ static size_t decodeSequences(void* dst, size_t nbSequences,
FUZZ_ASSERT(litPtr <= litEnd);
if (mode == ZSTD_sf_noBlockDelimiters) {
const uint32_t lastLLSize = (uint32_t)(litEnd - litPtr);
- if (lastLLSize <= oend - op) {
+ if (lastLLSize <= (uint32_t)(oend - op)) {
memcpy(op, litPtr, lastLLSize);
generatedSrcBufferSize += lastLLSize;
} }
@@ -141,7 +142,7 @@ static size_t decodeSequences(void* dst, size_t nbSequences,
*/
static size_t generateRandomSequences(FUZZ_dataProducer_t* producer,
size_t literalsSizeLimit, size_t dictSize,
- size_t windowLog, ZSTD_sequenceFormat_e mode)
+ size_t windowLog, ZSTD_SequenceFormat_e mode)
{
const uint32_t repCode = 0; /* not used by sequence ingestion api */
size_t windowSize = 1ULL << windowLog;
@@ -155,7 +156,7 @@ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer,
if (mode == ZSTD_sf_explicitBlockDelimiters) {
/* ensure that no sequence can be larger than one block */
literalsSizeLimit = MIN(literalsSizeLimit, blockSizeMax/2);
- matchLengthMax = MIN(matchLengthMax, blockSizeMax/2);
+ matchLengthMax = MIN(matchLengthMax, (uint32_t)blockSizeMax/2);
}
while ( nbSeqGenerated < ZSTD_FUZZ_MAX_NBSEQ - 3 /* extra room for explicit delimiters */
@@ -171,7 +172,7 @@ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer,
if (bytesGenerated > ZSTD_FUZZ_GENERATED_SRC_MAXSIZE) {
break;
}
- offsetBound = (bytesGenerated > windowSize) ? windowSize : bytesGenerated + (uint32_t)dictSize;
+ offsetBound = (bytesGenerated > windowSize) ? (uint32_t)windowSize : bytesGenerated + (uint32_t)dictSize;
offset = FUZZ_dataProducer_uint32Range(producer, 1, offsetBound);
if (dictSize > 0 && bytesGenerated <= windowSize) {
/* Prevent match length from being such that it would be associated with an offset too large
@@ -180,7 +181,7 @@ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer,
*/
const size_t bytesToReachWindowSize = windowSize - bytesGenerated;
if (bytesToReachWindowSize < ZSTD_MINMATCH_MIN) {
- const uint32_t newOffsetBound = offsetBound > windowSize ? windowSize : offsetBound;
+ const uint32_t newOffsetBound = offsetBound > windowSize ? (uint32_t)windowSize : offsetBound;
offset = FUZZ_dataProducer_uint32Range(producer, 1, newOffsetBound);
} else {
matchBound = MIN(matchLengthMax, (uint32_t)bytesToReachWindowSize);
@@ -201,14 +202,14 @@ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer,
if (blockSize + seqSize > blockSizeMax) { /* reaching limit : must end block now */
const ZSTD_Sequence endBlock = {0, 0, 0, 0};
generatedSequences[nbSeqGenerated++] = endBlock;
- blockSize = seqSize;
+ blockSize = (uint32_t)seqSize;
}
if (split) {
const ZSTD_Sequence endBlock = {0, lastLits, 0, 0};
generatedSequences[nbSeqGenerated++] = endBlock;
assert(lastLits <= seq.litLength);
seq.litLength -= lastLits;
- blockSize = seqSize - lastLits;
+ blockSize = (uint32_t)(seqSize - lastLits);
} else {
blockSize += seqSize;
}
@@ -227,12 +228,73 @@ static size_t generateRandomSequences(FUZZ_dataProducer_t* producer,
return nbSeqGenerated;
}
+static size_t
+transferLiterals(void* dst, size_t dstCapacity, const ZSTD_Sequence* seqs, size_t nbSeqs, const void* src, size_t srcSize)
+{
+ size_t n;
+ char* op = dst;
+ char* const oend = op + dstCapacity;
+ const char* ip = src;
+ const char* const iend = ip + srcSize;
+ for (n=0; n= 8);
+ return (size_t)(op - (char*)dst);
+}
+
+static size_t roundTripTest_compressSequencesAndLiterals(
+ void* result, size_t resultCapacity,
+ void* compressed, size_t compressedCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t const litCapacity = srcSize + 8;
+ void* literals = malloc(litCapacity);
+ size_t cSize, litSize;
+
+ assert(literals);
+ litSize = transferLiterals(literals, litCapacity, seqs, nbSeqs, src, srcSize);
+
+ cSize = ZSTD_compressSequencesAndLiterals(cctx,
+ compressed, compressedCapacity,
+ seqs, nbSeqs,
+ literals, litSize, litCapacity, srcSize);
+ free(literals);
+ if (ZSTD_getErrorCode(cSize) == ZSTD_error_cannotProduce_uncompressedBlock) {
+ /* Valid scenario : ZSTD_compressSequencesAndLiterals cannot generate uncompressed blocks */
+ return 0;
+ }
+ if (ZSTD_getErrorCode(cSize) == ZSTD_error_dstSize_tooSmall) {
+ /* Valid scenario : in explicit delimiter mode,
+ * it might be possible for the compressed size to outgrow dstCapacity.
+ * In which case, it's still a valid fuzzer scenario,
+ * but no roundtrip shall be possible */
+ return 0;
+ }
+
+ /* round-trip */
+ FUZZ_ZASSERT(cSize);
+ { size_t const dSize = ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize);
+ FUZZ_ZASSERT(dSize);
+ FUZZ_ASSERT_MSG(dSize == srcSize, "Incorrect regenerated size");
+ FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, result, srcSize), "Corruption!");
+ return dSize;
+ }
+}
+
static size_t roundTripTest(void* result, size_t resultCapacity,
void* compressed, size_t compressedCapacity,
const void* src, size_t srcSize,
- const ZSTD_Sequence* seqs, size_t seqSize,
+ const ZSTD_Sequence* seqs, size_t nbSeqs,
unsigned hasDict,
- ZSTD_sequenceFormat_e mode)
+ ZSTD_SequenceFormat_e mode)
{
size_t cSize;
size_t dSize;
@@ -242,8 +304,17 @@ static size_t roundTripTest(void* result, size_t resultCapacity,
FUZZ_ZASSERT(ZSTD_DCtx_refDDict(dctx, ddict));
}
+ { int blockMode, validation;
+ /* compressSequencesAndLiterals() only supports explicitBlockDelimiters and no validation */
+ FUZZ_ZASSERT(ZSTD_CCtx_getParameter(cctx, ZSTD_c_blockDelimiters, &blockMode));
+ FUZZ_ZASSERT(ZSTD_CCtx_getParameter(cctx, ZSTD_c_validateSequences, &validation));
+ if ((blockMode == ZSTD_sf_explicitBlockDelimiters) && (!validation)) {
+ FUZZ_ZASSERT(roundTripTest_compressSequencesAndLiterals(result, resultCapacity, compressed, compressedCapacity, src, srcSize, seqs, nbSeqs));
+ }
+ }
+
cSize = ZSTD_compressSequences(cctx, compressed, compressedCapacity,
- seqs, seqSize,
+ seqs, nbSeqs,
src, srcSize);
if ( (ZSTD_getErrorCode(cSize) == ZSTD_error_dstSize_tooSmall)
&& (mode == ZSTD_sf_explicitBlockDelimiters) ) {
@@ -276,7 +347,7 @@ int LLVMFuzzerTestOneInput(const uint8_t* src, size_t size)
unsigned hasDict;
unsigned wLog;
int cLevel;
- ZSTD_sequenceFormat_e mode;
+ ZSTD_SequenceFormat_e mode;
FUZZ_dataProducer_t* const producer = FUZZ_dataProducer_create(src, size);
FUZZ_ASSERT(producer);
@@ -293,15 +364,15 @@ int LLVMFuzzerTestOneInput(const uint8_t* src, size_t size)
/* Generate window log first so we don't generate offsets too large */
wLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
cLevel = FUZZ_dataProducer_int32Range(producer, -3, 22);
- mode = (ZSTD_sequenceFormat_e)FUZZ_dataProducer_int32Range(producer, 0, 1);
+ mode = (ZSTD_SequenceFormat_e)FUZZ_dataProducer_int32Range(producer, 0, 1);
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel);
- ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, wLog);
+ ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)wLog);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, ZSTD_MINMATCH_MIN);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
- ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, mode);
+ ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, (int)mode);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach);
if (!literalsBuffer) {
diff --git a/tests/fuzz/simple_round_trip.c b/tests/fuzz/simple_round_trip.c
index 660092e6106..ab50aadb43e 100644
--- a/tests/fuzz/simple_round_trip.c
+++ b/tests/fuzz/simple_round_trip.c
@@ -34,7 +34,7 @@ static size_t getDecompressionMargin(void const* compressed, size_t cSize, size_
/* The macro should be correct in this case, but it may be smaller
* because of e.g. block splitting, so take the smaller of the two.
*/
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
size_t marginM;
FUZZ_ZASSERT(ZSTD_getFrameHeader(&zfh, compressed, cSize));
if (maxBlockSize == 0) {
diff --git a/tests/fuzz/zstd_frame_info.c b/tests/fuzz/zstd_frame_info.c
index 95dbdd49a47..4d17bba7410 100644
--- a/tests/fuzz/zstd_frame_info.c
+++ b/tests/fuzz/zstd_frame_info.c
@@ -21,7 +21,7 @@
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
if (size == 0) {
src = NULL;
}
diff --git a/tests/fuzz/zstd_helpers.c b/tests/fuzz/zstd_helpers.c
index f4cb10823f4..f3b2e6fba4c 100644
--- a/tests/fuzz/zstd_helpers.c
+++ b/tests/fuzz/zstd_helpers.c
@@ -140,12 +140,13 @@ void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer
setRand(cctx, ZSTD_c_forceMaxWindow, 0, 1, producer);
setRand(cctx, ZSTD_c_literalCompressionMode, 0, 2, producer);
setRand(cctx, ZSTD_c_forceAttachDict, 0, 2, producer);
- setRand(cctx, ZSTD_c_useBlockSplitter, 0, 2, producer);
+ setRand(cctx, ZSTD_c_blockSplitterLevel, 0, ZSTD_BLOCKSPLITTER_LEVEL_MAX, producer);
+ setRand(cctx, ZSTD_c_splitAfterSequences, 0, 2, producer);
setRand(cctx, ZSTD_c_deterministicRefPrefix, 0, 1, producer);
setRand(cctx, ZSTD_c_prefetchCDictTables, 0, 2, producer);
setRand(cctx, ZSTD_c_maxBlockSize, ZSTD_BLOCKSIZE_MAX_MIN, ZSTD_BLOCKSIZE_MAX, producer);
setRand(cctx, ZSTD_c_validateSequences, 0, 1, producer);
- setRand(cctx, ZSTD_c_searchForExternalRepcodes, 0, 2, producer);
+ setRand(cctx, ZSTD_c_repcodeResolution, 0, 2, producer);
if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) {
setRand(cctx, ZSTD_c_srcSizeHint, ZSTD_SRCSIZEHINT_MIN, 2 * srcSize, producer);
}
diff --git a/tests/fuzzer.c b/tests/fuzzer.c
index f7bdae90e9a..b74460bb573 100644
--- a/tests/fuzzer.c
+++ b/tests/fuzzer.c
@@ -40,7 +40,6 @@
#include "datagen.h" /* RDG_genBuffer */
#define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#include "xxhash.h" /* XXH64 */
-#include "util.h"
#include "timefn.h" /* SEC_TO_MICRO, UTIL_time_t, UTIL_TIME_INITIALIZER, UTIL_clockSpanMicro, UTIL_getTime */
/* must be included after util.h, due to ERROR macro redefinition issue on Visual Studio */
#include "zstd_internal.h" /* ZSTD_WORKSPACETOOLARGE_MAXDURATION, ZSTD_WORKSPACETOOLARGE_FACTOR, KB, MB */
@@ -310,7 +309,7 @@ static int FUZ_mallocTests(unsigned seed, double compressibility, unsigned part)
#endif
static void FUZ_decodeSequences(BYTE* dst, ZSTD_Sequence* seqs, size_t seqsSize,
- BYTE* src, size_t size, ZSTD_sequenceFormat_e format)
+ BYTE* src, size_t size, ZSTD_SequenceFormat_e format)
{
size_t i;
size_t j;
@@ -339,6 +338,35 @@ static void FUZ_decodeSequences(BYTE* dst, ZSTD_Sequence* seqs, size_t seqsSize,
}
}
+static size_t FUZ_getLitSize(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t n, litSize = 0;
+ assert(seqs != NULL);
+ for (n=0; n<nbSeqs; n++) litSize += seqs[n].litLength;
- DISPLAYLEVEL(5, "(large) %zuKB > 32*%zuKB (small) : ",
- largeCCtxSize>>10, smallCCtxSize>>10);
+ DISPLAYLEVEL(5, "(large) %uKB > 32*%uKB (small) : ",
+ (unsigned)(largeCCtxSize>>10), (unsigned)(smallCCtxSize>>10));
assert(largeCCtxSize > 32* smallCCtxSize); /* note : "too large" definition is handled within zstd_compress.c .
* make this test case extreme, so that it doesn't depend on a possibly fluctuating definition */
}
@@ -2190,7 +2280,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : compress with block splitting : ", testNb++)
{ ZSTD_CCtx* cctx = ZSTD_createCCtx();
- CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable) );
+ CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_splitAfterSequences, ZSTD_ps_enable) );
cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize);
CHECK_Z(cSize);
ZSTD_freeCCtx(cctx);
@@ -2422,7 +2512,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
CHECK_VAR(cSize, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, ZSTD_compressBound(testSize),
(const char*)CNBuffer + dictSize, testSize) );
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
if (ZSTD_getFrameHeader(&zfh, compressedBuffer, cSize)) goto _output_error;
if ((zfh.frameContentSize != testSize) && (zfh.frameContentSize != 0)) goto _output_error;
} }
@@ -2450,7 +2540,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
3742, 3675, 3674, 3665, 3664,
3663, 3662, 3661, 3660, 3660,
3660, 3660, 3660 };
- size_t const target_wdict_cSize[22+1] = { 2830, 2896, 2893, 2820, 2940,
+ size_t const target_wdict_cSize[22+1] = { 2830, 2896, 2893, 2840, 2950,
2950, 2950, 2925, 2900, 2892,
2910, 2910, 2910, 2780, 2775,
2765, 2760, 2755, 2754, 2753,
@@ -3390,7 +3480,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
{ size_t const compressionResult = ZSTD_compress2(cctx,
compressedBuffer, compressedBufferSize,
CNBuffer, srcSize);
- DISPLAYLEVEL(5, "simple=%zu vs %zu=advanced : ", cSize_1pass, compressionResult);
+ DISPLAYLEVEL(5, "simple=%u vs %u=advanced : ", (unsigned)cSize_1pass, (unsigned)compressionResult);
if (ZSTD_isError(compressionResult)) goto _output_error;
if (compressionResult != cSize_1pass) goto _output_error;
} }
@@ -3495,7 +3585,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : decompress of magic-less frame : ", testNb++);
ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
CHECK_Z( ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless) );
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
size_t const zfhrt = ZSTD_getFrameHeader_advanced(&zfh, compressedBuffer, cSize, ZSTD_f_zstd1_magicless);
if (zfhrt != 0) goto _output_error;
}
@@ -3653,16 +3743,19 @@ static int basicUnitTests(U32 const seed, double compressibility)
ZSTD_freeDCtx(dctx);
}
- /* long rle test */
+ /* rle detection test: blocks filled with a single repeated byte must be detected and compressed as RLE blocks */
{ size_t sampleSize = 0;
- size_t expectedCompressedSize = 39; /* block 1, 2: compressed, block 3: RLE, zstd 1.4.4 */
- DISPLAYLEVEL(3, "test%3i : Long RLE test : ", testNb++);
- memset((char*)CNBuffer+sampleSize, 'B', 256 KB - 1);
- sampleSize += 256 KB - 1;
- memset((char*)CNBuffer+sampleSize, 'A', 96 KB);
- sampleSize += 96 KB;
+ size_t maxCompressedSize = 46; /* block 1, 2: compressed, block 3: RLE, zstd 1.4.4 */
+ DISPLAYLEVEL(3, "test%3i : RLE detection test : ", testNb++);
+ memset((char*)CNBuffer+sampleSize, 'B', 256 KB - 2);
+ sampleSize += 256 KB - 2;
+ memset((char*)CNBuffer+sampleSize, 'A', 100 KB);
+ sampleSize += 100 KB;
cSize = ZSTD_compress(compressedBuffer, ZSTD_compressBound(sampleSize), CNBuffer, sampleSize, 1);
- if (ZSTD_isError(cSize) || cSize > expectedCompressedSize) goto _output_error;
+ if (ZSTD_isError(cSize) || cSize > maxCompressedSize) {
+ DISPLAYLEVEL(4, "error: cSize %u > %u expected ! \n", (unsigned)cSize, (unsigned)maxCompressedSize);
+ goto _output_error;
+ }
{ CHECK_NEWV(regenSize, ZSTD_decompress(decodedBuffer, sampleSize, compressedBuffer, cSize));
if (regenSize!=sampleSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
@@ -3743,7 +3836,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
if (seqs == NULL) goto _output_error;
assert(cctx != NULL);
- /* Populate src with random data */
+ /* Populate src with compressible random data */
RDG_genBuffer(CNBuffer, srcSize, compressibility, 0., seed);
/* Roundtrip Test with block delimiters generated by ZSTD_generateSequences() */
@@ -3785,6 +3878,108 @@ static int basicUnitTests(U32 const seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");
+ DISPLAYLEVEL(3, "test%3i : ZSTD_compressSequencesAndLiterals : ", testNb++);
+ {
+ const size_t srcSize = 497000;
+ const BYTE* const src = (BYTE*)CNBuffer;
+ BYTE* const dst = (BYTE*)compressedBuffer;
+ const size_t dstCapacity = ZSTD_compressBound(srcSize);
+ const size_t decompressSize = srcSize;
+ char* const decompressBuffer = (char*)malloc(decompressSize);
+ char* const litBuffer = (char*)malloc(decompressSize);
+ size_t compressedSize;
+
+ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ ZSTD_Sequence* const seqs = (ZSTD_Sequence*)malloc(srcSize * sizeof(ZSTD_Sequence));
+ size_t nbSeqs;
+
+ if (litBuffer == NULL) goto _output_error;
+ if (decompressBuffer == NULL) goto _output_error;
+ if (seqs == NULL) goto _output_error;
+ assert(cctx != NULL);
+
+ /* Populate src with compressible random data */
+ RDG_genBuffer(CNBuffer, srcSize, compressibility, 0., seed);
+
+ /* Roundtrip Test using the AndLiterals() variant */
+ nbSeqs = ZSTD_generateSequences(cctx, seqs, srcSize, src, srcSize);
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
+ ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ { size_t const litSize = FUZ_getLitSize(seqs, nbSeqs);
+ FUZ_transferLiterals(litBuffer, decompressSize, CNBuffer, srcSize, seqs, nbSeqs);
+
+ /* not enough literals: must fail */
+ compressedSize = ZSTD_compressSequencesAndLiterals(cctx, dst, dstCapacity, seqs, nbSeqs, src, litSize-1, decompressSize, srcSize);
+ if (!ZSTD_isError(compressedSize)) {
+ DISPLAY("ZSTD_compressSequencesAndLiterals() should have failed: not enough literals provided\n");
+ goto _output_error;
+ }
+
+ /* too many literals: must fail */
+ compressedSize = ZSTD_compressSequencesAndLiterals(cctx, dst, dstCapacity, seqs, nbSeqs, src, litSize+1, decompressSize, srcSize);
+ if (!ZSTD_isError(compressedSize)) {
+ DISPLAY("ZSTD_compressSequencesAndLiterals() should have failed: too many literals provided\n");
+ goto _output_error;
+ }
+
+ /* srcSize too large: must fail */
+ compressedSize = ZSTD_compressSequencesAndLiterals(cctx, dst, dstCapacity, seqs, nbSeqs, litBuffer, litSize, decompressSize, srcSize+1);
+ if (!ZSTD_isError(compressedSize)) {
+ DISPLAY("ZSTD_compressSequencesAndLiterals() should have failed: srcSize is too large\n");
+ goto _output_error;
+ }
+
+ /* srcSize too small: must fail */
+ compressedSize = ZSTD_compressSequencesAndLiterals(cctx, dst, dstCapacity, seqs, nbSeqs, litBuffer, litSize, decompressSize, srcSize-1);
+ if (!ZSTD_isError(compressedSize)) {
+ DISPLAY("ZSTD_compressSequencesAndLiterals() should have failed: srcSize is too small\n");
+ goto _output_error;
+ }
+
+ /* correct amount of literals: should compress successfully */
+ compressedSize = ZSTD_compressSequencesAndLiterals(cctx, dst, dstCapacity, seqs, nbSeqs, litBuffer, litSize, decompressSize, srcSize);
+ if (ZSTD_isError(compressedSize)) {
+ DISPLAY("Error in ZSTD_compressSequencesAndLiterals()\n");
+ goto _output_error;
+ }
+ }
+ { ZSTD_FrameHeader zfh;
+ size_t const zfhStatus = ZSTD_getFrameHeader(&zfh, dst, compressedSize);
+ if (zfhStatus != 0) {
+ DISPLAY("Error reading frame header\n");
+ goto _output_error;
+ }
+ if (zfh.frameContentSize != srcSize) {
+ DISPLAY("Error: ZSTD_compressSequencesAndLiterals() did not report srcSize in the frame header\n");
+ goto _output_error;
+ }
+ if (zfh.windowSize > srcSize) {
+ DISPLAY("Error: ZSTD_compressSequencesAndLiterals() did not resized window size to smaller contentSize\n");
+ goto _output_error;
+ }
+ }
+ { size_t const dSize = ZSTD_decompress(decompressBuffer, decompressSize, dst, compressedSize);
+ if (ZSTD_isError(dSize)) {
+ DISPLAY("Error during decompression of frame produced by ZSTD_compressSequencesAndLiterals()\n");
+ goto _output_error;
+ }
+ if (dSize != srcSize) {
+ DISPLAY("Error: decompression of frame produced by ZSTD_compressSequencesAndLiterals() has different size\n");
+ goto _output_error;
+ }
+ if (memcmp(decompressBuffer, src, srcSize)) {
+ DISPLAY("Error: decompression of frame produced by ZSTD_compressSequencesAndLiterals() produces a different content (of same size)\n");
+ goto _output_error;
+ }
+ }
+
+ ZSTD_freeCCtx(cctx);
+ free(litBuffer);
+ free(decompressBuffer);
+ free(seqs);
+ }
+ DISPLAYLEVEL(3, "OK \n");
+
/* Multiple blocks of zeros test */
#define LONGZEROSLENGTH 1000000 /* 1MB of zeros */
DISPLAYLEVEL(3, "test%3i : compress %u zeroes : ", testNb++, LONGZEROSLENGTH);
@@ -3915,12 +4110,31 @@ static int basicUnitTests(U32 const seed, double compressibility)
DISPLAYLEVEL(3, "OK \n");
- /* findFrameCompressedSize on skippable frames */
- DISPLAYLEVEL(3, "test%3i : frame compressed size of skippable frame : ", testNb++);
- { const char* frame = "\x50\x2a\x4d\x18\x05\x0\x0\0abcde";
- size_t const frameSrcSize = 13;
- if (ZSTD_findFrameCompressedSize(frame, frameSrcSize) != frameSrcSize) goto _output_error; }
- DISPLAYLEVEL(3, "OK \n");
+ /* frame operations on skippable frames */
+ { const char skippableFrame[] = "\x52\x2a\x4d\x18\x05\x0\x0\0abcde";
+ size_t const skippableFrameSize = sizeof(skippableFrame) - 1 /* remove the terminating \0 */;
+
+ DISPLAYLEVEL(3, "test%3i : ZSTD_findFrameCompressedSize on skippable frame : ", testNb++);
+ CHECK(ZSTD_findFrameCompressedSize(skippableFrame, skippableFrameSize) == skippableFrameSize);
+ DISPLAYLEVEL(3, "OK \n");
+
+ DISPLAYLEVEL(3, "test%3i : ZSTD_getFrameContentSize on skippable frame : ", testNb++);
+ CHECK(ZSTD_getFrameContentSize(skippableFrame, skippableFrameSize) == 0);
+ DISPLAYLEVEL(3, "OK \n");
+
+ DISPLAYLEVEL(3, "test%3i : ZSTD_getFrameHeader on skippable frame : ", testNb++);
+ { ZSTD_FrameHeader zfh;
+ size_t const s = ZSTD_getFrameHeader(&zfh, skippableFrame, skippableFrameSize);
+ CHECK_Z(s);
+ CHECK(s == 0); /* success */
+ CHECK(zfh.frameType == ZSTD_skippableFrame);
+ CHECK(zfh.headerSize == ZSTD_SKIPPABLEHEADERSIZE);
+ CHECK(zfh.dictID == 2); /* magic variant */
+ assert(skippableFrameSize >= ZSTD_SKIPPABLEHEADERSIZE);
+ CHECK(zfh.frameContentSize == skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE);
+ }
+ DISPLAYLEVEL(3, "OK \n");
+ }
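Not part of the patch: a plain-C sketch (no zstd calls) of the skippable-frame layout that the hand-written test vector above encodes: a 4-byte little-endian magic in the 0x184D2A50..0x184D2A5F range whose low nibble is what the test reads back as dictID, a 4-byte little-endian content size, then the payload; the 8-byte prefix is what the test compares against ZSTD_SKIPPABLEHEADERSIZE.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char payload[] = "abcde";
    uint32_t const magic = 0x184D2A50u | 2u;               /* skippable magic, variant 2 as in the test vector */
    uint32_t const contentSize = (uint32_t)(sizeof(payload) - 1);
    unsigned char frame[8 + 5];
    /* layout: 4-byte little-endian magic, 4-byte little-endian content size, then the payload */
    memcpy(frame,     &magic,       4);   /* note: assumes a little-endian host, for brevity */
    memcpy(frame + 4, &contentSize, 4);
    memcpy(frame + 8, payload, contentSize);
    printf("variant=%u contentSize=%u totalFrameSize=%u\n",
           frame[0] & 0x0Fu, (unsigned)contentSize, (unsigned)sizeof(frame));
    return 0;
}

On a little-endian host this prints variant=2 contentSize=5 totalFrameSize=13, matching the dictID, frameContentSize, and findFrameCompressedSize checks above.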
/* error string tests */
DISPLAYLEVEL(3, "test%3i : testing ZSTD error code strings : ", testNb++);
@@ -3939,8 +4153,8 @@ static int basicUnitTests(U32 const seed, double compressibility)
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
ZSTD_CDict* const lgCDict = ZSTD_createCDict(CNBuffer, size, 1);
ZSTD_CDict* const smCDict = ZSTD_createCDict(CNBuffer, 1 KB, 1);
- ZSTD_frameHeader lgHeader;
- ZSTD_frameHeader smHeader;
+ ZSTD_FrameHeader lgHeader;
+ ZSTD_FrameHeader smHeader;
CHECK_Z(ZSTD_compress_usingCDict(cctx, compressedBuffer, compressedBufferSize, CNBuffer, size, lgCDict));
CHECK_Z(ZSTD_getFrameHeader(&lgHeader, compressedBuffer, compressedBufferSize));
@@ -4147,8 +4361,8 @@ static int basicUnitTests(U32 const seed, double compressibility)
for (; level < ZSTD_maxCLevel(); ++level) {
size_t const currSize = ZSTD_estimateCCtxSize(level);
if (prevSize > currSize) {
- DISPLAYLEVEL(3, "Error! previous cctx size: %zu at level: %d is larger than current cctx size: %zu at level: %d",
- prevSize, level-1, currSize, level);
+ DISPLAYLEVEL(3, "Error! previous cctx size: %u at level: %d is larger than current cctx size: %u at level: %d",
+ (unsigned)prevSize, level-1, (unsigned)currSize, level);
goto _output_error;
}
prevSize = currSize;
@@ -4172,8 +4386,8 @@ static int basicUnitTests(U32 const seed, double compressibility)
if (cctxSizeUsingLevel < cctxSizeUsingCParams
|| ZSTD_isError(cctxSizeUsingCParams)
|| ZSTD_isError(cctxSizeUsingLevel)) {
- DISPLAYLEVEL(3, "error! l: %d dict: %zu srcSize: %zu cctx size cpar: %zu, cctx size level: %zu\n",
- level, dictSize, srcSize, cctxSizeUsingCParams, cctxSizeUsingLevel);
+ DISPLAYLEVEL(3, "error! l: %d dict: %u srcSize: %u cctx size cpar: %u, cctx size level: %u\n",
+ level, (unsigned)dictSize, (unsigned)srcSize, (unsigned)cctxSizeUsingCParams, (unsigned)cctxSizeUsingLevel);
goto _output_error;
} } } } }
DISPLAYLEVEL(3, "OK \n");
@@ -4559,7 +4773,7 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const
} }
/* frame header decompression test */
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
CHECK_Z( ZSTD_getFrameHeader(&zfh, cBuffer, cSize) );
CHECK(zfh.frameContentSize != sampleSize, "Frame content size incorrect");
}
@@ -4701,7 +4915,7 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const
/* streaming decompression test */
DISPLAYLEVEL(5, "fuzzer t%u: Bufferless streaming decompression test \n", testNb);
/* ensure memory requirement is good enough (should always be true) */
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
CHECK( ZSTD_getFrameHeader(&zfh, cBuffer, ZSTD_FRAMEHEADERSIZE_MAX),
"ZSTD_getFrameHeader(): error retrieving frame information");
{ size_t const roundBuffSize = ZSTD_decodingBufferSize_min(zfh.windowSize, zfh.frameContentSize);
diff --git a/tests/golden-decompression-errors/truncated_huff_state.zst b/tests/golden-decompression-errors/truncated_huff_state.zst
new file mode 100644
index 00000000000..2ce18c0b7a9
Binary files /dev/null and b/tests/golden-decompression-errors/truncated_huff_state.zst differ
diff --git a/tests/gzip/Makefile b/tests/gzip/Makefile
index cca3109670f..23f5cfdc332 100644
--- a/tests/gzip/Makefile
+++ b/tests/gzip/Makefile
@@ -36,7 +36,7 @@ clean:
#------------------------------------------------------------------------------
# validated only for Linux, macOS, Hurd and some BSD targets
#------------------------------------------------------------------------------
-ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD))
+ifneq (,$(filter Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD,$(shell sh -c 'MSYSTEM="MSYS" uname') ))
test-%: zstd
@./test-driver.sh --test-name $* --log-file $*.log --trs-file $*.trs --expect-failure "no" --color-tests "yes" --enable-hard-errors "yes" ./$*.sh
diff --git a/tests/largeDictionary.c b/tests/largeDictionary.c
new file mode 100644
index 00000000000..ff2bb2d7032
--- /dev/null
+++ b/tests/largeDictionary.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "datagen.h"
+#include "mem.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+
+static int
+compress(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ void* roundtrip, ZSTD_EndDirective end)
+{
+ ZSTD_inBuffer in = {src, srcSize, 0};
+ ZSTD_outBuffer out = {dst, dstCapacity, 0};
+ int ended = 0;
+
+ while (!ended && (in.pos < in.size || out.pos > 0)) {
+ size_t rc;
+ out.pos = 0;
+ rc = ZSTD_compressStream2(cctx, &out, &in, end);
+ if (ZSTD_isError(rc))
+ return 1;
+ if (end == ZSTD_e_end && rc == 0)
+ ended = 1;
+ {
+ ZSTD_inBuffer rtIn = {dst, out.pos, 0};
+ ZSTD_outBuffer rtOut = {roundtrip, srcSize, 0};
+ rc = 1;
+ while (rtIn.pos < rtIn.size || rtOut.pos > 0) {
+ rtOut.pos = 0;
+ rc = ZSTD_decompressStream(dctx, &rtOut, &rtIn);
+ if (ZSTD_isError(rc)) {
+ fprintf(stderr, "Decompression error: %s\n", ZSTD_getErrorName(rc));
+ return 1;
+ }
+ if (rc == 0)
+ break;
+ }
+ if (ended && rc != 0) {
+ fprintf(stderr, "Frame not finished!\n");
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, const char** argv)
+{
+ ZSTD_CCtx* cctx = ZSTD_createCCtx();
+ ZSTD_DCtx* dctx = ZSTD_createDCtx();
+ const size_t dataSize = (size_t)1 << 30;
+ const size_t outSize = ZSTD_compressBound(dataSize);
+ const size_t bufferSize = (size_t)1 << 31;
+ char* buffer = (char*)malloc(bufferSize);
+ void* out = malloc(outSize);
+ void* roundtrip = malloc(dataSize);
+ (void)argc;
+ (void)argv;
+
+ if (!buffer || !out || !roundtrip || !cctx || !dctx) {
+ fprintf(stderr, "Allocation failure\n");
+ return 1;
+ }
+
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 31)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 1)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_overlapLog, 9)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, ZSTD_btopt)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, 7)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, 7)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, 1)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, 10)))
+ return 1;
+ if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, 10)))
+ return 1;
+
+ if (ZSTD_isError(ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 31)))
+ return 1;
+
+ RDG_genBuffer(buffer, bufferSize, 1.0, 0.0, 0xbeefcafe);
+
+ /* Compress 10 x 1 GB chunks (one final 1 GB chunk follows to end the frame) */
+ {
+ int i;
+ for (i = 0; i < 10; ++i) {
+ fprintf(stderr, "Compressing 1 GB\n");
+ if (compress(cctx, dctx, out, outSize, buffer, dataSize, roundtrip, ZSTD_e_continue))
+ return 1;
+ }
+ }
+ fprintf(stderr, "Compressing 1 GB\n");
+ if (compress(cctx, dctx, out, outSize, buffer, dataSize, roundtrip, ZSTD_e_end))
+ return 1;
+
+ fprintf(stderr, "Success!\n");
+
+ free(roundtrip);
+ free(out);
+ free(buffer);
+ ZSTD_freeDCtx(dctx);
+ ZSTD_freeCCtx(cctx);
+ return 0;
+}
diff --git a/tests/paramgrill.c b/tests/paramgrill.c
index 8971c65d627..869e966e05c 100644
--- a/tests/paramgrill.c
+++ b/tests/paramgrill.c
@@ -1273,7 +1273,6 @@ static int createBuffers(buffers_t* buff, const char* const * const fileNamesTab
f = fopen(fileNamesTable[n], "rb");
if (f==NULL) {
DISPLAY("impossible to open file %s\n", fileNamesTable[n]);
- fclose(f);
ret = 10;
goto _cleanUp;
}
diff --git a/tests/playTests.sh b/tests/playTests.sh
index e2a0694f573..5435ff5b305 100755
--- a/tests/playTests.sh
+++ b/tests/playTests.sh
@@ -109,10 +109,7 @@ isTerminal=${isTerminal:-$detectedTerminal}
isWindows=false
INTOVOID="/dev/null"
-case "$UNAME" in
- GNU) DEVDEVICE="/dev/random" ;;
- *) DEVDEVICE="/dev/zero" ;;
-esac
+DEVDEVICE="/dev/zero"
case "$OS" in
Windows*)
isWindows=true
@@ -123,7 +120,6 @@ esac
case "$UNAME" in
Darwin) MD5SUM="md5 -r" ;;
- FreeBSD) MD5SUM="gmd5sum" ;;
NetBSD) MD5SUM="md5 -n" ;;
OpenBSD) MD5SUM="md5" ;;
*) MD5SUM="md5sum" ;;
@@ -1558,7 +1554,6 @@ then
roundTripTest -g4M "1 -T0 --auto-threads=physical"
roundTripTest -g4M "1 -T0 --auto-threads=logical"
roundTripTest -g8M "3 -T2"
- roundTripTest -g8M "19 --long"
roundTripTest -g8000K "2 --threads=2"
fileRoundTripTest -g4M "19 -T2 -B1M"
@@ -1854,6 +1849,8 @@ roundTripTest -g18000017 -P88 17
roundTripTest -g18000018 -P94 18
roundTripTest -g18000019 -P96 19
+roundTripTest -g8M "19 --long"
+
roundTripTest -g5000000000 -P99 "1 --zstd=wlog=25"
roundTripTest -g3700000000 -P0 "1 --zstd=strategy=6,wlog=25" # ensure btlazy2 can survive an overflow rescale
diff --git a/tests/regression/result.c b/tests/regression/result.c
index 8ccb8751e67..a13ef9c17a0 100644
--- a/tests/regression/result.c
+++ b/tests/regression/result.c
@@ -10,7 +10,7 @@
#include "result.h"
-char const* result_get_error_string(result_t result) {
+const char* result_get_error_string(result_t result) {
switch (result_get_error(result)) {
case result_error_ok:
return "okay";
@@ -24,5 +24,7 @@ char const* result_get_error_string(result_t result) {
return "decompression error";
case result_error_round_trip_error:
return "round trip error";
+ default:
+ return "unknown error";
}
}
diff --git a/tests/regression/result.h b/tests/regression/result.h
index 8a761ea4da8..818ec356c81 100644
--- a/tests/regression/result.h
+++ b/tests/regression/result.h
@@ -94,7 +94,7 @@ static result_error_t result_get_error(result_t result) {
return result.internal_error;
}
-char const* result_get_error_string(result_t result);
+const char* result_get_error_string(result_t result);
static result_data_t result_get_data(result_t result) {
return result.internal_data;
diff --git a/tests/regression/results.csv b/tests/regression/results.csv
index fc3fbe7c7e1..c0d4f4ae7ff 100644
--- a/tests/regression/results.csv
+++ b/tests/regression/results.csv
@@ -1,28 +1,28 @@
Data, Config, Method, Total compressed size
-silesia.tar, level -5, compress simple, 6861055
-silesia.tar, level -3, compress simple, 6505483
-silesia.tar, level -1, compress simple, 6179047
-silesia.tar, level 0, compress simple, 4854086
-silesia.tar, level 1, compress simple, 5327717
-silesia.tar, level 3, compress simple, 4854086
-silesia.tar, level 4, compress simple, 4791503
-silesia.tar, level 5, compress simple, 4679004
-silesia.tar, level 6, compress simple, 4614561
-silesia.tar, level 7, compress simple, 4579828
-silesia.tar, level 9, compress simple, 4555448
-silesia.tar, level 13, compress simple, 4502956
-silesia.tar, level 16, compress simple, 4360385
-silesia.tar, level 19, compress simple, 4260939
-silesia.tar, uncompressed literals, compress simple, 4854086
-silesia.tar, uncompressed literals optimal, compress simple, 4260939
-silesia.tar, huffman literals, compress simple, 6179047
-github.tar, level -5, compress simple, 52115
-github.tar, level -3, compress simple, 45678
-github.tar, level -1, compress simple, 42560
-github.tar, level 0, compress simple, 38831
+silesia.tar, level -5, compress simple, 6858730
+silesia.tar, level -3, compress simple, 6502944
+silesia.tar, level -1, compress simple, 6175652
+silesia.tar, level 0, compress simple, 4829268
+silesia.tar, level 1, compress simple, 5307443
+silesia.tar, level 3, compress simple, 4829268
+silesia.tar, level 4, compress simple, 4767074
+silesia.tar, level 5, compress simple, 4662847
+silesia.tar, level 6, compress simple, 4597877
+silesia.tar, level 7, compress simple, 4563998
+silesia.tar, level 9, compress simple, 4537558
+silesia.tar, level 13, compress simple, 4484732
+silesia.tar, level 16, compress simple, 4355572
+silesia.tar, level 19, compress simple, 4257629
+silesia.tar, uncompressed literals, compress simple, 4829268
+silesia.tar, uncompressed literals optimal, compress simple, 4257629
+silesia.tar, huffman literals, compress simple, 6175652
+github.tar, level -5, compress simple, 52173
+github.tar, level -3, compress simple, 45783
+github.tar, level -1, compress simple, 42606
+github.tar, level 0, compress simple, 38884
github.tar, level 1, compress simple, 39200
-github.tar, level 3, compress simple, 38831
-github.tar, level 4, compress simple, 38893
+github.tar, level 3, compress simple, 38884
+github.tar, level 4, compress simple, 38880
github.tar, level 5, compress simple, 39651
github.tar, level 6, compress simple, 39282
github.tar, level 7, compress simple, 38005
@@ -30,45 +30,45 @@ github.tar, level 9, compress
github.tar, level 13, compress simple, 35501
github.tar, level 16, compress simple, 40466
github.tar, level 19, compress simple, 32262
-github.tar, uncompressed literals, compress simple, 38831
+github.tar, uncompressed literals, compress simple, 38884
github.tar, uncompressed literals optimal, compress simple, 32262
-github.tar, huffman literals, compress simple, 42560
-silesia, level -5, compress cctx, 6857372
-silesia, level -3, compress cctx, 6503412
-silesia, level -1, compress cctx, 6172202
-silesia, level 0, compress cctx, 4842075
-silesia, level 1, compress cctx, 5306632
-silesia, level 3, compress cctx, 4842075
-silesia, level 4, compress cctx, 4779186
-silesia, level 5, compress cctx, 4667668
-silesia, level 6, compress cctx, 4604351
-silesia, level 7, compress cctx, 4570271
-silesia, level 9, compress cctx, 4545850
-silesia, level 13, compress cctx, 4493990
-silesia, level 16, compress cctx, 4359652
-silesia, level 19, compress cctx, 4266582
-silesia, long distance mode, compress cctx, 4842075
-silesia, multithreaded, compress cctx, 4842075
-silesia, multithreaded long distance mode, compress cctx, 4842075
-silesia, small window log, compress cctx, 7082951
-silesia, small hash log, compress cctx, 6526141
-silesia, small chain log, compress cctx, 4912197
-silesia, explicit params, compress cctx, 4794318
-silesia, uncompressed literals, compress cctx, 4842075
-silesia, uncompressed literals optimal, compress cctx, 4266582
-silesia, huffman literals, compress cctx, 6172202
-silesia, multithreaded with advanced params, compress cctx, 4842075
+github.tar, huffman literals, compress simple, 42606
+silesia, level -5, compress cctx, 6854688
+silesia, level -3, compress cctx, 6502839
+silesia, level -1, compress cctx, 6173625
+silesia, level 0, compress cctx, 4832054
+silesia, level 1, compress cctx, 5304296
+silesia, level 3, compress cctx, 4832054
+silesia, level 4, compress cctx, 4768799
+silesia, level 5, compress cctx, 4663718
+silesia, level 6, compress cctx, 4600034
+silesia, level 7, compress cctx, 4566069
+silesia, level 9, compress cctx, 4540520
+silesia, level 13, compress cctx, 4488969
+silesia, level 16, compress cctx, 4356799
+silesia, level 19, compress cctx, 4265851
+silesia, long distance mode, compress cctx, 4832054
+silesia, multithreaded, compress cctx, 4832054
+silesia, multithreaded long distance mode, compress cctx, 4832054
+silesia, small window log, compress cctx, 7082907
+silesia, small hash log, compress cctx, 6525510
+silesia, small chain log, compress cctx, 4912248
+silesia, explicit params, compress cctx, 4789676
+silesia, uncompressed literals, compress cctx, 4832054
+silesia, uncompressed literals optimal, compress cctx, 4265851
+silesia, huffman literals, compress cctx, 6173625
+silesia, multithreaded with advanced params, compress cctx, 4832054
github, level -5, compress cctx, 204407
github, level -5 with dict, compress cctx, 47581
github, level -3, compress cctx, 193253
github, level -3 with dict, compress cctx, 43043
github, level -1, compress cctx, 175468
github, level -1 with dict, compress cctx, 42044
-github, level 0, compress cctx, 136332
+github, level 0, compress cctx, 136331
github, level 0 with dict, compress cctx, 41534
github, level 1, compress cctx, 142365
github, level 1 with dict, compress cctx, 41715
-github, level 3, compress cctx, 136332
+github, level 3, compress cctx, 136331
github, level 3 with dict, compress cctx, 41534
github, level 4, compress cctx, 136199
github, level 4 with dict, compress cctx, 41725
@@ -93,75 +93,75 @@ github, small window log, compress
github, small hash log, compress cctx, 138949
github, small chain log, compress cctx, 139242
github, explicit params, compress cctx, 140932
-github, uncompressed literals, compress cctx, 136332
+github, uncompressed literals, compress cctx, 136331
github, uncompressed literals optimal, compress cctx, 132879
github, huffman literals, compress cctx, 175468
github, multithreaded with advanced params, compress cctx, 141069
-silesia, level -5, zstdcli, 6857420
-silesia, level -3, zstdcli, 6503460
-silesia, level -1, zstdcli, 6172250
-silesia, level 0, zstdcli, 4842123
-silesia, level 1, zstdcli, 5306680
-silesia, level 3, zstdcli, 4842123
-silesia, level 4, zstdcli, 4779234
-silesia, level 5, zstdcli, 4667716
-silesia, level 6, zstdcli, 4604399
-silesia, level 7, zstdcli, 4570319
-silesia, level 9, zstdcli, 4545898
-silesia, level 13, zstdcli, 4494038
-silesia, level 16, zstdcli, 4359700
-silesia, level 19, zstdcli, 4266630
-silesia, long distance mode, zstdcli, 4833785
-silesia, multithreaded, zstdcli, 4842123
-silesia, multithreaded long distance mode, zstdcli, 4833785
-silesia, small window log, zstdcli, 7095048
-silesia, small hash log, zstdcli, 6526189
-silesia, small chain log, zstdcli, 4912245
-silesia, explicit params, zstdcli, 4795840
-silesia, uncompressed literals, zstdcli, 5120614
-silesia, uncompressed literals optimal, zstdcli, 4316928
-silesia, huffman literals, zstdcli, 5321417
-silesia, multithreaded with advanced params, zstdcli, 5120614
-silesia.tar, level -5, zstdcli, 6862049
-silesia.tar, level -3, zstdcli, 6506509
-silesia.tar, level -1, zstdcli, 6179789
-silesia.tar, level 0, zstdcli, 4854164
-silesia.tar, level 1, zstdcli, 5329010
-silesia.tar, level 3, zstdcli, 4854164
-silesia.tar, level 4, zstdcli, 4792352
-silesia.tar, level 5, zstdcli, 4679860
-silesia.tar, level 6, zstdcli, 4615355
-silesia.tar, level 7, zstdcli, 4581791
-silesia.tar, level 9, zstdcli, 4555452
-silesia.tar, level 13, zstdcli, 4502960
-silesia.tar, level 16, zstdcli, 4360389
-silesia.tar, level 19, zstdcli, 4260943
-silesia.tar, no source size, zstdcli, 4854160
-silesia.tar, long distance mode, zstdcli, 4845745
-silesia.tar, multithreaded, zstdcli, 4854164
-silesia.tar, multithreaded long distance mode, zstdcli, 4845745
-silesia.tar, small window log, zstdcli, 7100701
-silesia.tar, small hash log, zstdcli, 6529264
-silesia.tar, small chain log, zstdcli, 4917022
-silesia.tar, explicit params, zstdcli, 4821112
-silesia.tar, uncompressed literals, zstdcli, 5122571
-silesia.tar, uncompressed literals optimal, zstdcli, 4308455
-silesia.tar, huffman literals, zstdcli, 5342074
-silesia.tar, multithreaded with advanced params, zstdcli, 5122571
+silesia, level -5, zstdcli, 6854509
+silesia, level -3, zstdcli, 6502336
+silesia, level -1, zstdcli, 6171366
+silesia, level 0, zstdcli, 4833113
+silesia, level 1, zstdcli, 5302161
+silesia, level 3, zstdcli, 4833113
+silesia, level 4, zstdcli, 4770061
+silesia, level 5, zstdcli, 4663332
+silesia, level 6, zstdcli, 4599601
+silesia, level 7, zstdcli, 4565601
+silesia, level 9, zstdcli, 4540082
+silesia, level 13, zstdcli, 4488438
+silesia, level 16, zstdcli, 4358150
+silesia, level 19, zstdcli, 4265929
+silesia, long distance mode, zstdcli, 4824341
+silesia, multithreaded, zstdcli, 4833113
+silesia, multithreaded long distance mode, zstdcli, 4824341
+silesia, small window log, zstdcli, 7094528
+silesia, small hash log, zstdcli, 6527214
+silesia, small chain log, zstdcli, 4911647
+silesia, explicit params, zstdcli, 4790803
+silesia, uncompressed literals, zstdcli, 5118235
+silesia, uncompressed literals optimal, zstdcli, 4316761
+silesia, huffman literals, zstdcli, 5316827
+silesia, multithreaded with advanced params, zstdcli, 5118235
+silesia.tar, level -5, zstdcli, 6859945
+silesia.tar, level -3, zstdcli, 6504296
+silesia.tar, level -1, zstdcli, 6176520
+silesia.tar, level 0, zstdcli, 4836004
+silesia.tar, level 1, zstdcli, 5309074
+silesia.tar, level 3, zstdcli, 4836004
+silesia.tar, level 4, zstdcli, 4774061
+silesia.tar, level 5, zstdcli, 4667310
+silesia.tar, level 6, zstdcli, 4602398
+silesia.tar, level 7, zstdcli, 4568891
+silesia.tar, level 9, zstdcli, 4541098
+silesia.tar, level 13, zstdcli, 4488484
+silesia.tar, level 16, zstdcli, 4357018
+silesia.tar, level 19, zstdcli, 4259593
+silesia.tar, no source size, zstdcli, 4836000
+silesia.tar, long distance mode, zstdcli, 4827830
+silesia.tar, multithreaded, zstdcli, 4836004
+silesia.tar, multithreaded long distance mode, zstdcli, 4827830
+silesia.tar, small window log, zstdcli, 7100110
+silesia.tar, small hash log, zstdcli, 6530127
+silesia.tar, small chain log, zstdcli, 4915865
+silesia.tar, explicit params, zstdcli, 4808370
+silesia.tar, uncompressed literals, zstdcli, 5116583
+silesia.tar, uncompressed literals optimal, zstdcli, 4306520
+silesia.tar, huffman literals, zstdcli, 5324019
+silesia.tar, multithreaded with advanced params, zstdcli, 5116583
github, level -5, zstdcli, 206407
github, level -5 with dict, zstdcli, 47832
github, level -3, zstdcli, 195253
github, level -3 with dict, zstdcli, 46671
github, level -1, zstdcli, 177468
github, level -1 with dict, zstdcli, 43825
-github, level 0, zstdcli, 138332
-github, level 0 with dict, zstdcli, 43148
+github, level 0, zstdcli, 138331
+github, level 0 with dict, zstdcli, 43118
github, level 1, zstdcli, 144365
github, level 1 with dict, zstdcli, 43266
-github, level 3, zstdcli, 138332
-github, level 3 with dict, zstdcli, 43148
+github, level 3, zstdcli, 138331
+github, level 3 with dict, zstdcli, 43118
github, level 4, zstdcli, 138199
-github, level 4 with dict, zstdcli, 43251
+github, level 4 with dict, zstdcli, 43229
github, level 5, zstdcli, 137121
github, level 5 with dict, zstdcli, 40728
github, level 6, zstdcli, 137122
@@ -176,30 +176,30 @@ github, level 16, zstdcli,
github, level 16 with dict, zstdcli, 39902
github, level 19, zstdcli, 134879
github, level 19 with dict, zstdcli, 39916
-github, long distance mode, zstdcli, 138332
-github, multithreaded, zstdcli, 138332
-github, multithreaded long distance mode, zstdcli, 138332
-github, small window log, zstdcli, 138332
+github, long distance mode, zstdcli, 138331
+github, multithreaded, zstdcli, 138331
+github, multithreaded long distance mode, zstdcli, 138331
+github, small window log, zstdcli, 138331
github, small hash log, zstdcli, 137590
github, small chain log, zstdcli, 138341
github, explicit params, zstdcli, 136197
-github, uncompressed literals, zstdcli, 167911
+github, uncompressed literals, zstdcli, 167909
github, uncompressed literals optimal, zstdcli, 154667
github, huffman literals, zstdcli, 144365
-github, multithreaded with advanced params, zstdcli, 167911
-github.tar, level -5, zstdcli, 52119
-github.tar, level -5 with dict, zstdcli, 51101
-github.tar, level -3, zstdcli, 45682
-github.tar, level -3 with dict, zstdcli, 44738
-github.tar, level -1, zstdcli, 42564
-github.tar, level -1 with dict, zstdcli, 41357
-github.tar, level 0, zstdcli, 38835
+github, multithreaded with advanced params, zstdcli, 167909
+github.tar, level -5, zstdcli, 52231
+github.tar, level -5 with dict, zstdcli, 51249
+github.tar, level -3, zstdcli, 45778
+github.tar, level -3 with dict, zstdcli, 44847
+github.tar, level -1, zstdcli, 42680
+github.tar, level -1 with dict, zstdcli, 41486
+github.tar, level 0, zstdcli, 38888
github.tar, level 0 with dict, zstdcli, 37999
-github.tar, level 1, zstdcli, 39204
-github.tar, level 1 with dict, zstdcli, 38123
-github.tar, level 3, zstdcli, 38835
+github.tar, level 1, zstdcli, 39340
+github.tar, level 1 with dict, zstdcli, 38230
+github.tar, level 3, zstdcli, 38888
github.tar, level 3 with dict, zstdcli, 37999
-github.tar, level 4, zstdcli, 38897
+github.tar, level 4, zstdcli, 38884
github.tar, level 4 with dict, zstdcli, 37952
github.tar, level 5, zstdcli, 39655
github.tar, level 5 with dict, zstdcli, 39073
@@ -215,97 +215,97 @@ github.tar, level 16, zstdcli,
github.tar, level 16 with dict, zstdcli, 33379
github.tar, level 19, zstdcli, 32266
github.tar, level 19 with dict, zstdcli, 32705
-github.tar, no source size, zstdcli, 38832
-github.tar, no source size with dict, zstdcli, 38004
-github.tar, long distance mode, zstdcli, 40236
-github.tar, multithreaded, zstdcli, 38835
-github.tar, multithreaded long distance mode, zstdcli, 40236
-github.tar, small window log, zstdcli, 198544
+github.tar, no source size, zstdcli, 38885
+github.tar, no source size with dict, zstdcli, 38115
+github.tar, long distance mode, zstdcli, 40143
+github.tar, multithreaded, zstdcli, 38888
+github.tar, multithreaded long distance mode, zstdcli, 40143
+github.tar, small window log, zstdcli, 198539
github.tar, small hash log, zstdcli, 129874
github.tar, small chain log, zstdcli, 41673
github.tar, explicit params, zstdcli, 41385
-github.tar, uncompressed literals, zstdcli, 41529
+github.tar, uncompressed literals, zstdcli, 41566
github.tar, uncompressed literals optimal, zstdcli, 35360
-github.tar, huffman literals, zstdcli, 38857
-github.tar, multithreaded with advanced params, zstdcli, 41529
-silesia, level -5, advanced one pass, 6857372
-silesia, level -3, advanced one pass, 6503412
-silesia, level -1, advanced one pass, 6172202
-silesia, level 0, advanced one pass, 4842075
-silesia, level 1, advanced one pass, 5306632
-silesia, level 3, advanced one pass, 4842075
-silesia, level 4, advanced one pass, 4779186
-silesia, level 5 row 1, advanced one pass, 4667668
-silesia, level 5 row 2, advanced one pass, 4670326
-silesia, level 5, advanced one pass, 4667668
-silesia, level 6, advanced one pass, 4604351
-silesia, level 7 row 1, advanced one pass, 4570271
-silesia, level 7 row 2, advanced one pass, 4565169
-silesia, level 7, advanced one pass, 4570271
-silesia, level 9, advanced one pass, 4545850
-silesia, level 11 row 1, advanced one pass, 4505658
-silesia, level 11 row 2, advanced one pass, 4503429
-silesia, level 12 row 1, advanced one pass, 4505658
-silesia, level 12 row 2, advanced one pass, 4503429
-silesia, level 13, advanced one pass, 4493990
-silesia, level 16, advanced one pass, 4359652
-silesia, level 19, advanced one pass, 4266582
-silesia, no source size, advanced one pass, 4842075
-silesia, long distance mode, advanced one pass, 4833710
-silesia, multithreaded, advanced one pass, 4842075
-silesia, multithreaded long distance mode, advanced one pass, 4833737
-silesia, small window log, advanced one pass, 7095000
-silesia, small hash log, advanced one pass, 6526141
-silesia, small chain log, advanced one pass, 4912197
-silesia, explicit params, advanced one pass, 4795840
-silesia, uncompressed literals, advanced one pass, 5120566
-silesia, uncompressed literals optimal, advanced one pass, 4316880
-silesia, huffman literals, advanced one pass, 5321369
-silesia, multithreaded with advanced params, advanced one pass, 5120566
-silesia.tar, level -5, advanced one pass, 6861055
-silesia.tar, level -3, advanced one pass, 6505483
-silesia.tar, level -1, advanced one pass, 6179047
-silesia.tar, level 0, advanced one pass, 4854086
-silesia.tar, level 1, advanced one pass, 5327717
-silesia.tar, level 3, advanced one pass, 4854086
-silesia.tar, level 4, advanced one pass, 4791503
-silesia.tar, level 5 row 1, advanced one pass, 4679004
-silesia.tar, level 5 row 2, advanced one pass, 4682334
-silesia.tar, level 5, advanced one pass, 4679004
-silesia.tar, level 6, advanced one pass, 4614561
-silesia.tar, level 7 row 1, advanced one pass, 4579828
-silesia.tar, level 7 row 2, advanced one pass, 4575602
-silesia.tar, level 7, advanced one pass, 4579828
-silesia.tar, level 9, advanced one pass, 4555448
-silesia.tar, level 11 row 1, advanced one pass, 4514962
-silesia.tar, level 11 row 2, advanced one pass, 4513816
-silesia.tar, level 12 row 1, advanced one pass, 4514517
-silesia.tar, level 12 row 2, advanced one pass, 4514007
-silesia.tar, level 13, advanced one pass, 4502956
-silesia.tar, level 16, advanced one pass, 4360385
-silesia.tar, level 19, advanced one pass, 4260939
-silesia.tar, no source size, advanced one pass, 4854086
-silesia.tar, long distance mode, advanced one pass, 4840452
-silesia.tar, multithreaded, advanced one pass, 4854160
-silesia.tar, multithreaded long distance mode, advanced one pass, 4845741
-silesia.tar, small window log, advanced one pass, 7100655
-silesia.tar, small hash log, advanced one pass, 6529206
-silesia.tar, small chain log, advanced one pass, 4917041
-silesia.tar, explicit params, advanced one pass, 4807274
-silesia.tar, uncompressed literals, advanced one pass, 5122473
-silesia.tar, uncompressed literals optimal, advanced one pass, 4308451
-silesia.tar, huffman literals, advanced one pass, 5341705
-silesia.tar, multithreaded with advanced params, advanced one pass, 5122567
+github.tar, huffman literals, zstdcli, 38989
+github.tar, multithreaded with advanced params, zstdcli, 41566
+silesia, level -5, advanced one pass, 6854688
+silesia, level -3, advanced one pass, 6502839
+silesia, level -1, advanced one pass, 6173625
+silesia, level 0, advanced one pass, 4832054
+silesia, level 1, advanced one pass, 5304296
+silesia, level 3, advanced one pass, 4832054
+silesia, level 4, advanced one pass, 4768799
+silesia, level 5 row 1, advanced one pass, 4663718
+silesia, level 5 row 2, advanced one pass, 4666272
+silesia, level 5, advanced one pass, 4663718
+silesia, level 6, advanced one pass, 4600034
+silesia, level 7 row 1, advanced one pass, 4566069
+silesia, level 7 row 2, advanced one pass, 4560893
+silesia, level 7, advanced one pass, 4566069
+silesia, level 9, advanced one pass, 4540520
+silesia, level 11 row 1, advanced one pass, 4500472
+silesia, level 11 row 2, advanced one pass, 4498174
+silesia, level 12 row 1, advanced one pass, 4500472
+silesia, level 12 row 2, advanced one pass, 4498174
+silesia, level 13, advanced one pass, 4488969
+silesia, level 16, advanced one pass, 4356799
+silesia, level 19, advanced one pass, 4265851
+silesia, no source size, advanced one pass, 4832054
+silesia, long distance mode, advanced one pass, 4823264
+silesia, multithreaded, advanced one pass, 4833065
+silesia, multithreaded long distance mode, advanced one pass, 4824293
+silesia, small window log, advanced one pass, 7094480
+silesia, small hash log, advanced one pass, 6525510
+silesia, small chain log, advanced one pass, 4912248
+silesia, explicit params, advanced one pass, 4791219
+silesia, uncompressed literals, advanced one pass, 5117526
+silesia, uncompressed literals optimal, advanced one pass, 4316644
+silesia, huffman literals, advanced one pass, 5319104
+silesia, multithreaded with advanced params, advanced one pass, 5118187
+silesia.tar, level -5, advanced one pass, 6858730
+silesia.tar, level -3, advanced one pass, 6502944
+silesia.tar, level -1, advanced one pass, 6175652
+silesia.tar, level 0, advanced one pass, 4829268
+silesia.tar, level 1, advanced one pass, 5307443
+silesia.tar, level 3, advanced one pass, 4829268
+silesia.tar, level 4, advanced one pass, 4767074
+silesia.tar, level 5 row 1, advanced one pass, 4662847
+silesia.tar, level 5 row 2, advanced one pass, 4666825
+silesia.tar, level 5, advanced one pass, 4662847
+silesia.tar, level 6, advanced one pass, 4597877
+silesia.tar, level 7 row 1, advanced one pass, 4563998
+silesia.tar, level 7 row 2, advanced one pass, 4559520
+silesia.tar, level 7, advanced one pass, 4563998
+silesia.tar, level 9, advanced one pass, 4537558
+silesia.tar, level 11 row 1, advanced one pass, 4496590
+silesia.tar, level 11 row 2, advanced one pass, 4495225
+silesia.tar, level 12 row 1, advanced one pass, 4496084
+silesia.tar, level 12 row 2, advanced one pass, 4495434
+silesia.tar, level 13, advanced one pass, 4484732
+silesia.tar, level 16, advanced one pass, 4355572
+silesia.tar, level 19, advanced one pass, 4257629
+silesia.tar, no source size, advanced one pass, 4829268
+silesia.tar, long distance mode, advanced one pass, 4815868
+silesia.tar, multithreaded, advanced one pass, 4836000
+silesia.tar, multithreaded long distance mode, advanced one pass, 4827826
+silesia.tar, small window log, advanced one pass, 7100064
+silesia.tar, small hash log, advanced one pass, 6530222
+silesia.tar, small chain log, advanced one pass, 4915689
+silesia.tar, explicit params, advanced one pass, 4790421
+silesia.tar, uncompressed literals, advanced one pass, 5114702
+silesia.tar, uncompressed literals optimal, advanced one pass, 4306289
+silesia.tar, huffman literals, advanced one pass, 5323421
+silesia.tar, multithreaded with advanced params, advanced one pass, 5116579
github, level -5, advanced one pass, 204407
github, level -5 with dict, advanced one pass, 45832
github, level -3, advanced one pass, 193253
github, level -3 with dict, advanced one pass, 44671
github, level -1, advanced one pass, 175468
github, level -1 with dict, advanced one pass, 41825
-github, level 0, advanced one pass, 136332
-github, level 0 with dict, advanced one pass, 41148
-github, level 0 with dict dms, advanced one pass, 41148
-github, level 0 with dict dds, advanced one pass, 41148
+github, level 0, advanced one pass, 136331
+github, level 0 with dict, advanced one pass, 41118
+github, level 0 with dict dms, advanced one pass, 41118
+github, level 0 with dict dds, advanced one pass, 41118
github, level 0 with dict copy, advanced one pass, 41124
github, level 0 with dict load, advanced one pass, 41847
github, level 1, advanced one pass, 142365
@@ -314,16 +314,16 @@ github, level 1 with dict dms, advanced
github, level 1 with dict dds, advanced one pass, 41266
github, level 1 with dict copy, advanced one pass, 41279
github, level 1 with dict load, advanced one pass, 43331
-github, level 3, advanced one pass, 136332
-github, level 3 with dict, advanced one pass, 41148
-github, level 3 with dict dms, advanced one pass, 41148
-github, level 3 with dict dds, advanced one pass, 41148
+github, level 3, advanced one pass, 136331
+github, level 3 with dict, advanced one pass, 41118
+github, level 3 with dict dms, advanced one pass, 41118
+github, level 3 with dict dds, advanced one pass, 41118
github, level 3 with dict copy, advanced one pass, 41124
github, level 3 with dict load, advanced one pass, 41847
github, level 4, advanced one pass, 136199
-github, level 4 with dict, advanced one pass, 41251
-github, level 4 with dict dms, advanced one pass, 41251
-github, level 4 with dict dds, advanced one pass, 41251
+github, level 4 with dict, advanced one pass, 41229
+github, level 4 with dict dms, advanced one pass, 41229
+github, level 4 with dict dds, advanced one pass, 41229
github, level 4 with dict copy, advanced one pass, 41216
github, level 4 with dict load, advanced one pass, 41548
github, level 5 row 1, advanced one pass, 134584
@@ -395,60 +395,60 @@ github, level 13 with dict, advanced
github, level 13 with dict dms, advanced one pass, 39900
github, level 13 with dict dds, advanced one pass, 39900
github, level 13 with dict copy, advanced one pass, 39948
-github, level 13 with dict load, advanced one pass, 42624
+github, level 13 with dict load, advanced one pass, 42643
github, level 16, advanced one pass, 133209
github, level 16 with dict, advanced one pass, 37902
github, level 16 with dict dms, advanced one pass, 37902
github, level 16 with dict dds, advanced one pass, 37902
github, level 16 with dict copy, advanced one pass, 37892
-github, level 16 with dict load, advanced one pass, 42402
+github, level 16 with dict load, advanced one pass, 42434
github, level 19, advanced one pass, 132879
github, level 19 with dict, advanced one pass, 37916
github, level 19 with dict dms, advanced one pass, 37916
github, level 19 with dict dds, advanced one pass, 37916
github, level 19 with dict copy, advanced one pass, 37906
-github, level 19 with dict load, advanced one pass, 39770
-github, no source size, advanced one pass, 136332
-github, no source size with dict, advanced one pass, 41148
-github, long distance mode, advanced one pass, 136332
-github, multithreaded, advanced one pass, 136332
-github, multithreaded long distance mode, advanced one pass, 136332
-github, small window log, advanced one pass, 136332
+github, level 19 with dict load, advanced one pass, 40405
+github, no source size, advanced one pass, 136331
+github, no source size with dict, advanced one pass, 41118
+github, long distance mode, advanced one pass, 136331
+github, multithreaded, advanced one pass, 136331
+github, multithreaded long distance mode, advanced one pass, 136331
+github, small window log, advanced one pass, 136331
github, small hash log, advanced one pass, 135590
github, small chain log, advanced one pass, 136341
github, explicit params, advanced one pass, 137727
-github, uncompressed literals, advanced one pass, 165911
+github, uncompressed literals, advanced one pass, 165909
github, uncompressed literals optimal, advanced one pass, 152667
github, huffman literals, advanced one pass, 142365
-github, multithreaded with advanced params, advanced one pass, 165911
-github.tar, level -5, advanced one pass, 52115
-github.tar, level -5 with dict, advanced one pass, 51097
-github.tar, level -3, advanced one pass, 45678
-github.tar, level -3 with dict, advanced one pass, 44734
-github.tar, level -1, advanced one pass, 42560
-github.tar, level -1 with dict, advanced one pass, 41353
-github.tar, level 0, advanced one pass, 38831
+github, multithreaded with advanced params, advanced one pass, 165909
+github.tar, level -5, advanced one pass, 52173
+github.tar, level -5 with dict, advanced one pass, 51161
+github.tar, level -3, advanced one pass, 45783
+github.tar, level -3 with dict, advanced one pass, 44768
+github.tar, level -1, advanced one pass, 42606
+github.tar, level -1 with dict, advanced one pass, 41397
+github.tar, level 0, advanced one pass, 38884
github.tar, level 0 with dict, advanced one pass, 37995
-github.tar, level 0 with dict dms, advanced one pass, 38003
-github.tar, level 0 with dict dds, advanced one pass, 38003
+github.tar, level 0 with dict dms, advanced one pass, 38114
+github.tar, level 0 with dict dds, advanced one pass, 38114
github.tar, level 0 with dict copy, advanced one pass, 37995
github.tar, level 0 with dict load, advanced one pass, 37956
github.tar, level 1, advanced one pass, 39200
-github.tar, level 1 with dict, advanced one pass, 38119
-github.tar, level 1 with dict dms, advanced one pass, 38406
-github.tar, level 1 with dict dds, advanced one pass, 38406
-github.tar, level 1 with dict copy, advanced one pass, 38119
-github.tar, level 1 with dict load, advanced one pass, 38364
-github.tar, level 3, advanced one pass, 38831
+github.tar, level 1 with dict, advanced one pass, 38189
+github.tar, level 1 with dict dms, advanced one pass, 38398
+github.tar, level 1 with dict dds, advanced one pass, 38398
+github.tar, level 1 with dict copy, advanced one pass, 38189
+github.tar, level 1 with dict load, advanced one pass, 38393
+github.tar, level 3, advanced one pass, 38884
github.tar, level 3 with dict, advanced one pass, 37995
-github.tar, level 3 with dict dms, advanced one pass, 38003
-github.tar, level 3 with dict dds, advanced one pass, 38003
+github.tar, level 3 with dict dms, advanced one pass, 38114
+github.tar, level 3 with dict dds, advanced one pass, 38114
github.tar, level 3 with dict copy, advanced one pass, 37995
github.tar, level 3 with dict load, advanced one pass, 37956
-github.tar, level 4, advanced one pass, 38893
+github.tar, level 4, advanced one pass, 38880
github.tar, level 4 with dict, advanced one pass, 37948
-github.tar, level 4 with dict dms, advanced one pass, 37954
-github.tar, level 4 with dict dds, advanced one pass, 37954
+github.tar, level 4 with dict dms, advanced one pass, 37995
+github.tar, level 4 with dict dds, advanced one pass, 37995
github.tar, level 4 with dict copy, advanced one pass, 37948
github.tar, level 4 with dict load, advanced one pass, 37927
github.tar, level 5 row 1, advanced one pass, 39651
@@ -533,97 +533,97 @@ github.tar, level 19 with dict dms, advanced
github.tar, level 19 with dict dds, advanced one pass, 32565
github.tar, level 19 with dict copy, advanced one pass, 32701
github.tar, level 19 with dict load, advanced one pass, 32428
-github.tar, no source size, advanced one pass, 38831
+github.tar, no source size, advanced one pass, 38884
github.tar, no source size with dict, advanced one pass, 37995
-github.tar, long distance mode, advanced one pass, 40252
-github.tar, multithreaded, advanced one pass, 38831
-github.tar, multithreaded long distance mode, advanced one pass, 40232
-github.tar, small window log, advanced one pass, 198540
+github.tar, long distance mode, advanced one pass, 40156
+github.tar, multithreaded, advanced one pass, 38884
+github.tar, multithreaded long distance mode, advanced one pass, 40139
+github.tar, small window log, advanced one pass, 198535
github.tar, small hash log, advanced one pass, 129870
github.tar, small chain log, advanced one pass, 41669
github.tar, explicit params, advanced one pass, 41385
-github.tar, uncompressed literals, advanced one pass, 41525
+github.tar, uncompressed literals, advanced one pass, 41562
github.tar, uncompressed literals optimal, advanced one pass, 35356
-github.tar, huffman literals, advanced one pass, 38853
-github.tar, multithreaded with advanced params, advanced one pass, 41525
-silesia, level -5, advanced one pass small out, 6857372
-silesia, level -3, advanced one pass small out, 6503412
-silesia, level -1, advanced one pass small out, 6172202
-silesia, level 0, advanced one pass small out, 4842075
-silesia, level 1, advanced one pass small out, 5306632
-silesia, level 3, advanced one pass small out, 4842075
-silesia, level 4, advanced one pass small out, 4779186
-silesia, level 5 row 1, advanced one pass small out, 4667668
-silesia, level 5 row 2, advanced one pass small out, 4670326
-silesia, level 5, advanced one pass small out, 4667668
-silesia, level 6, advanced one pass small out, 4604351
-silesia, level 7 row 1, advanced one pass small out, 4570271
-silesia, level 7 row 2, advanced one pass small out, 4565169
-silesia, level 7, advanced one pass small out, 4570271
-silesia, level 9, advanced one pass small out, 4545850
-silesia, level 11 row 1, advanced one pass small out, 4505658
-silesia, level 11 row 2, advanced one pass small out, 4503429
-silesia, level 12 row 1, advanced one pass small out, 4505658
-silesia, level 12 row 2, advanced one pass small out, 4503429
-silesia, level 13, advanced one pass small out, 4493990
-silesia, level 16, advanced one pass small out, 4359652
-silesia, level 19, advanced one pass small out, 4266582
-silesia, no source size, advanced one pass small out, 4842075
-silesia, long distance mode, advanced one pass small out, 4833710
-silesia, multithreaded, advanced one pass small out, 4842075
-silesia, multithreaded long distance mode, advanced one pass small out, 4833737
-silesia, small window log, advanced one pass small out, 7095000
-silesia, small hash log, advanced one pass small out, 6526141
-silesia, small chain log, advanced one pass small out, 4912197
-silesia, explicit params, advanced one pass small out, 4795840
-silesia, uncompressed literals, advanced one pass small out, 5120566
-silesia, uncompressed literals optimal, advanced one pass small out, 4316880
-silesia, huffman literals, advanced one pass small out, 5321369
-silesia, multithreaded with advanced params, advanced one pass small out, 5120566
-silesia.tar, level -5, advanced one pass small out, 6861055
-silesia.tar, level -3, advanced one pass small out, 6505483
-silesia.tar, level -1, advanced one pass small out, 6179047
-silesia.tar, level 0, advanced one pass small out, 4854086
-silesia.tar, level 1, advanced one pass small out, 5327717
-silesia.tar, level 3, advanced one pass small out, 4854086
-silesia.tar, level 4, advanced one pass small out, 4791503
-silesia.tar, level 5 row 1, advanced one pass small out, 4679004
-silesia.tar, level 5 row 2, advanced one pass small out, 4682334
-silesia.tar, level 5, advanced one pass small out, 4679004
-silesia.tar, level 6, advanced one pass small out, 4614561
-silesia.tar, level 7 row 1, advanced one pass small out, 4579828
-silesia.tar, level 7 row 2, advanced one pass small out, 4575602
-silesia.tar, level 7, advanced one pass small out, 4579828
-silesia.tar, level 9, advanced one pass small out, 4555448
-silesia.tar, level 11 row 1, advanced one pass small out, 4514962
-silesia.tar, level 11 row 2, advanced one pass small out, 4513816
-silesia.tar, level 12 row 1, advanced one pass small out, 4514517
-silesia.tar, level 12 row 2, advanced one pass small out, 4514007
-silesia.tar, level 13, advanced one pass small out, 4502956
-silesia.tar, level 16, advanced one pass small out, 4360385
-silesia.tar, level 19, advanced one pass small out, 4260939
-silesia.tar, no source size, advanced one pass small out, 4854086
-silesia.tar, long distance mode, advanced one pass small out, 4840452
-silesia.tar, multithreaded, advanced one pass small out, 4854160
-silesia.tar, multithreaded long distance mode, advanced one pass small out, 4845741
-silesia.tar, small window log, advanced one pass small out, 7100655
-silesia.tar, small hash log, advanced one pass small out, 6529206
-silesia.tar, small chain log, advanced one pass small out, 4917041
-silesia.tar, explicit params, advanced one pass small out, 4807274
-silesia.tar, uncompressed literals, advanced one pass small out, 5122473
-silesia.tar, uncompressed literals optimal, advanced one pass small out, 4308451
-silesia.tar, huffman literals, advanced one pass small out, 5341705
-silesia.tar, multithreaded with advanced params, advanced one pass small out, 5122567
+github.tar, huffman literals, advanced one pass, 38921
+github.tar, multithreaded with advanced params, advanced one pass, 41562
+silesia, level -5, advanced one pass small out, 6854688
+silesia, level -3, advanced one pass small out, 6502839
+silesia, level -1, advanced one pass small out, 6173625
+silesia, level 0, advanced one pass small out, 4832054
+silesia, level 1, advanced one pass small out, 5304296
+silesia, level 3, advanced one pass small out, 4832054
+silesia, level 4, advanced one pass small out, 4768799
+silesia, level 5 row 1, advanced one pass small out, 4663718
+silesia, level 5 row 2, advanced one pass small out, 4666272
+silesia, level 5, advanced one pass small out, 4663718
+silesia, level 6, advanced one pass small out, 4600034
+silesia, level 7 row 1, advanced one pass small out, 4566069
+silesia, level 7 row 2, advanced one pass small out, 4560893
+silesia, level 7, advanced one pass small out, 4566069
+silesia, level 9, advanced one pass small out, 4540520
+silesia, level 11 row 1, advanced one pass small out, 4500472
+silesia, level 11 row 2, advanced one pass small out, 4498174
+silesia, level 12 row 1, advanced one pass small out, 4500472
+silesia, level 12 row 2, advanced one pass small out, 4498174
+silesia, level 13, advanced one pass small out, 4488969
+silesia, level 16, advanced one pass small out, 4356799
+silesia, level 19, advanced one pass small out, 4265851
+silesia, no source size, advanced one pass small out, 4832054
+silesia, long distance mode, advanced one pass small out, 4823264
+silesia, multithreaded, advanced one pass small out, 4833065
+silesia, multithreaded long distance mode, advanced one pass small out, 4824293
+silesia, small window log, advanced one pass small out, 7094480
+silesia, small hash log, advanced one pass small out, 6525510
+silesia, small chain log, advanced one pass small out, 4912248
+silesia, explicit params, advanced one pass small out, 4791219
+silesia, uncompressed literals, advanced one pass small out, 5117526
+silesia, uncompressed literals optimal, advanced one pass small out, 4316644
+silesia, huffman literals, advanced one pass small out, 5319104
+silesia, multithreaded with advanced params, advanced one pass small out, 5118187
+silesia.tar, level -5, advanced one pass small out, 6858730
+silesia.tar, level -3, advanced one pass small out, 6502944
+silesia.tar, level -1, advanced one pass small out, 6175652
+silesia.tar, level 0, advanced one pass small out, 4829268
+silesia.tar, level 1, advanced one pass small out, 5307443
+silesia.tar, level 3, advanced one pass small out, 4829268
+silesia.tar, level 4, advanced one pass small out, 4767074
+silesia.tar, level 5 row 1, advanced one pass small out, 4662847
+silesia.tar, level 5 row 2, advanced one pass small out, 4666825
+silesia.tar, level 5, advanced one pass small out, 4662847
+silesia.tar, level 6, advanced one pass small out, 4597877
+silesia.tar, level 7 row 1, advanced one pass small out, 4563998
+silesia.tar, level 7 row 2, advanced one pass small out, 4559520
+silesia.tar, level 7, advanced one pass small out, 4563998
+silesia.tar, level 9, advanced one pass small out, 4537558
+silesia.tar, level 11 row 1, advanced one pass small out, 4496590
+silesia.tar, level 11 row 2, advanced one pass small out, 4495225
+silesia.tar, level 12 row 1, advanced one pass small out, 4496084
+silesia.tar, level 12 row 2, advanced one pass small out, 4495434
+silesia.tar, level 13, advanced one pass small out, 4484732
+silesia.tar, level 16, advanced one pass small out, 4355572
+silesia.tar, level 19, advanced one pass small out, 4257629
+silesia.tar, no source size, advanced one pass small out, 4829268
+silesia.tar, long distance mode, advanced one pass small out, 4815868
+silesia.tar, multithreaded, advanced one pass small out, 4836000
+silesia.tar, multithreaded long distance mode, advanced one pass small out, 4827826
+silesia.tar, small window log, advanced one pass small out, 7100064
+silesia.tar, small hash log, advanced one pass small out, 6530222
+silesia.tar, small chain log, advanced one pass small out, 4915689
+silesia.tar, explicit params, advanced one pass small out, 4790421
+silesia.tar, uncompressed literals, advanced one pass small out, 5114702
+silesia.tar, uncompressed literals optimal, advanced one pass small out, 4306289
+silesia.tar, huffman literals, advanced one pass small out, 5323421
+silesia.tar, multithreaded with advanced params, advanced one pass small out, 5116579
github, level -5, advanced one pass small out, 204407
github, level -5 with dict, advanced one pass small out, 45832
github, level -3, advanced one pass small out, 193253
github, level -3 with dict, advanced one pass small out, 44671
github, level -1, advanced one pass small out, 175468
github, level -1 with dict, advanced one pass small out, 41825
-github, level 0, advanced one pass small out, 136332
-github, level 0 with dict, advanced one pass small out, 41148
-github, level 0 with dict dms, advanced one pass small out, 41148
-github, level 0 with dict dds, advanced one pass small out, 41148
+github, level 0, advanced one pass small out, 136331
+github, level 0 with dict, advanced one pass small out, 41118
+github, level 0 with dict dms, advanced one pass small out, 41118
+github, level 0 with dict dds, advanced one pass small out, 41118
github, level 0 with dict copy, advanced one pass small out, 41124
github, level 0 with dict load, advanced one pass small out, 41847
github, level 1, advanced one pass small out, 142365
@@ -632,16 +632,16 @@ github, level 1 with dict dms, advanced
github, level 1 with dict dds, advanced one pass small out, 41266
github, level 1 with dict copy, advanced one pass small out, 41279
github, level 1 with dict load, advanced one pass small out, 43331
-github, level 3, advanced one pass small out, 136332
-github, level 3 with dict, advanced one pass small out, 41148
-github, level 3 with dict dms, advanced one pass small out, 41148
-github, level 3 with dict dds, advanced one pass small out, 41148
+github, level 3, advanced one pass small out, 136331
+github, level 3 with dict, advanced one pass small out, 41118
+github, level 3 with dict dms, advanced one pass small out, 41118
+github, level 3 with dict dds, advanced one pass small out, 41118
github, level 3 with dict copy, advanced one pass small out, 41124
github, level 3 with dict load, advanced one pass small out, 41847
github, level 4, advanced one pass small out, 136199
-github, level 4 with dict, advanced one pass small out, 41251
-github, level 4 with dict dms, advanced one pass small out, 41251
-github, level 4 with dict dds, advanced one pass small out, 41251
+github, level 4 with dict, advanced one pass small out, 41229
+github, level 4 with dict dms, advanced one pass small out, 41229
+github, level 4 with dict dds, advanced one pass small out, 41229
github, level 4 with dict copy, advanced one pass small out, 41216
github, level 4 with dict load, advanced one pass small out, 41548
github, level 5 row 1, advanced one pass small out, 134584
@@ -713,60 +713,60 @@ github, level 13 with dict, advanced
github, level 13 with dict dms, advanced one pass small out, 39900
github, level 13 with dict dds, advanced one pass small out, 39900
github, level 13 with dict copy, advanced one pass small out, 39948
-github, level 13 with dict load, advanced one pass small out, 42624
+github, level 13 with dict load, advanced one pass small out, 42643
github, level 16, advanced one pass small out, 133209
github, level 16 with dict, advanced one pass small out, 37902
github, level 16 with dict dms, advanced one pass small out, 37902
github, level 16 with dict dds, advanced one pass small out, 37902
github, level 16 with dict copy, advanced one pass small out, 37892
-github, level 16 with dict load, advanced one pass small out, 42402
+github, level 16 with dict load, advanced one pass small out, 42434
github, level 19, advanced one pass small out, 132879
github, level 19 with dict, advanced one pass small out, 37916
github, level 19 with dict dms, advanced one pass small out, 37916
github, level 19 with dict dds, advanced one pass small out, 37916
github, level 19 with dict copy, advanced one pass small out, 37906
-github, level 19 with dict load, advanced one pass small out, 39770
-github, no source size, advanced one pass small out, 136332
-github, no source size with dict, advanced one pass small out, 41148
-github, long distance mode, advanced one pass small out, 136332
-github, multithreaded, advanced one pass small out, 136332
-github, multithreaded long distance mode, advanced one pass small out, 136332
-github, small window log, advanced one pass small out, 136332
+github, level 19 with dict load, advanced one pass small out, 40405
+github, no source size, advanced one pass small out, 136331
+github, no source size with dict, advanced one pass small out, 41118
+github, long distance mode, advanced one pass small out, 136331
+github, multithreaded, advanced one pass small out, 136331
+github, multithreaded long distance mode, advanced one pass small out, 136331
+github, small window log, advanced one pass small out, 136331
github, small hash log, advanced one pass small out, 135590
github, small chain log, advanced one pass small out, 136341
github, explicit params, advanced one pass small out, 137727
-github, uncompressed literals, advanced one pass small out, 165911
+github, uncompressed literals, advanced one pass small out, 165909
github, uncompressed literals optimal, advanced one pass small out, 152667
github, huffman literals, advanced one pass small out, 142365
-github, multithreaded with advanced params, advanced one pass small out, 165911
-github.tar, level -5, advanced one pass small out, 52115
-github.tar, level -5 with dict, advanced one pass small out, 51097
-github.tar, level -3, advanced one pass small out, 45678
-github.tar, level -3 with dict, advanced one pass small out, 44734
-github.tar, level -1, advanced one pass small out, 42560
-github.tar, level -1 with dict, advanced one pass small out, 41353
-github.tar, level 0, advanced one pass small out, 38831
+github, multithreaded with advanced params, advanced one pass small out, 165909
+github.tar, level -5, advanced one pass small out, 52173
+github.tar, level -5 with dict, advanced one pass small out, 51161
+github.tar, level -3, advanced one pass small out, 45783
+github.tar, level -3 with dict, advanced one pass small out, 44768
+github.tar, level -1, advanced one pass small out, 42606
+github.tar, level -1 with dict, advanced one pass small out, 41397
+github.tar, level 0, advanced one pass small out, 38884
github.tar, level 0 with dict, advanced one pass small out, 37995
-github.tar, level 0 with dict dms, advanced one pass small out, 38003
-github.tar, level 0 with dict dds, advanced one pass small out, 38003
+github.tar, level 0 with dict dms, advanced one pass small out, 38114
+github.tar, level 0 with dict dds, advanced one pass small out, 38114
github.tar, level 0 with dict copy, advanced one pass small out, 37995
github.tar, level 0 with dict load, advanced one pass small out, 37956
github.tar, level 1, advanced one pass small out, 39200
-github.tar, level 1 with dict, advanced one pass small out, 38119
-github.tar, level 1 with dict dms, advanced one pass small out, 38406
-github.tar, level 1 with dict dds, advanced one pass small out, 38406
-github.tar, level 1 with dict copy, advanced one pass small out, 38119
-github.tar, level 1 with dict load, advanced one pass small out, 38364
-github.tar, level 3, advanced one pass small out, 38831
+github.tar, level 1 with dict, advanced one pass small out, 38189
+github.tar, level 1 with dict dms, advanced one pass small out, 38398
+github.tar, level 1 with dict dds, advanced one pass small out, 38398
+github.tar, level 1 with dict copy, advanced one pass small out, 38189
+github.tar, level 1 with dict load, advanced one pass small out, 38393
+github.tar, level 3, advanced one pass small out, 38884
github.tar, level 3 with dict, advanced one pass small out, 37995
-github.tar, level 3 with dict dms, advanced one pass small out, 38003
-github.tar, level 3 with dict dds, advanced one pass small out, 38003
+github.tar, level 3 with dict dms, advanced one pass small out, 38114
+github.tar, level 3 with dict dds, advanced one pass small out, 38114
github.tar, level 3 with dict copy, advanced one pass small out, 37995
github.tar, level 3 with dict load, advanced one pass small out, 37956
-github.tar, level 4, advanced one pass small out, 38893
+github.tar, level 4, advanced one pass small out, 38880
github.tar, level 4 with dict, advanced one pass small out, 37948
-github.tar, level 4 with dict dms, advanced one pass small out, 37954
-github.tar, level 4 with dict dds, advanced one pass small out, 37954
+github.tar, level 4 with dict dms, advanced one pass small out, 37995
+github.tar, level 4 with dict dds, advanced one pass small out, 37995
github.tar, level 4 with dict copy, advanced one pass small out, 37948
github.tar, level 4 with dict load, advanced one pass small out, 37927
github.tar, level 5 row 1, advanced one pass small out, 39651
@@ -851,97 +851,97 @@ github.tar, level 19 with dict dms, advanced
github.tar, level 19 with dict dds, advanced one pass small out, 32565
github.tar, level 19 with dict copy, advanced one pass small out, 32701
github.tar, level 19 with dict load, advanced one pass small out, 32428
-github.tar, no source size, advanced one pass small out, 38831
+github.tar, no source size, advanced one pass small out, 38884
github.tar, no source size with dict, advanced one pass small out, 37995
-github.tar, long distance mode, advanced one pass small out, 40252
-github.tar, multithreaded, advanced one pass small out, 38831
-github.tar, multithreaded long distance mode, advanced one pass small out, 40232
-github.tar, small window log, advanced one pass small out, 198540
+github.tar, long distance mode, advanced one pass small out, 40156
+github.tar, multithreaded, advanced one pass small out, 38884
+github.tar, multithreaded long distance mode, advanced one pass small out, 40139
+github.tar, small window log, advanced one pass small out, 198535
github.tar, small hash log, advanced one pass small out, 129870
github.tar, small chain log, advanced one pass small out, 41669
github.tar, explicit params, advanced one pass small out, 41385
-github.tar, uncompressed literals, advanced one pass small out, 41525
+github.tar, uncompressed literals, advanced one pass small out, 41562
github.tar, uncompressed literals optimal, advanced one pass small out, 35356
-github.tar, huffman literals, advanced one pass small out, 38853
-github.tar, multithreaded with advanced params, advanced one pass small out, 41525
-silesia, level -5, advanced streaming, 6854744
-silesia, level -3, advanced streaming, 6503319
-silesia, level -1, advanced streaming, 6172207
-silesia, level 0, advanced streaming, 4842075
-silesia, level 1, advanced streaming, 5306388
-silesia, level 3, advanced streaming, 4842075
-silesia, level 4, advanced streaming, 4779186
-silesia, level 5 row 1, advanced streaming, 4667668
-silesia, level 5 row 2, advanced streaming, 4670326
-silesia, level 5, advanced streaming, 4667668
-silesia, level 6, advanced streaming, 4604351
-silesia, level 7 row 1, advanced streaming, 4570271
-silesia, level 7 row 2, advanced streaming, 4565169
-silesia, level 7, advanced streaming, 4570271
-silesia, level 9, advanced streaming, 4545850
-silesia, level 11 row 1, advanced streaming, 4505658
-silesia, level 11 row 2, advanced streaming, 4503429
-silesia, level 12 row 1, advanced streaming, 4505658
-silesia, level 12 row 2, advanced streaming, 4503429
-silesia, level 13, advanced streaming, 4493990
-silesia, level 16, advanced streaming, 4359652
-silesia, level 19, advanced streaming, 4266582
-silesia, no source size, advanced streaming, 4842039
-silesia, long distance mode, advanced streaming, 4833710
-silesia, multithreaded, advanced streaming, 4842075
-silesia, multithreaded long distance mode, advanced streaming, 4833737
-silesia, small window log, advanced streaming, 7111103
-silesia, small hash log, advanced streaming, 6526141
-silesia, small chain log, advanced streaming, 4912197
-silesia, explicit params, advanced streaming, 4795857
-silesia, uncompressed literals, advanced streaming, 5120566
-silesia, uncompressed literals optimal, advanced streaming, 4316880
-silesia, huffman literals, advanced streaming, 5321370
-silesia, multithreaded with advanced params, advanced streaming, 5120566
-silesia.tar, level -5, advanced streaming, 6856523
-silesia.tar, level -3, advanced streaming, 6505954
-silesia.tar, level -1, advanced streaming, 6179056
-silesia.tar, level 0, advanced streaming, 4859271
-silesia.tar, level 1, advanced streaming, 5327708
-silesia.tar, level 3, advanced streaming, 4859271
-silesia.tar, level 4, advanced streaming, 4797470
-silesia.tar, level 5 row 1, advanced streaming, 4679020
-silesia.tar, level 5 row 2, advanced streaming, 4682355
-silesia.tar, level 5, advanced streaming, 4679020
-silesia.tar, level 6, advanced streaming, 4614558
-silesia.tar, level 7 row 1, advanced streaming, 4579823
-silesia.tar, level 7 row 2, advanced streaming, 4575601
-silesia.tar, level 7, advanced streaming, 4579823
-silesia.tar, level 9, advanced streaming, 4555445
-silesia.tar, level 11 row 1, advanced streaming, 4514959
-silesia.tar, level 11 row 2, advanced streaming, 4513810
-silesia.tar, level 12 row 1, advanced streaming, 4514514
-silesia.tar, level 12 row 2, advanced streaming, 4514003
-silesia.tar, level 13, advanced streaming, 4502956
-silesia.tar, level 16, advanced streaming, 4360385
-silesia.tar, level 19, advanced streaming, 4260939
-silesia.tar, no source size, advanced streaming, 4859267
-silesia.tar, long distance mode, advanced streaming, 4840452
-silesia.tar, multithreaded, advanced streaming, 4854160
-silesia.tar, multithreaded long distance mode, advanced streaming, 4845741
-silesia.tar, small window log, advanced streaming, 7117559
-silesia.tar, small hash log, advanced streaming, 6529209
-silesia.tar, small chain log, advanced streaming, 4917021
-silesia.tar, explicit params, advanced streaming, 4807288
-silesia.tar, uncompressed literals, advanced streaming, 5127423
-silesia.tar, uncompressed literals optimal, advanced streaming, 4308451
-silesia.tar, huffman literals, advanced streaming, 5341712
-silesia.tar, multithreaded with advanced params, advanced streaming, 5122567
+github.tar, huffman literals, advanced one pass small out, 38921
+github.tar, multithreaded with advanced params, advanced one pass small out, 41562
+silesia, level -5, advanced streaming, 6853462
+silesia, level -3, advanced streaming, 6502349
+silesia, level -1, advanced streaming, 6172125
+silesia, level 0, advanced streaming, 4835804
+silesia, level 1, advanced streaming, 5301644
+silesia, level 3, advanced streaming, 4835804
+silesia, level 4, advanced streaming, 4773049
+silesia, level 5 row 1, advanced streaming, 4664679
+silesia, level 5 row 2, advanced streaming, 4667307
+silesia, level 5, advanced streaming, 4664679
+silesia, level 6, advanced streaming, 4601116
+silesia, level 7 row 1, advanced streaming, 4567082
+silesia, level 7 row 2, advanced streaming, 4561992
+silesia, level 7, advanced streaming, 4567082
+silesia, level 9, advanced streaming, 4542474
+silesia, level 11 row 1, advanced streaming, 4502322
+silesia, level 11 row 2, advanced streaming, 4500050
+silesia, level 12 row 1, advanced streaming, 4502322
+silesia, level 12 row 2, advanced streaming, 4500050
+silesia, level 13, advanced streaming, 4490650
+silesia, level 16, advanced streaming, 4358094
+silesia, level 19, advanced streaming, 4265908
+silesia, no source size, advanced streaming, 4835768
+silesia, long distance mode, advanced streaming, 4827032
+silesia, multithreaded, advanced streaming, 4833065
+silesia, multithreaded long distance mode, advanced streaming, 4824293
+silesia, small window log, advanced streaming, 7110591
+silesia, small hash log, advanced streaming, 6525259
+silesia, small chain log, advanced streaming, 4911577
+silesia, explicit params, advanced streaming, 4792505
+silesia, uncompressed literals, advanced streaming, 5116404
+silesia, uncompressed literals optimal, advanced streaming, 4316533
+silesia, huffman literals, advanced streaming, 5317620
+silesia, multithreaded with advanced params, advanced streaming, 5118187
+silesia.tar, level -5, advanced streaming, 6853184
+silesia.tar, level -3, advanced streaming, 6503455
+silesia.tar, level -1, advanced streaming, 6175761
+silesia.tar, level 0, advanced streaming, 4846783
+silesia.tar, level 1, advanced streaming, 5306719
+silesia.tar, level 3, advanced streaming, 4846783
+silesia.tar, level 4, advanced streaming, 4785332
+silesia.tar, level 5 row 1, advanced streaming, 4664523
+silesia.tar, level 5 row 2, advanced streaming, 4668292
+silesia.tar, level 5, advanced streaming, 4664523
+silesia.tar, level 6, advanced streaming, 4599420
+silesia.tar, level 7 row 1, advanced streaming, 4565332
+silesia.tar, level 7 row 2, advanced streaming, 4561064
+silesia.tar, level 7, advanced streaming, 4565332
+silesia.tar, level 9, advanced streaming, 4539391
+silesia.tar, level 11 row 1, advanced streaming, 4498530
+silesia.tar, level 11 row 2, advanced streaming, 4497297
+silesia.tar, level 12 row 1, advanced streaming, 4498097
+silesia.tar, level 12 row 2, advanced streaming, 4497497
+silesia.tar, level 13, advanced streaming, 4486652
+silesia.tar, level 16, advanced streaming, 4358029
+silesia.tar, level 19, advanced streaming, 4258228
+silesia.tar, no source size, advanced streaming, 4846779
+silesia.tar, long distance mode, advanced streaming, 4825842
+silesia.tar, multithreaded, advanced streaming, 4836000
+silesia.tar, multithreaded long distance mode, advanced streaming, 4827826
+silesia.tar, small window log, advanced streaming, 7117024
+silesia.tar, small hash log, advanced streaming, 6529503
+silesia.tar, small chain log, advanced streaming, 4915956
+silesia.tar, explicit params, advanced streaming, 4791739
+silesia.tar, uncompressed literals, advanced streaming, 5123274
+silesia.tar, uncompressed literals optimal, advanced streaming, 4306968
+silesia.tar, huffman literals, advanced streaming, 5323245
+silesia.tar, multithreaded with advanced params, advanced streaming, 5116579
github, level -5, advanced streaming, 204407
github, level -5 with dict, advanced streaming, 45832
github, level -3, advanced streaming, 193253
github, level -3 with dict, advanced streaming, 44671
github, level -1, advanced streaming, 175468
github, level -1 with dict, advanced streaming, 41825
-github, level 0, advanced streaming, 136332
-github, level 0 with dict, advanced streaming, 41148
-github, level 0 with dict dms, advanced streaming, 41148
-github, level 0 with dict dds, advanced streaming, 41148
+github, level 0, advanced streaming, 136331
+github, level 0 with dict, advanced streaming, 41118
+github, level 0 with dict dms, advanced streaming, 41118
+github, level 0 with dict dds, advanced streaming, 41118
github, level 0 with dict copy, advanced streaming, 41124
github, level 0 with dict load, advanced streaming, 41847
github, level 1, advanced streaming, 142365
@@ -950,16 +950,16 @@ github, level 1 with dict dms, advanced
github, level 1 with dict dds, advanced streaming, 41266
github, level 1 with dict copy, advanced streaming, 41279
github, level 1 with dict load, advanced streaming, 43331
-github, level 3, advanced streaming, 136332
-github, level 3 with dict, advanced streaming, 41148
-github, level 3 with dict dms, advanced streaming, 41148
-github, level 3 with dict dds, advanced streaming, 41148
+github, level 3, advanced streaming, 136331
+github, level 3 with dict, advanced streaming, 41118
+github, level 3 with dict dms, advanced streaming, 41118
+github, level 3 with dict dds, advanced streaming, 41118
github, level 3 with dict copy, advanced streaming, 41124
github, level 3 with dict load, advanced streaming, 41847
github, level 4, advanced streaming, 136199
-github, level 4 with dict, advanced streaming, 41251
-github, level 4 with dict dms, advanced streaming, 41251
-github, level 4 with dict dds, advanced streaming, 41251
+github, level 4 with dict, advanced streaming, 41229
+github, level 4 with dict dms, advanced streaming, 41229
+github, level 4 with dict dds, advanced streaming, 41229
github, level 4 with dict copy, advanced streaming, 41216
github, level 4 with dict load, advanced streaming, 41548
github, level 5 row 1, advanced streaming, 134584
@@ -1031,60 +1031,60 @@ github, level 13 with dict, advanced
github, level 13 with dict dms, advanced streaming, 39900
github, level 13 with dict dds, advanced streaming, 39900
github, level 13 with dict copy, advanced streaming, 39948
-github, level 13 with dict load, advanced streaming, 42624
+github, level 13 with dict load, advanced streaming, 42643
github, level 16, advanced streaming, 133209
github, level 16 with dict, advanced streaming, 37902
github, level 16 with dict dms, advanced streaming, 37902
github, level 16 with dict dds, advanced streaming, 37902
github, level 16 with dict copy, advanced streaming, 37892
-github, level 16 with dict load, advanced streaming, 42402
+github, level 16 with dict load, advanced streaming, 42434
github, level 19, advanced streaming, 132879
github, level 19 with dict, advanced streaming, 37916
github, level 19 with dict dms, advanced streaming, 37916
github, level 19 with dict dds, advanced streaming, 37916
github, level 19 with dict copy, advanced streaming, 37906
-github, level 19 with dict load, advanced streaming, 39770
-github, no source size, advanced streaming, 136332
-github, no source size with dict, advanced streaming, 41148
-github, long distance mode, advanced streaming, 136332
-github, multithreaded, advanced streaming, 136332
-github, multithreaded long distance mode, advanced streaming, 136332
-github, small window log, advanced streaming, 136332
+github, level 19 with dict load, advanced streaming, 40405
+github, no source size, advanced streaming, 136331
+github, no source size with dict, advanced streaming, 41118
+github, long distance mode, advanced streaming, 136331
+github, multithreaded, advanced streaming, 136331
+github, multithreaded long distance mode, advanced streaming, 136331
+github, small window log, advanced streaming, 136331
github, small hash log, advanced streaming, 135590
github, small chain log, advanced streaming, 136341
github, explicit params, advanced streaming, 137727
-github, uncompressed literals, advanced streaming, 165911
+github, uncompressed literals, advanced streaming, 165909
github, uncompressed literals optimal, advanced streaming, 152667
github, huffman literals, advanced streaming, 142365
-github, multithreaded with advanced params, advanced streaming, 165911
-github.tar, level -5, advanced streaming, 52152
-github.tar, level -5 with dict, advanced streaming, 51181
-github.tar, level -3, advanced streaming, 45678
-github.tar, level -3 with dict, advanced streaming, 44734
-github.tar, level -1, advanced streaming, 42560
-github.tar, level -1 with dict, advanced streaming, 41353
-github.tar, level 0, advanced streaming, 38831
+github, multithreaded with advanced params, advanced streaming, 165909
+github.tar, level -5, advanced streaming, 52273
+github.tar, level -5 with dict, advanced streaming, 51297
+github.tar, level -3, advanced streaming, 45783
+github.tar, level -3 with dict, advanced streaming, 44853
+github.tar, level -1, advanced streaming, 42687
+github.tar, level -1 with dict, advanced streaming, 41486
+github.tar, level 0, advanced streaming, 38884
github.tar, level 0 with dict, advanced streaming, 37995
-github.tar, level 0 with dict dms, advanced streaming, 38003
-github.tar, level 0 with dict dds, advanced streaming, 38003
+github.tar, level 0 with dict dms, advanced streaming, 38114
+github.tar, level 0 with dict dds, advanced streaming, 38114
github.tar, level 0 with dict copy, advanced streaming, 37995
github.tar, level 0 with dict load, advanced streaming, 37956
-github.tar, level 1, advanced streaming, 39200
-github.tar, level 1 with dict, advanced streaming, 38119
-github.tar, level 1 with dict dms, advanced streaming, 38406
-github.tar, level 1 with dict dds, advanced streaming, 38406
-github.tar, level 1 with dict copy, advanced streaming, 38119
-github.tar, level 1 with dict load, advanced streaming, 38364
-github.tar, level 3, advanced streaming, 38831
+github.tar, level 1, advanced streaming, 39346
+github.tar, level 1 with dict, advanced streaming, 38251
+github.tar, level 1 with dict dms, advanced streaming, 38557
+github.tar, level 1 with dict dds, advanced streaming, 38557
+github.tar, level 1 with dict copy, advanced streaming, 38251
+github.tar, level 1 with dict load, advanced streaming, 38503
+github.tar, level 3, advanced streaming, 38884
github.tar, level 3 with dict, advanced streaming, 37995
-github.tar, level 3 with dict dms, advanced streaming, 38003
-github.tar, level 3 with dict dds, advanced streaming, 38003
+github.tar, level 3 with dict dms, advanced streaming, 38114
+github.tar, level 3 with dict dds, advanced streaming, 38114
github.tar, level 3 with dict copy, advanced streaming, 37995
github.tar, level 3 with dict load, advanced streaming, 37956
-github.tar, level 4, advanced streaming, 38893
+github.tar, level 4, advanced streaming, 38880
github.tar, level 4 with dict, advanced streaming, 37948
-github.tar, level 4 with dict dms, advanced streaming, 37954
-github.tar, level 4 with dict dds, advanced streaming, 37954
+github.tar, level 4 with dict dms, advanced streaming, 37995
+github.tar, level 4 with dict dds, advanced streaming, 37995
github.tar, level 4 with dict copy, advanced streaming, 37948
github.tar, level 4 with dict load, advanced streaming, 37927
github.tar, level 5 row 1, advanced streaming, 39651
@@ -1169,69 +1169,69 @@ github.tar, level 19 with dict dms, advanced
github.tar, level 19 with dict dds, advanced streaming, 32565
github.tar, level 19 with dict copy, advanced streaming, 32701
github.tar, level 19 with dict load, advanced streaming, 32428
-github.tar, no source size, advanced streaming, 38828
-github.tar, no source size with dict, advanced streaming, 38000
-github.tar, long distance mode, advanced streaming, 40252
-github.tar, multithreaded, advanced streaming, 38831
-github.tar, multithreaded long distance mode, advanced streaming, 40232
-github.tar, small window log, advanced streaming, 199558
+github.tar, no source size, advanced streaming, 38881
+github.tar, no source size with dict, advanced streaming, 38111
+github.tar, long distance mode, advanced streaming, 40156
+github.tar, multithreaded, advanced streaming, 38884
+github.tar, multithreaded long distance mode, advanced streaming, 40139
+github.tar, small window log, advanced streaming, 199553
github.tar, small hash log, advanced streaming, 129870
github.tar, small chain log, advanced streaming, 41669
github.tar, explicit params, advanced streaming, 41385
-github.tar, uncompressed literals, advanced streaming, 41525
+github.tar, uncompressed literals, advanced streaming, 41562
github.tar, uncompressed literals optimal, advanced streaming, 35356
-github.tar, huffman literals, advanced streaming, 38853
-github.tar, multithreaded with advanced params, advanced streaming, 41525
-silesia, level -5, old streaming, 6854744
-silesia, level -3, old streaming, 6503319
-silesia, level -1, old streaming, 6172207
-silesia, level 0, old streaming, 4842075
-silesia, level 1, old streaming, 5306388
-silesia, level 3, old streaming, 4842075
-silesia, level 4, old streaming, 4779186
-silesia, level 5, old streaming, 4667668
-silesia, level 6, old streaming, 4604351
-silesia, level 7, old streaming, 4570271
-silesia, level 9, old streaming, 4545850
-silesia, level 13, old streaming, 4493990
-silesia, level 16, old streaming, 4359652
-silesia, level 19, old streaming, 4266582
-silesia, no source size, old streaming, 4842039
-silesia, uncompressed literals, old streaming, 4842075
-silesia, uncompressed literals optimal, old streaming, 4266582
-silesia, huffman literals, old streaming, 6172207
-silesia.tar, level -5, old streaming, 6856523
-silesia.tar, level -3, old streaming, 6505954
-silesia.tar, level -1, old streaming, 6179056
-silesia.tar, level 0, old streaming, 4859271
-silesia.tar, level 1, old streaming, 5327708
-silesia.tar, level 3, old streaming, 4859271
-silesia.tar, level 4, old streaming, 4797470
-silesia.tar, level 5, old streaming, 4679020
-silesia.tar, level 6, old streaming, 4614558
-silesia.tar, level 7, old streaming, 4579823
-silesia.tar, level 9, old streaming, 4555445
-silesia.tar, level 13, old streaming, 4502956
-silesia.tar, level 16, old streaming, 4360385
-silesia.tar, level 19, old streaming, 4260939
-silesia.tar, no source size, old streaming, 4859267
-silesia.tar, uncompressed literals, old streaming, 4859271
-silesia.tar, uncompressed literals optimal, old streaming, 4260939
-silesia.tar, huffman literals, old streaming, 6179056
+github.tar, huffman literals, advanced streaming, 38998
+github.tar, multithreaded with advanced params, advanced streaming, 41562
+silesia, level -5, old streaming, 6853462
+silesia, level -3, old streaming, 6502349
+silesia, level -1, old streaming, 6172125
+silesia, level 0, old streaming, 4835804
+silesia, level 1, old streaming, 5301644
+silesia, level 3, old streaming, 4835804
+silesia, level 4, old streaming, 4773049
+silesia, level 5, old streaming, 4664679
+silesia, level 6, old streaming, 4601116
+silesia, level 7, old streaming, 4567082
+silesia, level 9, old streaming, 4542474
+silesia, level 13, old streaming, 4490650
+silesia, level 16, old streaming, 4358094
+silesia, level 19, old streaming, 4265908
+silesia, no source size, old streaming, 4835768
+silesia, uncompressed literals, old streaming, 4835804
+silesia, uncompressed literals optimal, old streaming, 4265908
+silesia, huffman literals, old streaming, 6172125
+silesia.tar, level -5, old streaming, 6853184
+silesia.tar, level -3, old streaming, 6503455
+silesia.tar, level -1, old streaming, 6175761
+silesia.tar, level 0, old streaming, 4846783
+silesia.tar, level 1, old streaming, 5306719
+silesia.tar, level 3, old streaming, 4846783
+silesia.tar, level 4, old streaming, 4785332
+silesia.tar, level 5, old streaming, 4664523
+silesia.tar, level 6, old streaming, 4599420
+silesia.tar, level 7, old streaming, 4565332
+silesia.tar, level 9, old streaming, 4539391
+silesia.tar, level 13, old streaming, 4486652
+silesia.tar, level 16, old streaming, 4358029
+silesia.tar, level 19, old streaming, 4258228
+silesia.tar, no source size, old streaming, 4846779
+silesia.tar, uncompressed literals, old streaming, 4846783
+silesia.tar, uncompressed literals optimal, old streaming, 4258228
+silesia.tar, huffman literals, old streaming, 6175761
github, level -5, old streaming, 204407
github, level -5 with dict, old streaming, 45832
github, level -3, old streaming, 193253
github, level -3 with dict, old streaming, 44671
github, level -1, old streaming, 175468
github, level -1 with dict, old streaming, 41825
-github, level 0, old streaming, 136332
-github, level 0 with dict, old streaming, 41148
+github, level 0, old streaming, 136331
+github, level 0 with dict, old streaming, 41118
github, level 1, old streaming, 142365
github, level 1 with dict, old streaming, 41266
-github, level 3, old streaming, 136332
-github, level 3 with dict, old streaming, 41148
+github, level 3, old streaming, 136331
+github, level 3 with dict, old streaming, 41118
github, level 4, old streaming, 136199
-github, level 4 with dict, old streaming, 41251
+github, level 4 with dict, old streaming, 41229
github, level 5, old streaming, 135121
github, level 5 with dict, old streaming, 38754
github, level 6, old streaming, 135122
@@ -1247,23 +1247,23 @@ github, level 16 with dict, old stre
github, level 19, old streaming, 132879
github, level 19 with dict, old streaming, 37916
github, no source size, old streaming, 140599
-github, no source size with dict, old streaming, 40654
-github, uncompressed literals, old streaming, 136332
+github, no source size with dict, old streaming, 40652
+github, uncompressed literals, old streaming, 136331
github, uncompressed literals optimal, old streaming, 132879
github, huffman literals, old streaming, 175468
-github.tar, level -5, old streaming, 52152
-github.tar, level -5 with dict, old streaming, 51181
-github.tar, level -3, old streaming, 45678
-github.tar, level -3 with dict, old streaming, 44734
-github.tar, level -1, old streaming, 42560
-github.tar, level -1 with dict, old streaming, 41353
-github.tar, level 0, old streaming, 38831
+github.tar, level -5, old streaming, 52273
+github.tar, level -5 with dict, old streaming, 51297
+github.tar, level -3, old streaming, 45783
+github.tar, level -3 with dict, old streaming, 44853
+github.tar, level -1, old streaming, 42687
+github.tar, level -1 with dict, old streaming, 41486
+github.tar, level 0, old streaming, 38884
github.tar, level 0 with dict, old streaming, 37995
-github.tar, level 1, old streaming, 39200
-github.tar, level 1 with dict, old streaming, 38119
-github.tar, level 3, old streaming, 38831
+github.tar, level 1, old streaming, 39346
+github.tar, level 1 with dict, old streaming, 38251
+github.tar, level 3, old streaming, 38884
github.tar, level 3 with dict, old streaming, 37995
-github.tar, level 4, old streaming, 38893
+github.tar, level 4, old streaming, 38880
github.tar, level 4 with dict, old streaming, 37948
github.tar, level 5, old streaming, 39651
github.tar, level 5 with dict, old streaming, 39145
@@ -1279,77 +1279,77 @@ github.tar, level 16, old stre
github.tar, level 16 with dict, old streaming, 33375
github.tar, level 19, old streaming, 32262
github.tar, level 19 with dict, old streaming, 32701
-github.tar, no source size, old streaming, 38828
-github.tar, no source size with dict, old streaming, 38000
-github.tar, uncompressed literals, old streaming, 38831
+github.tar, no source size, old streaming, 38881
+github.tar, no source size with dict, old streaming, 38111
+github.tar, uncompressed literals, old streaming, 38884
github.tar, uncompressed literals optimal, old streaming, 32262
-github.tar, huffman literals, old streaming, 42560
-silesia, level -5, old streaming advanced, 6854744
-silesia, level -3, old streaming advanced, 6503319
-silesia, level -1, old streaming advanced, 6172207
-silesia, level 0, old streaming advanced, 4842075
-silesia, level 1, old streaming advanced, 5306388
-silesia, level 3, old streaming advanced, 4842075
-silesia, level 4, old streaming advanced, 4779186
-silesia, level 5, old streaming advanced, 4667668
-silesia, level 6, old streaming advanced, 4604351
-silesia, level 7, old streaming advanced, 4570271
-silesia, level 9, old streaming advanced, 4545850
-silesia, level 13, old streaming advanced, 4493990
-silesia, level 16, old streaming advanced, 4359652
-silesia, level 19, old streaming advanced, 4266582
-silesia, no source size, old streaming advanced, 4842039
-silesia, long distance mode, old streaming advanced, 4842075
-silesia, multithreaded, old streaming advanced, 4842075
-silesia, multithreaded long distance mode, old streaming advanced, 4842075
-silesia, small window log, old streaming advanced, 7111103
-silesia, small hash log, old streaming advanced, 6526141
-silesia, small chain log, old streaming advanced, 4912197
-silesia, explicit params, old streaming advanced, 4795857
-silesia, uncompressed literals, old streaming advanced, 4842075
-silesia, uncompressed literals optimal, old streaming advanced, 4266582
-silesia, huffman literals, old streaming advanced, 6172207
-silesia, multithreaded with advanced params, old streaming advanced, 4842075
-silesia.tar, level -5, old streaming advanced, 6856523
-silesia.tar, level -3, old streaming advanced, 6505954
-silesia.tar, level -1, old streaming advanced, 6179056
-silesia.tar, level 0, old streaming advanced, 4859271
-silesia.tar, level 1, old streaming advanced, 5327708
-silesia.tar, level 3, old streaming advanced, 4859271
-silesia.tar, level 4, old streaming advanced, 4797470
-silesia.tar, level 5, old streaming advanced, 4679020
-silesia.tar, level 6, old streaming advanced, 4614558
-silesia.tar, level 7, old streaming advanced, 4579823
-silesia.tar, level 9, old streaming advanced, 4555445
-silesia.tar, level 13, old streaming advanced, 4502956
-silesia.tar, level 16, old streaming advanced, 4360385
-silesia.tar, level 19, old streaming advanced, 4260939
-silesia.tar, no source size, old streaming advanced, 4859267
-silesia.tar, long distance mode, old streaming advanced, 4859271
-silesia.tar, multithreaded, old streaming advanced, 4859271
-silesia.tar, multithreaded long distance mode, old streaming advanced, 4859271
-silesia.tar, small window log, old streaming advanced, 7117562
-silesia.tar, small hash log, old streaming advanced, 6529209
-silesia.tar, small chain log, old streaming advanced, 4917021
-silesia.tar, explicit params, old streaming advanced, 4807288
-silesia.tar, uncompressed literals, old streaming advanced, 4859271
-silesia.tar, uncompressed literals optimal, old streaming advanced, 4260939
-silesia.tar, huffman literals, old streaming advanced, 6179056
-silesia.tar, multithreaded with advanced params, old streaming advanced, 4859271
+github.tar, huffman literals, old streaming, 42687
+silesia, level -5, old streaming advanced, 6853462
+silesia, level -3, old streaming advanced, 6502349
+silesia, level -1, old streaming advanced, 6172125
+silesia, level 0, old streaming advanced, 4835804
+silesia, level 1, old streaming advanced, 5301644
+silesia, level 3, old streaming advanced, 4835804
+silesia, level 4, old streaming advanced, 4773049
+silesia, level 5, old streaming advanced, 4664679
+silesia, level 6, old streaming advanced, 4601116
+silesia, level 7, old streaming advanced, 4567082
+silesia, level 9, old streaming advanced, 4542474
+silesia, level 13, old streaming advanced, 4490650
+silesia, level 16, old streaming advanced, 4358094
+silesia, level 19, old streaming advanced, 4265908
+silesia, no source size, old streaming advanced, 4835768
+silesia, long distance mode, old streaming advanced, 4835804
+silesia, multithreaded, old streaming advanced, 4835804
+silesia, multithreaded long distance mode, old streaming advanced, 4835804
+silesia, small window log, old streaming advanced, 7110591
+silesia, small hash log, old streaming advanced, 6525259
+silesia, small chain log, old streaming advanced, 4911577
+silesia, explicit params, old streaming advanced, 4792505
+silesia, uncompressed literals, old streaming advanced, 4835804
+silesia, uncompressed literals optimal, old streaming advanced, 4265908
+silesia, huffman literals, old streaming advanced, 6172125
+silesia, multithreaded with advanced params, old streaming advanced, 4835804
+silesia.tar, level -5, old streaming advanced, 6853184
+silesia.tar, level -3, old streaming advanced, 6503455
+silesia.tar, level -1, old streaming advanced, 6175761
+silesia.tar, level 0, old streaming advanced, 4846783
+silesia.tar, level 1, old streaming advanced, 5306719
+silesia.tar, level 3, old streaming advanced, 4846783
+silesia.tar, level 4, old streaming advanced, 4785332
+silesia.tar, level 5, old streaming advanced, 4664523
+silesia.tar, level 6, old streaming advanced, 4599420
+silesia.tar, level 7, old streaming advanced, 4565332
+silesia.tar, level 9, old streaming advanced, 4539391
+silesia.tar, level 13, old streaming advanced, 4486652
+silesia.tar, level 16, old streaming advanced, 4358029
+silesia.tar, level 19, old streaming advanced, 4258228
+silesia.tar, no source size, old streaming advanced, 4846779
+silesia.tar, long distance mode, old streaming advanced, 4846783
+silesia.tar, multithreaded, old streaming advanced, 4846783
+silesia.tar, multithreaded long distance mode, old streaming advanced, 4846783
+silesia.tar, small window log, old streaming advanced, 7117027
+silesia.tar, small hash log, old streaming advanced, 6529503
+silesia.tar, small chain log, old streaming advanced, 4915956
+silesia.tar, explicit params, old streaming advanced, 4791739
+silesia.tar, uncompressed literals, old streaming advanced, 4846783
+silesia.tar, uncompressed literals optimal, old streaming advanced, 4258228
+silesia.tar, huffman literals, old streaming advanced, 6175761
+silesia.tar, multithreaded with advanced params, old streaming advanced, 4846783
github, level -5, old streaming advanced, 213265
github, level -5 with dict, old streaming advanced, 46708
github, level -3, old streaming advanced, 196126
github, level -3 with dict, old streaming advanced, 45476
github, level -1, old streaming advanced, 181107
github, level -1 with dict, old streaming advanced, 42060
-github, level 0, old streaming advanced, 141104
-github, level 0 with dict, old streaming advanced, 41113
+github, level 0, old streaming advanced, 141101
+github, level 0 with dict, old streaming advanced, 41074
github, level 1, old streaming advanced, 143693
github, level 1 with dict, old streaming advanced, 42430
-github, level 3, old streaming advanced, 141104
-github, level 3 with dict, old streaming advanced, 41113
-github, level 4, old streaming advanced, 141104
-github, level 4 with dict, old streaming advanced, 41084
+github, level 3, old streaming advanced, 141101
+github, level 3 with dict, old streaming advanced, 41074
+github, level 4, old streaming advanced, 141101
+github, level 4 with dict, old streaming advanced, 41046
github, level 5, old streaming advanced, 139402
github, level 5 with dict, old streaming advanced, 38723
github, level 6, old streaming advanced, 138676
@@ -1366,30 +1366,30 @@ github, level 19, old stre
github, level 19 with dict, old streaming advanced, 37916
github, no source size, old streaming advanced, 140599
github, no source size with dict, old streaming advanced, 40608
-github, long distance mode, old streaming advanced, 141104
-github, multithreaded, old streaming advanced, 141104
-github, multithreaded long distance mode, old streaming advanced, 141104
-github, small window log, old streaming advanced, 141104
+github, long distance mode, old streaming advanced, 141101
+github, multithreaded, old streaming advanced, 141101
+github, multithreaded long distance mode, old streaming advanced, 141101
+github, small window log, old streaming advanced, 141101
github, small hash log, old streaming advanced, 141597
github, small chain log, old streaming advanced, 139275
github, explicit params, old streaming advanced, 140937
-github, uncompressed literals, old streaming advanced, 141104
+github, uncompressed literals, old streaming advanced, 141101
github, uncompressed literals optimal, old streaming advanced, 132879
github, huffman literals, old streaming advanced, 181107
-github, multithreaded with advanced params, old streaming advanced, 141104
-github.tar, level -5, old streaming advanced, 52152
-github.tar, level -5 with dict, old streaming advanced, 51129
-github.tar, level -3, old streaming advanced, 45678
-github.tar, level -3 with dict, old streaming advanced, 44986
-github.tar, level -1, old streaming advanced, 42560
-github.tar, level -1 with dict, old streaming advanced, 41650
-github.tar, level 0, old streaming advanced, 38831
+github, multithreaded with advanced params, old streaming advanced, 141101
+github.tar, level -5, old streaming advanced, 52273
+github.tar, level -5 with dict, old streaming advanced, 51249
+github.tar, level -3, old streaming advanced, 45783
+github.tar, level -3 with dict, old streaming advanced, 45093
+github.tar, level -1, old streaming advanced, 42687
+github.tar, level -1 with dict, old streaming advanced, 41762
+github.tar, level 0, old streaming advanced, 38884
github.tar, level 0 with dict, old streaming advanced, 38013
-github.tar, level 1, old streaming advanced, 39200
-github.tar, level 1 with dict, old streaming advanced, 38359
-github.tar, level 3, old streaming advanced, 38831
+github.tar, level 1, old streaming advanced, 39346
+github.tar, level 1 with dict, old streaming advanced, 38507
+github.tar, level 3, old streaming advanced, 38884
github.tar, level 3 with dict, old streaming advanced, 38013
-github.tar, level 4, old streaming advanced, 38893
+github.tar, level 4, old streaming advanced, 38880
github.tar, level 4 with dict, old streaming advanced, 38063
github.tar, level 5, old streaming advanced, 39651
github.tar, level 5 with dict, old streaming advanced, 39018
@@ -1405,26 +1405,26 @@ github.tar, level 16, old stre
github.tar, level 16 with dict, old streaming advanced, 38578
github.tar, level 19, old streaming advanced, 32262
github.tar, level 19 with dict, old streaming advanced, 32678
-github.tar, no source size, old streaming advanced, 38828
-github.tar, no source size with dict, old streaming advanced, 38015
-github.tar, long distance mode, old streaming advanced, 38831
-github.tar, multithreaded, old streaming advanced, 38831
-github.tar, multithreaded long distance mode, old streaming advanced, 38831
-github.tar, small window log, old streaming advanced, 199561
+github.tar, no source size, old streaming advanced, 38881
+github.tar, no source size with dict, old streaming advanced, 38076
+github.tar, long distance mode, old streaming advanced, 38884
+github.tar, multithreaded, old streaming advanced, 38884
+github.tar, multithreaded long distance mode, old streaming advanced, 38884
+github.tar, small window log, old streaming advanced, 199556
github.tar, small hash log, old streaming advanced, 129870
github.tar, small chain log, old streaming advanced, 41669
github.tar, explicit params, old streaming advanced, 41385
-github.tar, uncompressed literals, old streaming advanced, 38831
+github.tar, uncompressed literals, old streaming advanced, 38884
github.tar, uncompressed literals optimal, old streaming advanced, 32262
-github.tar, huffman literals, old streaming advanced, 42560
-github.tar, multithreaded with advanced params, old streaming advanced, 38831
+github.tar, huffman literals, old streaming advanced, 42687
+github.tar, multithreaded with advanced params, old streaming advanced, 38884
github, level -5 with dict, old streaming cdict, 45832
github, level -3 with dict, old streaming cdict, 44671
github, level -1 with dict, old streaming cdict, 41825
-github, level 0 with dict, old streaming cdict, 41148
+github, level 0 with dict, old streaming cdict, 41118
github, level 1 with dict, old streaming cdict, 41266
-github, level 3 with dict, old streaming cdict, 41148
-github, level 4 with dict, old streaming cdict, 41251
+github, level 3 with dict, old streaming cdict, 41118
+github, level 4 with dict, old streaming cdict, 41229
github, level 5 with dict, old streaming cdict, 38754
github, level 6 with dict, old streaming cdict, 38669
github, level 7 with dict, old streaming cdict, 38765
@@ -1432,12 +1432,12 @@ github, level 9 with dict, old stre
github, level 13 with dict, old streaming cdict, 39900
github, level 16 with dict, old streaming cdict, 37902
github, level 19 with dict, old streaming cdict, 37916
-github, no source size with dict, old streaming cdict, 40654
-github.tar, level -5 with dict, old streaming cdict, 51286
-github.tar, level -3 with dict, old streaming cdict, 45147
-github.tar, level -1 with dict, old streaming cdict, 41865
+github, no source size with dict, old streaming cdict, 40652
+github.tar, level -5 with dict, old streaming cdict, 51407
+github.tar, level -3 with dict, old streaming cdict, 45254
+github.tar, level -1 with dict, old streaming cdict, 41973
github.tar, level 0 with dict, old streaming cdict, 37956
-github.tar, level 1 with dict, old streaming cdict, 38364
+github.tar, level 1 with dict, old streaming cdict, 38503
github.tar, level 3 with dict, old streaming cdict, 37956
github.tar, level 4 with dict, old streaming cdict, 37927
github.tar, level 5 with dict, old streaming cdict, 39000
@@ -1447,14 +1447,14 @@ github.tar, level 9 with dict, old stre
github.tar, level 13 with dict, old streaming cdict, 36010
github.tar, level 16 with dict, old streaming cdict, 39081
github.tar, level 19 with dict, old streaming cdict, 32428
-github.tar, no source size with dict, old streaming cdict, 38000
+github.tar, no source size with dict, old streaming cdict, 38111
github, level -5 with dict, old streaming advanced cdict, 46708
github, level -3 with dict, old streaming advanced cdict, 45476
github, level -1 with dict, old streaming advanced cdict, 42060
-github, level 0 with dict, old streaming advanced cdict, 41113
+github, level 0 with dict, old streaming advanced cdict, 41074
github, level 1 with dict, old streaming advanced cdict, 42430
-github, level 3 with dict, old streaming advanced cdict, 41113
-github, level 4 with dict, old streaming advanced cdict, 41084
+github, level 3 with dict, old streaming advanced cdict, 41074
+github, level 4 with dict, old streaming advanced cdict, 41046
github, level 5 with dict, old streaming advanced cdict, 38723
github, level 6 with dict, old streaming advanced cdict, 38744
github, level 7 with dict, old streaming advanced cdict, 38875
@@ -1463,11 +1463,11 @@ github, level 13 with dict, old stre
github, level 16 with dict, old streaming advanced cdict, 40804
github, level 19 with dict, old streaming advanced cdict, 37916
github, no source size with dict, old streaming advanced cdict, 40608
-github.tar, level -5 with dict, old streaming advanced cdict, 50791
-github.tar, level -3 with dict, old streaming advanced cdict, 44926
-github.tar, level -1 with dict, old streaming advanced cdict, 41482
+github.tar, level -5 with dict, old streaming advanced cdict, 50907
+github.tar, level -3 with dict, old streaming advanced cdict, 45032
+github.tar, level -1 with dict, old streaming advanced cdict, 41589
github.tar, level 0 with dict, old streaming advanced cdict, 38013
-github.tar, level 1 with dict, old streaming advanced cdict, 38168
+github.tar, level 1 with dict, old streaming advanced cdict, 38294
github.tar, level 3 with dict, old streaming advanced cdict, 38013
github.tar, level 4 with dict, old streaming advanced cdict, 38063
github.tar, level 5 with dict, old streaming advanced cdict, 39018
@@ -1477,4 +1477,4 @@ github.tar, level 9 with dict, old stre
github.tar, level 13 with dict, old streaming advanced cdict, 35807
github.tar, level 16 with dict, old streaming advanced cdict, 38578
github.tar, level 19 with dict, old streaming advanced cdict, 32678
-github.tar, no source size with dict, old streaming advanced cdict, 38015
+github.tar, no source size with dict, old streaming advanced cdict, 38076
diff --git a/tests/zstreamtest.c b/tests/zstreamtest.c
index e0ee4c3e934..760d9f26cc3 100644
--- a/tests/zstreamtest.c
+++ b/tests/zstreamtest.c
@@ -436,12 +436,12 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
/* context size functions */
DISPLAYLEVEL(3, "test%3i : estimate DStream size : ", testNb++);
- { ZSTD_frameHeader fhi;
+ { ZSTD_FrameHeader fhi;
const void* cStart = (char*)compressedBuffer + (skippableFrameSize + 8);
size_t const gfhError = ZSTD_getFrameHeader(&fhi, cStart, cSize);
if (gfhError!=0) goto _output_error;
DISPLAYLEVEL(5, " (windowSize : %u) ", (unsigned)fhi.windowSize);
- { size_t const s = ZSTD_estimateDStreamSize(fhi.windowSize)
+ { size_t const s = ZSTD_estimateDStreamSize((size_t)fhi.windowSize)
/* uses ZSTD_initDStream_usingDict() */
+ ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
if (ZSTD_isError(s)) goto _output_error;
@@ -537,7 +537,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
{ size_t const r = ZSTD_endStream(zc, &outBuff);
- CHECK(r != 0, "Error or some data not flushed (ret=%zu)", r);
+ CHECK(r != 0, "Error or some data not flushed (ret=%i)", ZSTD_getErrorCode(r));
}
inBuff.src = outBuff.dst;
inBuff.size = outBuff.pos;
@@ -567,7 +567,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
{ size_t const r = ZSTD_endStream(zc, &outBuff);
- CHECK(r != 0, "Error or some data not flushed (ret=%zu)", r);
+ CHECK(r != 0, "Error or some data not flushed (ret=%i)", ZSTD_getErrorCode(r));
}
inBuff.src = outBuff.dst;
inBuff.size = outBuff.pos;
@@ -595,7 +595,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
CHECK(inBuff.pos != inBuff.size, "Entire input should be consumed");
{ size_t const r = ZSTD_endStream(zc, &outBuff);
- CHECK(r != 0, "Error or some data not flushed (ret=%zu)", r);
+ CHECK(r != 0, "Error or some data not flushed (ret=%i)", ZSTD_getErrorCode(r));
}
{ unsigned long long origSize = ZSTD_findDecompressedSize(outBuff.dst, outBuff.pos);
CHECK(origSize == ZSTD_CONTENTSIZE_UNKNOWN, "Unknown!");
@@ -758,7 +758,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
}
streaming2KSize = ZSTD_sizeof_DCtx(dctx);
CHECK_Z(streaming2KSize);
-
+
CHECK_Z(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters));
inBuff.pos = 0;
outBuff.pos = 0;
@@ -769,7 +769,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
}
streamingSize = ZSTD_sizeof_DCtx(dctx);
CHECK_Z(streamingSize);
-
+
CHECK_Z(ZSTD_DCtx_setParameter(dctx, ZSTD_d_maxBlockSize, 1024));
inBuff.pos = 0;
outBuff.pos = 0;
@@ -777,7 +777,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
CHECK(streamingSize < singlePassSize + (1 << 18) + 3 * ZSTD_BLOCKSIZE_MAX, "Streaming doesn't use the right amount of memory");
CHECK(streamingSize != streaming2KSize + 3 * (ZSTD_BLOCKSIZE_MAX - 2048), "ZSTD_d_blockSizeMax didn't save the right amount of memory");
- DISPLAYLEVEL(3, "| %zu | %zu | %zu | ", singlePassSize, streaming2KSize, streamingSize);
+ DISPLAYLEVEL(3, "| %u | %u | %u | ", (unsigned)singlePassSize, (unsigned)streaming2KSize, (unsigned)streamingSize);
ZSTD_freeDCtx(dctx);
}
@@ -884,7 +884,8 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
DISPLAYLEVEL(3, "test%3i : ZSTD_compress2() uses stable input and output : ", testNb++);
CHECK_Z(cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBufferSize));
CHECK(!(cSize < ZSTD_compressBound(CNBufferSize)), "cSize too large for test");
- CHECK_Z(cSize = ZSTD_compress2(cctx, compressedBuffer, cSize + 4, CNBuffer, CNBufferSize));
+ /* check that compression fits within an 8-byte margin */
+ CHECK_Z(cSize = ZSTD_compress2(cctx, compressedBuffer, cSize+8, CNBuffer, CNBufferSize));
CHECK_Z(cctxSize1 = ZSTD_sizeof_CCtx(cctx));
/* @cctxSize2 : sizeof_CCtx when doing full streaming (no stable in/out) */
{ ZSTD_CCtx* const cctx2 = ZSTD_createCCtx();
@@ -957,7 +958,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
break;
out.size = MIN(out.size + cSize / 4, compressedBufferSize);
}
- CHECK_Z(ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize));
+ CHECK_Z(ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, out.pos));
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() ZSTD_c_stableInBuffer modify buffer : ", testNb++);
@@ -998,7 +999,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
void* const verifBuf = (char*)outBuf.dst + outBuf.pos;
const size_t decSize = ZSTD_decompress(verifBuf, inputSize, outBuf.dst, outBuf.pos);
CHECK_Z(decSize);
- CHECK(decSize != inputSize, "regenerated %zu bytes, instead of %zu", decSize, inputSize);
+ CHECK(decSize != inputSize, "regenerated %u bytes, instead of %u", (unsigned)decSize, (unsigned)inputSize);
CHECK(memcmp(realSrcStart, verifBuf, inputSize) != 0, "regenerated data different from original");
} }
DISPLAYLEVEL(3, "OK \n");
@@ -1028,14 +1029,14 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
void* const verifBuf = (char*)outBuf.dst + outBuf.pos;
const size_t decSize = ZSTD_decompress(verifBuf, inputSize, outBuf.dst, outBuf.pos);
CHECK_Z(decSize);
- CHECK(decSize != inputSize, "regenerated %zu bytes, instead of %zu", decSize, inputSize);
+ CHECK(decSize != inputSize, "regenerated %u bytes, instead of %u", (unsigned)decSize, (unsigned)inputSize);
CHECK(memcmp(realSrcStart, verifBuf, inputSize) != 0, "regenerated data different from original");
} }
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() with ZSTD_c_stableInBuffer: context size : ", testNb++);
{ size_t const cctxSize = ZSTD_sizeof_CCtx(cctx);
- DISPLAYLEVEL(4, "cctxSize1=%zu; cctxSize=%zu; cctxSize2=%zu : ", cctxSize1, cctxSize, cctxSize2);
+ DISPLAYLEVEL(4, "cctxSize1=%u; cctxSize=%u; cctxSize2=%u : ", (unsigned)cctxSize1, (unsigned)cctxSize, (unsigned)cctxSize2);
CHECK(!(cctxSize1 < cctxSize), "Must be bigger than single-pass");
CHECK(!(cctxSize < cctxSize2), "Must be smaller than streaming");
cctxSize1 = cctxSize;
@@ -1070,7 +1071,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() with ZSTD_c_stableOutBuffer: context size : ", testNb++);
{ size_t const cctxSize = ZSTD_sizeof_CCtx(cctx);
- DISPLAYLEVEL(4, "cctxSize1=%zu; cctxSize=%zu; cctxSize2=%zu : ", cctxSize1, cctxSize, cctxSize2);
+ DISPLAYLEVEL(4, "cctxSize1=%u; cctxSize=%u; cctxSize2=%u : ", (unsigned)cctxSize1, (unsigned)cctxSize, (unsigned)cctxSize2);
CHECK(!(cctxSize1 < cctxSize), "Must be bigger than single-pass and stableInBuffer");
CHECK(!(cctxSize < cctxSize2), "Must be smaller than streaming");
}
@@ -1504,7 +1505,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
DISPLAYLEVEL(3, "test%3i : compress %u bytes with multiple threads + dictionary : ", testNb++, (unsigned)srcSize);
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_compressionLevel, 3) );
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_nbWorkers, nbWorkers) );
- CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_jobSize, jobSize) );
+ CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_jobSize, (int)jobSize) );
assert(start > offset);
assert(start + segLength < COMPRESSIBLE_NOISE_LENGTH);
memcpy(dst, srcToCopy, segLength); /* create a long repetition at long distance for job 2 */
@@ -1529,7 +1530,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
DISPLAYLEVEL(3, "test%3i : decompress large frame created from multiple threads + dictionary : ", testNb++);
{ ZSTD_DStream* const dstream = ZSTD_createDCtx();
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
ZSTD_getFrameHeader(&zfh, compressedBuffer, cSize);
DISPLAYLEVEL(5, "frame windowsize = %u : ", (unsigned)zfh.windowSize);
outBuff.dst = decodedBuffer;
@@ -1550,7 +1551,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
DISPLAYLEVEL(3, "test%3i : check dictionary FSE tables can represent every code : ", testNb++);
{ unsigned const kMaxWindowLog = 24;
unsigned value;
- ZSTD_compressionParameters cParams = ZSTD_getCParams(3, 1U << kMaxWindowLog, 1024);
+ ZSTD_compressionParameters cParams = ZSTD_getCParams(3, 1ULL << kMaxWindowLog, 1024);
ZSTD_CDict* cdict;
ZSTD_DDict* ddict;
SEQ_stream seq = SEQ_initStream(0x87654321);
@@ -1653,7 +1654,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
int windowLog;
int const kMaxWindowLog = bigTests ? 29 : 26;
size_t const kNbSequences = 10000;
- size_t const kMaxSrcSize = (1u << kMaxWindowLog) + 10 * kNbSequences;
+ size_t const kMaxSrcSize = ((size_t)1 << kMaxWindowLog) + 10 * kNbSequences;
char* src = calloc(kMaxSrcSize, 1);
ZSTD_Sequence* sequences = malloc(sizeof(ZSTD_Sequence) * kNbSequences);
for (windowLog = ZSTD_WINDOWLOG_MIN; windowLog <= kMaxWindowLog; ++windowLog) {
@@ -1901,7 +1902,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
CHECK_Z(ZSTD_compressStream2(zc, &out, &in, ZSTD_e_flush));
CHECK(in.pos != in.size, "input not fully consumed");
- remainingInput -= kSmallBlockSize;
+ remainingInput -= (int)kSmallBlockSize;
}
/* Write several very long offset matches into the dictionary */
for (offset = 1024; offset >= 0; offset -= 128) {
@@ -2356,7 +2357,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
}
DISPLAYLEVEL(3, "OK \n");
- DISPLAYLEVEL(3, "test%3i : Testing external sequence producer with static CCtx: ", testNb++);
+ DISPLAYLEVEL(3, "test%3i : Testing external sequence producer with static CCtx (one-shot): ", testNb++);
{
size_t const dstBufSize = ZSTD_compressBound(CNBufferSize);
BYTE* const dstBuf = (BYTE*)malloc(dstBufSize);
@@ -2375,7 +2376,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
size_t const cctxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
cctxBuf = malloc(cctxSize);
staticCCtx = ZSTD_initStaticCCtx(cctxBuf, cctxSize);
- ZSTD_CCtx_setParametersUsingCCtxParams(staticCCtx, params);
+ CHECK_Z(ZSTD_CCtx_setParametersUsingCCtxParams(staticCCtx, params));
}
// Check that compression with external sequence producer succeeds when expected
@@ -2408,6 +2409,65 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
}
DISPLAYLEVEL(3, "OK \n");
+ DISPLAYLEVEL(3, "test%3i : Testing external sequence producer with static CCtx (streaming): ", testNb++);
+ {
+ size_t const dstBufSize = ZSTD_compressBound(CNBufferSize);
+ BYTE* const dstBuf = (BYTE*)malloc(dstBufSize);
+ size_t const checkBufSize = CNBufferSize;
+ BYTE* const checkBuf = (BYTE*)malloc(checkBufSize);
+ ZSTD_CCtx_params* params = ZSTD_createCCtxParams();
+ ZSTD_CCtx* staticCCtx;
+ void* cctxBuf;
+ EMF_testCase seqProdState;
+
+ CHECK_Z(ZSTD_CCtxParams_setParameter(params, ZSTD_c_validateSequences, 1));
+ CHECK_Z(ZSTD_CCtxParams_setParameter(params, ZSTD_c_enableSeqProducerFallback, 0));
+ ZSTD_CCtxParams_registerSequenceProducer(params, &seqProdState, zstreamSequenceProducer);
+
+ {
+ size_t const cctxSize = ZSTD_estimateCStreamSize_usingCCtxParams(params);
+ cctxBuf = malloc(cctxSize);
+ staticCCtx = ZSTD_initStaticCCtx(cctxBuf, cctxSize);
+ CHECK_Z(ZSTD_CCtx_setParametersUsingCCtxParams(staticCCtx, params));
+ }
+
+ // Check that compression with external sequence producer succeeds when expected
+ seqProdState = EMF_LOTS_OF_SEQS;
+ {
+ ZSTD_inBuffer inBuf = { CNBuffer, CNBufferSize, 0 };
+ ZSTD_outBuffer outBuf = { dstBuf, dstBufSize, 0 };
+ size_t dResult;
+ CHECK_Z(ZSTD_compressStream(staticCCtx, &outBuf, &inBuf));
+ CHECK_Z(ZSTD_endStream(staticCCtx, &outBuf));
+ CHECK(inBuf.pos != inBuf.size, "EMF: inBuf.pos != inBuf.size");
+ dResult = ZSTD_decompress(checkBuf, checkBufSize, outBuf.dst, outBuf.pos);
+ CHECK(ZSTD_isError(dResult), "EMF: Decompression error: %s", ZSTD_getErrorName(dResult));
+ CHECK(dResult != CNBufferSize, "EMF: Corruption!");
+ CHECK(memcmp(CNBuffer, checkBuf, CNBufferSize) != 0, "EMF: Corruption!");
+ }
+
+ CHECK_Z(ZSTD_CCtx_reset(staticCCtx, ZSTD_reset_session_only));
+
+ // Check that compression with external sequence producer fails when expected
+ seqProdState = EMF_BIG_ERROR;
+ {
+ ZSTD_inBuffer inBuf = { CNBuffer, CNBufferSize, 0 };
+ ZSTD_outBuffer outBuf = { dstBuf, dstBufSize, 0 };
+ size_t const cResult = ZSTD_compressStream(staticCCtx, &outBuf, &inBuf);
+ CHECK(!ZSTD_isError(cResult), "EMF: Should have raised an error!");
+ CHECK(
+ ZSTD_getErrorCode(cResult) != ZSTD_error_sequenceProducer_failed,
+ "EMF: Wrong error code: %s", ZSTD_getErrorName(cResult)
+ );
+ }
+
+ free(dstBuf);
+ free(checkBuf);
+ free(cctxBuf);
+ ZSTD_freeCCtxParams(params);
+ }
+ DISPLAYLEVEL(3, "OK \n");
+
DISPLAYLEVEL(3, "test%3i : Decoder should reject invalid frame header on legacy frames: ", testNb++);
{
const unsigned char compressed[] = { 0x26,0xb5,0x2f,0xfd,0x50,0x91,0xfd,0xd8,0xb5 };
@@ -2419,7 +2479,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
DISPLAYLEVEL(3, "test%3i : Test single-shot fallback for magicless mode: ", testNb++);
{
- // Aquire resources
+ // Acquire resources
size_t const srcSize = COMPRESSIBLE_NOISE_LENGTH;
void* src = malloc(srcSize);
size_t const dstSize = ZSTD_compressBound(srcSize);
@@ -2444,7 +2504,7 @@ static int basicUnitTests(U32 seed, double compressibility, int bigTests)
// Validate
CHECK(outBuf.pos != srcSize, "decompressed size must match");
CHECK(memcmp(src, val, srcSize) != 0, "decompressed data must match");
-
+
// Cleanup
free(src); free(dst); free(val);
ZSTD_freeCCtx(cctx);
@@ -2522,13 +2582,13 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double co
static const U32 maxSampleLog = 19;
 size_t const srcBufferSize = (size_t)1<<maxSrcLog;
@@ -287,7 +287,7 @@ int gzclose _Z_OF((gzFile));
int gzclose(gzFile gz) {
z_stream *strm;
- unsigned char out[BUFLEN];
+ unsigned char out[BUFLEN] = { 0 };
if (gz == NULL)
return Z_STREAM_ERROR;
diff --git a/zlibWrapper/gzwrite.c b/zlibWrapper/gzwrite.c
index 81da15314a9..85b776a94f7 100644
--- a/zlibWrapper/gzwrite.c
+++ b/zlibWrapper/gzwrite.c
@@ -64,6 +64,7 @@ local int gz_init(gz_statep state) {
strm->next_out = state.state->out;
state.state->x.next = strm->next_out;
}
+
return 0;
}
@@ -223,7 +224,7 @@ local z_size_t gz_write(gz_statep state, voidpc buf, z_size_t len) {
z_size_t n = (unsigned)-1;
if (n > len)
n = len;
- state.state->strm.avail_in = (z_uInt)n;
+ state.state->strm.avail_in = (uInt)n;
state.state->x.pos += n;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
diff --git a/zlibWrapper/zstd_zlibwrapper.h b/zlibWrapper/zstd_zlibwrapper.h
index 230bf8411b0..dae6787d3ad 100644
--- a/zlibWrapper/zstd_zlibwrapper.h
+++ b/zlibWrapper/zstd_zlibwrapper.h
@@ -11,11 +11,6 @@
#ifndef ZSTD_ZLIBWRAPPER_H
#define ZSTD_ZLIBWRAPPER_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
#define ZLIB_CONST
#define Z_PREFIX
#define ZLIB_INTERNAL /* disables gz*64 functions but fixes zlib 1.2.4 with Z_PREFIX */
@@ -29,6 +24,11 @@ extern "C" {
#define _Z_OF OF
#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/* returns a string with version of zstd library */
const char * zstdVersion(void);