author     Baptiste Daroussin <bapt@FreeBSD.org>    2017-11-09 15:38:02 +0000
committer  Baptiste Daroussin <bapt@FreeBSD.org>    2017-11-09 15:38:02 +0000
commit     653667f9dc9cc0169deeca1a82a60c2e91d5683a (patch)
tree       ac900fcbc6efae43f50daa8009f04669eeebe02d
parent     299df5d48ce3c064ef926ac7814c278bcdc92db0 (diff)
import zstd 1.3.2 (vendor/zstd/1.3.2)
-rw-r--r--  COPYING | 339
-rw-r--r--  LICENSE-examples | 11
-rw-r--r--  Makefile | 68
-rw-r--r--  NEWS | 23
-rw-r--r--  appveyor.yml | 12
-rw-r--r--  circle.yml | 9
-rw-r--r--  contrib/gen_html/Makefile | 10
-rw-r--r--  contrib/gen_html/gen_html.cpp | 6
-rw-r--r--  contrib/pzstd/ErrorHolder.h | 8
-rw-r--r--  contrib/pzstd/Logging.h | 8
-rw-r--r--  contrib/pzstd/Makefile | 10
-rw-r--r--  contrib/pzstd/Options.cpp | 8
-rw-r--r--  contrib/pzstd/Options.h | 8
-rw-r--r--  contrib/pzstd/Pzstd.cpp | 8
-rw-r--r--  contrib/pzstd/Pzstd.h | 8
-rw-r--r--  contrib/pzstd/SkippableFrame.cpp | 8
-rw-r--r--  contrib/pzstd/SkippableFrame.h | 8
-rw-r--r--  contrib/pzstd/main.cpp | 8
-rw-r--r--  contrib/pzstd/test/OptionsTest.cpp | 8
-rw-r--r--  contrib/pzstd/test/PzstdTest.cpp | 8
-rw-r--r--  contrib/pzstd/test/RoundTrip.h | 8
-rw-r--r--  contrib/pzstd/test/RoundTripTest.cpp | 8
-rw-r--r--  contrib/pzstd/utils/Buffer.h | 8
-rw-r--r--  contrib/pzstd/utils/FileSystem.h | 8
-rw-r--r--  contrib/pzstd/utils/Likely.h | 8
-rw-r--r--  contrib/pzstd/utils/Range.h | 8
-rw-r--r--  contrib/pzstd/utils/ResourcePool.h | 8
-rw-r--r--  contrib/pzstd/utils/ScopeGuard.h | 8
-rw-r--r--  contrib/pzstd/utils/ThreadPool.h | 8
-rw-r--r--  contrib/pzstd/utils/WorkQueue.h | 8
-rw-r--r--  contrib/pzstd/utils/test/BufferTest.cpp | 8
-rw-r--r--  contrib/pzstd/utils/test/RangeTest.cpp | 8
-rw-r--r--  contrib/pzstd/utils/test/ResourcePoolTest.cpp | 8
-rw-r--r--  contrib/pzstd/utils/test/ScopeGuardTest.cpp | 8
-rw-r--r--  contrib/pzstd/utils/test/ThreadPoolTest.cpp | 8
-rw-r--r--  contrib/pzstd/utils/test/WorkQueueTest.cpp | 8
-rw-r--r--  contrib/seekable_format/examples/.gitignore | 4
-rw-r--r--  contrib/seekable_format/examples/Makefile | 42
-rw-r--r--  contrib/seekable_format/examples/parallel_compression.c | 215
-rw-r--r--  contrib/seekable_format/examples/parallel_processing.c | 194
-rw-r--r--  contrib/seekable_format/examples/seekable_compression.c | 132
-rw-r--r--  contrib/seekable_format/examples/seekable_decompression.c | 138
-rw-r--r--  contrib/seekable_format/zstd_seekable.h | 184
-rw-r--r--  contrib/seekable_format/zstd_seekable_compression_format.md | 116
-rw-r--r--  contrib/seekable_format/zstdseek_compress.c | 366
-rw-r--r--  contrib/seekable_format/zstdseek_decompress.c | 462
-rw-r--r--  doc/educational_decoder/Makefile | 34
-rw-r--r--  doc/educational_decoder/harness.c | 6
-rw-r--r--  doc/educational_decoder/zstd_decompress.c | 8
-rw-r--r--  doc/educational_decoder/zstd_decompress.h | 8
-rw-r--r--  doc/zstd_manual.html | 409
-rw-r--r--  lib/.gitignore | 1
-rw-r--r--  lib/Makefile | 66
-rw-r--r--  lib/README.md | 113
-rw-r--r--  lib/common/bitstream.h | 58
-rw-r--r--  lib/common/compiler.h | 1
-rw-r--r--  lib/common/error_private.c | 6
-rw-r--r--  lib/common/error_private.h | 8
-rw-r--r--  lib/common/fse.h | 2
-rw-r--r--  lib/common/huf.h | 2
-rw-r--r--  lib/common/mem.h | 1
-rw-r--r--  lib/common/pool.c | 107
-rw-r--r--  lib/common/pool.h | 4
-rw-r--r--  lib/common/threading.c | 26
-rw-r--r--  lib/common/threading.h | 84
-rw-r--r--  lib/common/zstd_common.c | 4
-rw-r--r--  lib/common/zstd_errors.h | 4
-rw-r--r--  lib/common/zstd_internal.h | 116
-rw-r--r--  lib/compress/fse_compress.c | 4
-rw-r--r--  lib/compress/huf_compress.c | 7
-rw-r--r--  lib/compress/zstd_compress.c | 2650
-rw-r--r--  lib/compress/zstd_compress.h | 307
-rw-r--r--  lib/compress/zstd_double_fast.c | 308
-rw-r--r--  lib/compress/zstd_double_fast.h | 28
-rw-r--r--  lib/compress/zstd_fast.c | 242
-rw-r--r--  lib/compress/zstd_fast.h | 30
-rw-r--r--  lib/compress/zstd_lazy.c | 749
-rw-r--r--  lib/compress/zstd_lazy.h | 38
-rw-r--r--  lib/compress/zstd_ldm.c | 707
-rw-r--r--  lib/compress/zstd_ldm.h | 67
-rw-r--r--  lib/compress/zstd_opt.c | 957
-rw-r--r--  lib/compress/zstd_opt.h | 936
-rw-r--r--  lib/compress/zstdmt_compress.c | 343
-rw-r--r--  lib/compress/zstdmt_compress.h | 23
-rw-r--r--  lib/decompress/zstd_decompress.c | 569
-rw-r--r--  lib/deprecated/zbuff.h | 1
-rw-r--r--  lib/deprecated/zbuff_common.c | 1
-rw-r--r--  lib/deprecated/zbuff_compress.c | 1
-rw-r--r--  lib/deprecated/zbuff_decompress.c | 1
-rw-r--r--  lib/dictBuilder/cover.c | 55
-rw-r--r--  lib/dictBuilder/zdict.c | 13
-rw-r--r--  lib/dictBuilder/zdict.h | 1
-rw-r--r--  lib/legacy/zstd_legacy.h | 1
-rw-r--r--  lib/legacy/zstd_v01.c | 1
-rw-r--r--  lib/legacy/zstd_v01.h | 1
-rw-r--r--  lib/legacy/zstd_v02.c | 1
-rw-r--r--  lib/legacy/zstd_v02.h | 1
-rw-r--r--  lib/legacy/zstd_v03.c | 1
-rw-r--r--  lib/legacy/zstd_v03.h | 1
-rw-r--r--  lib/legacy/zstd_v04.c | 1
-rw-r--r--  lib/legacy/zstd_v04.h | 1
-rw-r--r--  lib/legacy/zstd_v05.c | 1
-rw-r--r--  lib/legacy/zstd_v05.h | 1
-rw-r--r--  lib/legacy/zstd_v06.c | 1
-rw-r--r--  lib/legacy/zstd_v06.h | 1
-rw-r--r--  lib/legacy/zstd_v07.c | 1
-rw-r--r--  lib/legacy/zstd_v07.h | 1
-rw-r--r--  lib/zstd.h | 484
-rw-r--r--  programs/Makefile | 64
-rw-r--r--  programs/README.md | 68
-rw-r--r--  programs/bench.c | 66
-rw-r--r--  programs/bench.h | 6
-rw-r--r--  programs/datagen.c | 1
-rw-r--r--  programs/datagen.h | 1
-rw-r--r--  programs/dibio.c | 193
-rw-r--r--  programs/dibio.h | 3
-rw-r--r--  programs/fileio.c | 332
-rw-r--r--  programs/fileio.h | 6
-rw-r--r--  programs/platform.h | 3
-rw-r--r--  programs/util.h | 155
-rw-r--r--  programs/zstd.1 | 78
-rw-r--r--  programs/zstd.1.md | 63
-rw-r--r--  programs/zstdcli.c | 68
-rw-r--r--  tests/.gitignore | 2
-rw-r--r--  tests/Makefile | 109
-rw-r--r--  tests/datagencli.c | 3
-rw-r--r--  tests/decodecorpus.c | 400
-rw-r--r--  tests/fullbench.c | 14
-rw-r--r--  tests/fuzz/Makefile | 139
-rw-r--r--  tests/fuzz/README.md | 94
-rw-r--r--  tests/fuzz/block_decompress.c | 51
-rw-r--r--  tests/fuzz/block_round_trip.c | 92
-rw-r--r--  tests/fuzz/default.options | 2
-rw-r--r--  tests/fuzz/fuzz.h | 26
-rwxr-xr-x  tests/fuzz/fuzz.py | 818
-rw-r--r--  tests/fuzz/fuzz_helpers.h | 54
-rw-r--r--  tests/fuzz/regression_driver.c | 16
-rw-r--r--  tests/fuzz/simple_decompress.c | 17
-rw-r--r--  tests/fuzz/simple_round_trip.c | 48
-rw-r--r--  tests/fuzz/stream_decompress.c | 16
-rw-r--r--  tests/fuzz/stream_round_trip.c | 135
-rw-r--r--  tests/fuzz/zstd_helpers.c | 84
-rw-r--r--  tests/fuzz/zstd_helpers.h | 35
-rw-r--r--  tests/fuzzer.c | 94
-rw-r--r--  tests/gzip/Makefile | 10
-rw-r--r--  tests/invalidDictionaries.c | 1
-rw-r--r--  tests/legacy.c | 1
-rw-r--r--  tests/longmatch.c | 3
-rw-r--r--  tests/namespaceTest.c | 1
-rw-r--r--  tests/paramgrill.c | 7
-rwxr-xr-x  tests/playTests.sh | 110
-rw-r--r--  tests/poolTests.c | 1
-rw-r--r--  tests/roundTripCrash.c | 83
-rw-r--r--  tests/symbols.c | 1
-rwxr-xr-x  tests/test-zstd-speed.py | 10
-rwxr-xr-x  tests/test-zstd-versions.py | 10
-rw-r--r--  tests/zbufftest.c | 3
-rw-r--r--  tests/zstreamtest.c | 329
-rw-r--r--  zlibWrapper/Makefile | 2
-rw-r--r--  zlibWrapper/examples/zwrapbench.c | 41
-rw-r--r--  zlibWrapper/gzcompatibility.h | 3
-rw-r--r--  zlibWrapper/zstd_zlibwrapper.c | 3
-rw-r--r--  zlibWrapper/zstd_zlibwrapper.h | 3
163 files changed, 11463 insertions, 4677 deletions
diff --git a/COPYING b/COPYING
new file mode 100644
index 000000000000..ecbc0593737b
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
\ No newline at end of file
diff --git a/LICENSE-examples b/LICENSE-examples
deleted file mode 100644
index 1de781305490..000000000000
--- a/LICENSE-examples
+++ /dev/null
@@ -1,11 +0,0 @@
-Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-
-The examples provided by Facebook are for non-commercial testing and evaluation
-purposes only. Facebook reserves all rights not expressly granted.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-FACEBOOK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Makefile b/Makefile
index a72f99fcb9d2..2805c8ad4b54 100644
--- a/Makefile
+++ b/Makefile
@@ -1,10 +1,10 @@
# ################################################################
-# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+# Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
# All rights reserved.
#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
# ################################################################
PRGDIR = programs
@@ -12,6 +12,7 @@ ZSTDDIR = lib
BUILDIR = build
ZWRAPDIR = zlibWrapper
TESTDIR = tests
+FUZZDIR = $(TESTDIR)/fuzz
# Define nul output
VOID = /dev/null
@@ -29,15 +30,12 @@ default: lib-release zstd-release
all: | allmost examples manual
.PHONY: allmost
-allmost:
- $(MAKE) -C $(ZSTDDIR) all
- $(MAKE) -C $(PRGDIR) all
- $(MAKE) -C $(TESTDIR) all
+allmost: allzstd
$(MAKE) -C $(ZWRAPDIR) all
#skip zwrapper, can't build that on alternate architectures without the proper zlib installed
-.PHONY: allarch
-allarch:
+.PHONY: allzstd
+allzstd:
$(MAKE) -C $(ZSTDDIR) all
$(MAKE) -C $(PRGDIR) all
$(MAKE) -C $(TESTDIR) all
@@ -100,6 +98,7 @@ clean:
@$(MAKE) -C examples/ $@ > $(VOID)
@$(MAKE) -C contrib/gen_html $@ > $(VOID)
@$(RM) zstd$(EXT) zstdmt$(EXT) tmp*
+ @$(RM) -r lz4
@echo Cleaning completed
#------------------------------------------------------------------------------
@@ -114,7 +113,7 @@ CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_B
list:
@$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs
-.PHONY: install clangtest gpptest armtest usan asan uasan
+.PHONY: install clangtest armtest usan asan uasan
install:
@$(MAKE) -C $(ZSTDDIR) $@
@$(MAKE) -C $(PRGDIR) $@
@@ -158,16 +157,16 @@ m32build: clean
$(MAKE) all32
armbuild: clean
- CC=arm-linux-gnueabi-gcc CFLAGS="-Werror" $(MAKE) allarch
+ CC=arm-linux-gnueabi-gcc CFLAGS="-Werror" $(MAKE) allzstd
aarch64build: clean
- CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allarch
+ CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allzstd
ppcbuild: clean
- CC=powerpc-linux-gnu-gcc CLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allarch
+ CC=powerpc-linux-gnu-gcc CLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allzstd
ppc64build: clean
- CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allarch
+ CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allzstd
armfuzz: clean
CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest
@@ -181,8 +180,10 @@ ppcfuzz: clean
ppc64fuzz: clean
CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest
-gpptest: clean
- CC=$(CXX) $(MAKE) -C $(PRGDIR) all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror"
+.PHONY: cxxtest
+cxxtest: CXXFLAGS += -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror
+cxxtest: clean
+ $(MAKE) -C $(PRGDIR) all CC="$(CXX) -Wno-deprecated" CFLAGS="$(CXXFLAGS)" # adding -Wno-deprecated to avoid clang++ warning on dealing with C files directly
gcc5test: clean
gcc-5 -v
@@ -218,6 +219,15 @@ arm-ppc-compilation:
$(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static"
$(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static"
+regressiontest:
+ $(MAKE) -C $(FUZZDIR) regressiontest
+
+uasanregressiontest:
+ $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=address,undefined" CXXFLAGS="-O3 -fsanitize=address,undefined"
+
+msanregressiontest:
+ $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=memory" CXXFLAGS="-O3 -fsanitize=memory"
+
# run UBsan with -fsanitize-recover=signed-integer-overflow
# due to a bug in UBsan when doing pointer subtraction
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303
@@ -277,6 +287,10 @@ gpp6install: apt-add-repo
clang38install:
APT_PACKAGES="clang-3.8" $(MAKE) apt-install
+# Ubuntu 14.04 ships a too-old lz4
+lz4install:
+ [ -e lz4 ] || git clone https://github.com/lz4/lz4 && sudo $(MAKE) -C lz4 install
+
endif
@@ -287,7 +301,7 @@ endif
#------------------------------------------------------------------------
-#make tests validated only for MSYS, Linux, OSX, kFreeBSD and Hurd targets
+# target specific tests
#------------------------------------------------------------------------
ifneq (,$(filter $(HOST_OS),MSYS POSIX))
cmakebuild:
@@ -297,38 +311,38 @@ cmakebuild:
cd $(BUILDIR)/cmake/build ; cmake -DCMAKE_INSTALL_PREFIX:PATH=~/install_test_dir $(CMAKE_PARAMS) .. ; $(MAKE) install ; $(MAKE) uninstall
c90build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-std=c90" $(MAKE) allmost # will fail, due to missing support for `long long`
gnu90build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-std=gnu90" $(MAKE) allmost
c99build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-std=c99" $(MAKE) allmost
gnu99build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-std=gnu99" $(MAKE) allmost
c11build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-std=c11" $(MAKE) allmost
bmix64build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-O3 -mbmi -Werror" $(MAKE) -C $(TESTDIR) test
bmix32build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-O3 -mbmi -mx32 -Werror" $(MAKE) -C $(TESTDIR) test
bmi32build: clean
- gcc -v
+ $(CC) -v
CFLAGS="-O3 -mbmi -m32 -Werror" $(MAKE) -C $(TESTDIR) test
staticAnalyze: clean
- gcc -v
+ $(CC) -v
CPPFLAGS=-g scan-build --status-bugs -v $(MAKE) all
endif
diff --git a/NEWS b/NEWS
index 59687532fe22..215c0a329ba6 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,26 @@
+v1.3.2
+new : long range mode, using --long command, by Stella Lau (@stellamplau)
+new : ability to generate and decode magicless frames (#591)
+changed : maximum nb of threads reduced to 200, to avoid address space exhaustion in 32-bits mode
+fix : multi-threading compression works with custom allocators
+fix : ZSTD_sizeof_CStream() was over-evaluating memory usage
+fix : a rare compression bug when compression generates very large distances and bunch of other conditions (only possible at --ultra -22)
+fix : 32-bits build can now decode large offsets (levels 21+)
+cli : added LZ4 frame support by default, by Felix Handte (@felixhandte)
+cli : improved --list output
+cli : new : can split input file for dictionary training, using command -B#
+cli : new : clean operation artefact on Ctrl-C interruption
+cli : fix : do not change /dev/null permissions when using command -t with root access, reported by @mike155 (#851)
+cli : fix : write file size in header in multiple-files mode
+api : added macro ZSTD_COMPRESSBOUND() for static allocation
+api : experimental : new advanced decompression API
+api : fix : sizeof_CCtx() used to over-estimate
+build: fix : no-multithread variant compiles without pool.c dependency, reported by Mitchell Blank Jr (@mitchblank) (#819)
+build: better compatibility with reproducible builds, by Bernhard M. Wiedemann (@bmwiedemann) (#818)
+example : added streaming_memory_usage
+license : changed /examples license to BSD + GPLv2
+license : fix a few header files to reflect new license (#825)
+
v1.3.1
New license : BSD + GPLv2
perf: substantially decreased memory usage in Multi-threading mode, thanks to reports by Tino Reichardt (@mcmilk)
diff --git a/appveyor.yml b/appveyor.yml
index 1815563e7f5d..0f9142b4715a 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -9,19 +9,19 @@
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
- SCRIPT: "make allarch && make -C tests test-symbols fullbench-dll fullbench-lib"
+ SCRIPT: "make allzstd MOREFLAGS=-static && make -C tests test-symbols fullbench-dll fullbench-lib"
ARTIFACT: "true"
BUILD: "true"
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x86"
- SCRIPT: "make allarch"
+ SCRIPT: "make allzstd MOREFLAGS=-static"
ARTIFACT: "true"
BUILD: "true"
- COMPILER: "clang"
HOST: "mingw"
PLATFORM: "x64"
- SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch"
+ SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allzstd"
BUILD: "true"
- COMPILER: "gcc"
@@ -172,15 +172,15 @@
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
- SCRIPT: "make allarch"
+ SCRIPT: "make allzstd"
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x86"
- SCRIPT: "make allarch"
+ SCRIPT: "make allzstd"
- COMPILER: "clang"
HOST: "mingw"
PLATFORM: "x64"
- SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch"
+ SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allzstd"
- COMPILER: "visual"
HOST: "visual"
diff --git a/circle.yml b/circle.yml
index 8c2bd30d330d..5bc0ce643204 100644
--- a/circle.yml
+++ b/circle.yml
@@ -9,7 +9,7 @@ dependencies:
test:
override:
- ? |
- if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then cc -v; make all && make clean; fi &&
+ if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then cc -v; make all && make clean && make -C lib libzstd-nomt && make clean; fi &&
if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gnu90build && make clean; fi
:
parallel: true
@@ -45,7 +45,7 @@ test:
parallel: true
- ? |
if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make ppc64build && make clean; fi &&
- if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc7build && make clean; fi #could add another test here
+ if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc7build && make clean; fi
:
parallel: true
- ? |
@@ -53,6 +53,11 @@ test:
if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make -C tests test-legacy test-longmatch test-symbols && make clean; fi
:
parallel: true
+ - ? |
+ if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make -j regressiontest && make clean; fi &&
+ if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then true; fi # Could add another test here
+ :
+ parallel: true
post:
- echo Circle CI tests finished
diff --git a/contrib/gen_html/Makefile b/contrib/gen_html/Makefile
index ea68b11fc568..d9b32e351273 100644
--- a/contrib/gen_html/Makefile
+++ b/contrib/gen_html/Makefile
@@ -1,11 +1,11 @@
-# ##########################################################################
+# ################################################################
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-# ##########################################################################
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
CFLAGS ?= -O3
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wstrict-aliasing=1 -Wswitch-enum -Wno-comment
diff --git a/contrib/gen_html/gen_html.cpp b/contrib/gen_html/gen_html.cpp
index e5261c0866d0..90d5b21a3aa6 100644
--- a/contrib/gen_html/gen_html.cpp
+++ b/contrib/gen_html/gen_html.cpp
@@ -2,9 +2,9 @@
* Copyright (c) 2016-present, Przemyslaw Skibinski, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include <iostream>
diff --git a/contrib/pzstd/ErrorHolder.h b/contrib/pzstd/ErrorHolder.h
index 188badcadfd3..829651c5961e 100644
--- a/contrib/pzstd/ErrorHolder.h
+++ b/contrib/pzstd/ErrorHolder.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/Logging.h b/contrib/pzstd/Logging.h
index 76c982ab2a46..16a63932c0a3 100644
--- a/contrib/pzstd/Logging.h
+++ b/contrib/pzstd/Logging.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/Makefile b/contrib/pzstd/Makefile
index cec6959e62a5..40531e216539 100644
--- a/contrib/pzstd/Makefile
+++ b/contrib/pzstd/Makefile
@@ -1,11 +1,11 @@
-# ##########################################################################
+# ################################################################
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-# ##########################################################################
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
# Standard variables for installation
DESTDIR ?=
diff --git a/contrib/pzstd/Options.cpp b/contrib/pzstd/Options.cpp
index 1f53f2bff78a..d9b216b42951 100644
--- a/contrib/pzstd/Options.cpp
+++ b/contrib/pzstd/Options.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "Options.h"
#include "util.h"
diff --git a/contrib/pzstd/Options.h b/contrib/pzstd/Options.h
index d58de017e012..f4f2aaa499cb 100644
--- a/contrib/pzstd/Options.h
+++ b/contrib/pzstd/Options.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/Pzstd.cpp b/contrib/pzstd/Pzstd.cpp
index ae5d73444413..1eb4ce14cf15 100644
--- a/contrib/pzstd/Pzstd.cpp
+++ b/contrib/pzstd/Pzstd.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "Pzstd.h"
#include "SkippableFrame.h"
diff --git a/contrib/pzstd/Pzstd.h b/contrib/pzstd/Pzstd.h
index 1e29a7170e08..79d1fcca2653 100644
--- a/contrib/pzstd/Pzstd.h
+++ b/contrib/pzstd/Pzstd.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/SkippableFrame.cpp b/contrib/pzstd/SkippableFrame.cpp
index 5dc95e5ab099..769866dfc815 100644
--- a/contrib/pzstd/SkippableFrame.cpp
+++ b/contrib/pzstd/SkippableFrame.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "SkippableFrame.h"
#include "mem.h"
diff --git a/contrib/pzstd/SkippableFrame.h b/contrib/pzstd/SkippableFrame.h
index 9dc95c1f51e2..60deed0405be 100644
--- a/contrib/pzstd/SkippableFrame.h
+++ b/contrib/pzstd/SkippableFrame.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/main.cpp b/contrib/pzstd/main.cpp
index 7d8dbfbcff5e..b93f043b16b1 100644
--- a/contrib/pzstd/main.cpp
+++ b/contrib/pzstd/main.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "ErrorHolder.h"
#include "Options.h"
diff --git a/contrib/pzstd/test/OptionsTest.cpp b/contrib/pzstd/test/OptionsTest.cpp
index b3efe2b7ef5b..e601148255d4 100644
--- a/contrib/pzstd/test/OptionsTest.cpp
+++ b/contrib/pzstd/test/OptionsTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "Options.h"
diff --git a/contrib/pzstd/test/PzstdTest.cpp b/contrib/pzstd/test/PzstdTest.cpp
index cadfa83f7215..5c7d66310805 100644
--- a/contrib/pzstd/test/PzstdTest.cpp
+++ b/contrib/pzstd/test/PzstdTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "Pzstd.h"
extern "C" {
diff --git a/contrib/pzstd/test/RoundTrip.h b/contrib/pzstd/test/RoundTrip.h
index 8b908845941f..c6364ecb4227 100644
--- a/contrib/pzstd/test/RoundTrip.h
+++ b/contrib/pzstd/test/RoundTrip.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/test/RoundTripTest.cpp b/contrib/pzstd/test/RoundTripTest.cpp
index ed2ea770c523..36af0673ae6a 100644
--- a/contrib/pzstd/test/RoundTripTest.cpp
+++ b/contrib/pzstd/test/RoundTripTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
extern "C" {
#include "datagen.h"
diff --git a/contrib/pzstd/utils/Buffer.h b/contrib/pzstd/utils/Buffer.h
index ab25bac9cb55..f69c3b4d9f7a 100644
--- a/contrib/pzstd/utils/Buffer.h
+++ b/contrib/pzstd/utils/Buffer.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/utils/FileSystem.h b/contrib/pzstd/utils/FileSystem.h
index 7d597047fd1f..3cfbe86e507e 100644
--- a/contrib/pzstd/utils/FileSystem.h
+++ b/contrib/pzstd/utils/FileSystem.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/utils/Likely.h b/contrib/pzstd/utils/Likely.h
index c8ea102b1396..7cea8da2771f 100644
--- a/contrib/pzstd/utils/Likely.h
+++ b/contrib/pzstd/utils/Likely.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
diff --git a/contrib/pzstd/utils/Range.h b/contrib/pzstd/utils/Range.h
index 111e98f58813..7e2559cc9e3d 100644
--- a/contrib/pzstd/utils/Range.h
+++ b/contrib/pzstd/utils/Range.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
diff --git a/contrib/pzstd/utils/ResourcePool.h b/contrib/pzstd/utils/ResourcePool.h
index ed011306ba52..a6ff5ffc5db3 100644
--- a/contrib/pzstd/utils/ResourcePool.h
+++ b/contrib/pzstd/utils/ResourcePool.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/utils/ScopeGuard.h b/contrib/pzstd/utils/ScopeGuard.h
index 5a333e0ab884..31768f43d22c 100644
--- a/contrib/pzstd/utils/ScopeGuard.h
+++ b/contrib/pzstd/utils/ScopeGuard.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/utils/ThreadPool.h b/contrib/pzstd/utils/ThreadPool.h
index 99b3ecfa51ae..8ece8e0da4eb 100644
--- a/contrib/pzstd/utils/ThreadPool.h
+++ b/contrib/pzstd/utils/ThreadPool.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/utils/WorkQueue.h b/contrib/pzstd/utils/WorkQueue.h
index 780e5360ffb1..1d14d922c648 100644
--- a/contrib/pzstd/utils/WorkQueue.h
+++ b/contrib/pzstd/utils/WorkQueue.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#pragma once
diff --git a/contrib/pzstd/utils/test/BufferTest.cpp b/contrib/pzstd/utils/test/BufferTest.cpp
index 66ec961e236f..fbba74e82628 100644
--- a/contrib/pzstd/utils/test/BufferTest.cpp
+++ b/contrib/pzstd/utils/test/BufferTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "utils/Buffer.h"
#include "utils/Range.h"
diff --git a/contrib/pzstd/utils/test/RangeTest.cpp b/contrib/pzstd/utils/test/RangeTest.cpp
index c761c8aff6e3..755b50fa6e80 100644
--- a/contrib/pzstd/utils/test/RangeTest.cpp
+++ b/contrib/pzstd/utils/test/RangeTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "utils/Range.h"
diff --git a/contrib/pzstd/utils/test/ResourcePoolTest.cpp b/contrib/pzstd/utils/test/ResourcePoolTest.cpp
index a6a86b345eb9..6fe145180be9 100644
--- a/contrib/pzstd/utils/test/ResourcePoolTest.cpp
+++ b/contrib/pzstd/utils/test/ResourcePoolTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "utils/ResourcePool.h"
diff --git a/contrib/pzstd/utils/test/ScopeGuardTest.cpp b/contrib/pzstd/utils/test/ScopeGuardTest.cpp
index 0c4dc0357304..7bc624da79b2 100644
--- a/contrib/pzstd/utils/test/ScopeGuardTest.cpp
+++ b/contrib/pzstd/utils/test/ScopeGuardTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "utils/ScopeGuard.h"
diff --git a/contrib/pzstd/utils/test/ThreadPoolTest.cpp b/contrib/pzstd/utils/test/ThreadPoolTest.cpp
index 89085afd434c..703fd4c9ca17 100644
--- a/contrib/pzstd/utils/test/ThreadPoolTest.cpp
+++ b/contrib/pzstd/utils/test/ThreadPoolTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "utils/ThreadPool.h"
diff --git a/contrib/pzstd/utils/test/WorkQueueTest.cpp b/contrib/pzstd/utils/test/WorkQueueTest.cpp
index 8caf170d2948..14cf77304f21 100644
--- a/contrib/pzstd/utils/test/WorkQueueTest.cpp
+++ b/contrib/pzstd/utils/test/WorkQueueTest.cpp
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "utils/Buffer.h"
#include "utils/WorkQueue.h"
diff --git a/contrib/seekable_format/examples/.gitignore b/contrib/seekable_format/examples/.gitignore
new file mode 100644
index 000000000000..df2f9ab07499
--- /dev/null
+++ b/contrib/seekable_format/examples/.gitignore
@@ -0,0 +1,4 @@
+seekable_compression
+seekable_decompression
+parallel_processing
+parallel_compression
diff --git a/contrib/seekable_format/examples/Makefile b/contrib/seekable_format/examples/Makefile
new file mode 100644
index 000000000000..1847aa7e7b39
--- /dev/null
+++ b/contrib/seekable_format/examples/Makefile
@@ -0,0 +1,42 @@
+# ################################################################
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
+
+# This Makefile presumes libzstd is already built, e.g. by running `make` at the repository root or in lib/
+
+LDFLAGS += ../../../lib/libzstd.a
+CPPFLAGS += -I../ -I../../../lib -I../../../lib/common
+
+CFLAGS ?= -O3
+CFLAGS += -g
+
+SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c
+
+.PHONY: default all clean test
+
+default: all
+
+all: seekable_compression seekable_decompression parallel_processing parallel_compression
+
+seekable_compression : seekable_compression.c $(SEEKABLE_OBJS)
+ $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@
+
+seekable_decompression : seekable_decompression.c $(SEEKABLE_OBJS)
+ $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@
+
+parallel_processing : parallel_processing.c $(SEEKABLE_OBJS)
+ $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread
+
+parallel_compression : parallel_compression.c $(SEEKABLE_OBJS)
+ $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread
+
+clean:
+ @rm -f core *.o tmp* result* *.zst \
+ seekable_compression seekable_decompression \
+ parallel_processing parallel_compression
+ @echo Cleaning completed
diff --git a/contrib/seekable_format/examples/parallel_compression.c b/contrib/seekable_format/examples/parallel_compression.c
new file mode 100644
index 000000000000..69644d2b3c80
--- /dev/null
+++ b/contrib/seekable_format/examples/parallel_compression.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include <stdlib.h> // malloc, free, exit, atoi
+#include <stdio.h> // fprintf, perror, feof, fopen, etc.
+#include <string.h> // strlen, memset, strcat
+#define ZSTD_STATIC_LINKING_ONLY
+#include <zstd.h> // presumes zstd library is installed
+#include <zstd_errors.h>
+#if defined(WIN32) || defined(_WIN32)
+# include <windows.h>
+# define SLEEP(x) Sleep(x)
+#else
+# include <unistd.h>
+# define SLEEP(x) usleep(x * 1000)
+#endif
+
+#define XXH_NAMESPACE ZSTD_
+#include "xxhash.h"
+
+#include "pool.h" // use zstd thread pool for demo
+
+#include "zstd_seekable.h"
+
+static void* malloc_orDie(size_t size)
+{
+ void* const buff = malloc(size);
+ if (buff) return buff;
+ /* error */
+ perror("malloc:");
+ exit(1);
+}
+
+static FILE* fopen_orDie(const char *filename, const char *instruction)
+{
+ FILE* const inFile = fopen(filename, instruction);
+ if (inFile) return inFile;
+ /* error */
+ perror(filename);
+ exit(3);
+}
+
+static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
+{
+ size_t const readSize = fread(buffer, 1, sizeToRead, file);
+ if (readSize == sizeToRead) return readSize; /* good */
+ if (feof(file)) return readSize; /* good, reached end of file */
+ /* error */
+ perror("fread");
+ exit(4);
+}
+
+static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
+{
+ size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
+ if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
+ /* error */
+ perror("fwrite");
+ exit(5);
+}
+
+static size_t fclose_orDie(FILE* file)
+{
+ if (!fclose(file)) return 0;
+ /* error */
+ perror("fclose");
+ exit(6);
+}
+
+static void fseek_orDie(FILE* file, long int offset, int origin)
+{
+ if (!fseek(file, offset, origin)) {
+ if (!fflush(file)) return;
+ }
+ /* error */
+ perror("fseek");
+ exit(7);
+}
+
+static long int ftell_orDie(FILE* file)
+{
+ long int off = ftell(file);
+ if (off != -1) return off;
+ /* error */
+ perror("ftell");
+ exit(8);
+}
+
+struct job {
+ const void* src;
+ size_t srcSize;
+ void* dst;
+ size_t dstSize;
+
+ unsigned checksum;
+
+ int compressionLevel;
+ int done;
+};
+
+static void compressFrame(void* opaque)
+{
+ struct job* job = opaque;
+
+ job->checksum = XXH64(job->src, job->srcSize, 0);
+
+ size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->compressionLevel);
+ if (ZSTD_isError(ret)) {
+ fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret));
+ exit(20);
+ }
+
+ job->dstSize = ret;
+ job->done = 1;
+}
+
+static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize, int nbThreads)
+{
+ POOL_ctx* pool = POOL_create(nbThreads, nbThreads);
+ if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); }
+
+ FILE* const fin = fopen_orDie(fname, "rb");
+ FILE* const fout = fopen_orDie(outName, "wb");
+
+ if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); }
+ unsigned dstSize = ZSTD_compressBound(frameSize);
+
+
+ fseek_orDie(fin, 0, SEEK_END);
+ long int length = ftell_orDie(fin);
+ fseek_orDie(fin, 0, SEEK_SET);
+
+ size_t numFrames = (length + frameSize - 1) / frameSize;
+
+ struct job* jobs = malloc_orDie(sizeof(struct job) * numFrames);
+
+ size_t i;
+ for(i = 0; i < numFrames; i++) {
+ void* in = malloc_orDie(frameSize);
+ void* out = malloc_orDie(dstSize);
+
+ size_t inSize = fread_orDie(in, frameSize, fin);
+
+ jobs[i].src = in;
+ jobs[i].srcSize = inSize;
+ jobs[i].dst = out;
+ jobs[i].dstSize = dstSize;
+ jobs[i].compressionLevel = cLevel;
+ jobs[i].done = 0;
+ POOL_add(pool, compressFrame, &jobs[i]);
+ }
+
+ ZSTD_frameLog* fl = ZSTD_seekable_createFrameLog(1);
+ if (fl == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); }
+ for (i = 0; i < numFrames; i++) {
+ while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */
+ fwrite_orDie(jobs[i].dst, jobs[i].dstSize, fout);
+ free((void*)jobs[i].src);
+ free(jobs[i].dst);
+
+ size_t ret = ZSTD_seekable_logFrame(fl, jobs[i].dstSize, jobs[i].srcSize, jobs[i].checksum);
+ if (ZSTD_isError(ret)) { fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); }
+ }
+
+ { unsigned char seekTableBuff[1024];
+ ZSTD_outBuffer out = {seekTableBuff, 1024, 0};
+ while (ZSTD_seekable_writeSeekTable(fl, &out) != 0) {
+ fwrite_orDie(seekTableBuff, out.pos, fout);
+ out.pos = 0;
+ }
+ fwrite_orDie(seekTableBuff, out.pos, fout);
+ }
+
+ ZSTD_seekable_freeFrameLog(fl);
+ free(jobs);
+ fclose_orDie(fout);
+ fclose_orDie(fin);
+}
+
+static const char* createOutFilename_orDie(const char* filename)
+{
+ size_t const inL = strlen(filename);
+ size_t const outL = inL + 5;
+ void* outSpace = malloc_orDie(outL);
+ memset(outSpace, 0, outL);
+ strcat(outSpace, filename);
+ strcat(outSpace, ".zst");
+ return (const char*)outSpace;
+}
+
+int main(int argc, const char** argv) {
+ const char* const exeName = argv[0];
+ if (argc!=4) {
+ printf("wrong arguments\n");
+ printf("usage:\n");
+ printf("%s FILE FRAME_SIZE NB_THREADS\n", exeName);
+ return 1;
+ }
+
+ { const char* const inFileName = argv[1];
+ unsigned const frameSize = (unsigned)atoi(argv[2]);
+ int const nbThreads = atoi(argv[3]);
+
+ const char* const outFileName = createOutFilename_orDie(inFileName);
+ compressFile_orDie(inFileName, outFileName, 5, frameSize, nbThreads);
+ }
+
+ return 0;
+}
diff --git a/contrib/seekable_format/examples/parallel_processing.c b/contrib/seekable_format/examples/parallel_processing.c
new file mode 100644
index 000000000000..da3477632e2c
--- /dev/null
+++ b/contrib/seekable_format/examples/parallel_processing.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+/*
+ * A simple demo that sums up all the bytes in the file in parallel using
+ * seekable decompression and the zstd thread pool
+ */
+
+#include <stdlib.h> // malloc, exit
+#include <stdio.h> // fprintf, perror, feof
+#include <string.h> // strerror
+#include <errno.h> // errno
+#define ZSTD_STATIC_LINKING_ONLY
+#include <zstd.h> // presumes zstd library is installed
+#include <zstd_errors.h>
+#if defined(WIN32) || defined(_WIN32)
+# include <windows.h>
+# define SLEEP(x) Sleep(x)
+#else
+# include <unistd.h>
+# define SLEEP(x) usleep(x * 1000)
+#endif
+
+#include "pool.h" // use zstd thread pool for demo
+
+#include "zstd_seekable.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+static void* malloc_orDie(size_t size)
+{
+ void* const buff = malloc(size);
+ if (buff) return buff;
+ /* error */
+ perror("malloc");
+ exit(1);
+}
+
+static void* realloc_orDie(void* ptr, size_t size)
+{
+ ptr = realloc(ptr, size);
+ if (ptr) return ptr;
+ /* error */
+ perror("realloc");
+ exit(1);
+}
+
+static FILE* fopen_orDie(const char *filename, const char *instruction)
+{
+ FILE* const inFile = fopen(filename, instruction);
+ if (inFile) return inFile;
+ /* error */
+ perror(filename);
+ exit(3);
+}
+
+static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
+{
+ size_t const readSize = fread(buffer, 1, sizeToRead, file);
+ if (readSize == sizeToRead) return readSize; /* good */
+ if (feof(file)) return readSize; /* good, reached end of file */
+ /* error */
+ perror("fread");
+ exit(4);
+}
+
+static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
+{
+ size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
+ if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
+ /* error */
+ perror("fwrite");
+ exit(5);
+}
+
+static size_t fclose_orDie(FILE* file)
+{
+ if (!fclose(file)) return 0;
+ /* error */
+ perror("fclose");
+ exit(6);
+}
+
+static void fseek_orDie(FILE* file, long int offset, int origin) {
+ if (!fseek(file, offset, origin)) {
+ if (!fflush(file)) return;
+ }
+ /* error */
+ perror("fseek");
+ exit(7);
+}
+
+struct sum_job {
+ const char* fname;
+ unsigned long long sum;
+ unsigned frameNb;
+ int done;
+};
+
+static void sumFrame(void* opaque)
+{
+ struct sum_job* job = (struct sum_job*)opaque;
+ job->done = 0;
+
+ FILE* const fin = fopen_orDie(job->fname, "rb");
+
+ ZSTD_seekable* const seekable = ZSTD_seekable_create();
+ if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); }
+
+ size_t const initResult = ZSTD_seekable_initFile(seekable, fin);
+ if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
+
+ size_t const frameSize = ZSTD_seekable_getFrameDecompressedSize(seekable, job->frameNb);
+ unsigned char* data = malloc_orDie(frameSize);
+
+ size_t result = ZSTD_seekable_decompressFrame(seekable, data, frameSize, job->frameNb);
+ if (ZSTD_isError(result)) { fprintf(stderr, "ZSTD_seekable_decompressFrame() error : %s \n", ZSTD_getErrorName(result)); exit(12); }
+
+ unsigned long long sum = 0;
+ size_t i;
+ for (i = 0; i < frameSize; i++) {
+ sum += data[i];
+ }
+ job->sum = sum;
+ job->done = 1;
+
+ fclose(fin);
+ ZSTD_seekable_free(seekable);
+ free(data);
+}
+
+static void sumFile_orDie(const char* fname, int nbThreads)
+{
+ POOL_ctx* pool = POOL_create(nbThreads, nbThreads);
+ if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); }
+
+ FILE* const fin = fopen_orDie(fname, "rb");
+
+ ZSTD_seekable* const seekable = ZSTD_seekable_create();
+ if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); }
+
+ size_t const initResult = ZSTD_seekable_initFile(seekable, fin);
+ if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
+
+ size_t const numFrames = ZSTD_seekable_getNumFrames(seekable);
+ struct sum_job* jobs = (struct sum_job*)malloc(numFrames * sizeof(struct sum_job));
+
+ size_t i;
+ for (i = 0; i < numFrames; i++) {
+ jobs[i] = (struct sum_job){ fname, 0, i, 0 };
+ POOL_add(pool, sumFrame, &jobs[i]);
+ }
+
+ unsigned long long total = 0;
+
+ for (i = 0; i < numFrames; i++) {
+ while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */
+ total += jobs[i].sum;
+ }
+
+ printf("Sum: %llu\n", total);
+
+ POOL_free(pool);
+ ZSTD_seekable_free(seekable);
+ fclose(fin);
+ free(jobs);
+}
+
+
+int main(int argc, const char** argv)
+{
+ const char* const exeName = argv[0];
+
+ if (argc!=3) {
+ fprintf(stderr, "wrong arguments\n");
+ fprintf(stderr, "usage:\n");
+ fprintf(stderr, "%s FILE NB_THREADS\n", exeName);
+ return 1;
+ }
+
+ {
+ const char* const inFilename = argv[1];
+ int const nbThreads = atoi(argv[2]);
+ sumFile_orDie(inFilename, nbThreads);
+ }
+
+ return 0;
+}
diff --git a/contrib/seekable_format/examples/seekable_compression.c b/contrib/seekable_format/examples/seekable_compression.c
new file mode 100644
index 000000000000..9485bf26fc46
--- /dev/null
+++ b/contrib/seekable_format/examples/seekable_compression.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include <stdlib.h> // malloc, free, exit, atoi
+#include <stdio.h> // fprintf, perror, feof, fopen, etc.
+#include <string.h> // strlen, memset, strcat
+#define ZSTD_STATIC_LINKING_ONLY
+#include <zstd.h> // presumes zstd library is installed
+
+#include "zstd_seekable.h"
+
+static void* malloc_orDie(size_t size)
+{
+ void* const buff = malloc(size);
+ if (buff) return buff;
+ /* error */
+ perror("malloc:");
+ exit(1);
+}
+
+static FILE* fopen_orDie(const char *filename, const char *instruction)
+{
+ FILE* const inFile = fopen(filename, instruction);
+ if (inFile) return inFile;
+ /* error */
+ perror(filename);
+ exit(3);
+}
+
+static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
+{
+ size_t const readSize = fread(buffer, 1, sizeToRead, file);
+ if (readSize == sizeToRead) return readSize; /* good */
+ if (feof(file)) return readSize; /* good, reached end of file */
+ /* error */
+ perror("fread");
+ exit(4);
+}
+
+static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
+{
+ size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
+ if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
+ /* error */
+ perror("fwrite");
+ exit(5);
+}
+
+static size_t fclose_orDie(FILE* file)
+{
+ if (!fclose(file)) return 0;
+ /* error */
+ perror("fclose");
+ exit(6);
+}
+
+static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize)
+{
+ FILE* const fin = fopen_orDie(fname, "rb");
+ FILE* const fout = fopen_orDie(outName, "wb");
+ size_t const buffInSize = ZSTD_CStreamInSize(); /* can always read one full block */
+ void* const buffIn = malloc_orDie(buffInSize);
+ size_t const buffOutSize = ZSTD_CStreamOutSize(); /* can always flush a full block */
+ void* const buffOut = malloc_orDie(buffOutSize);
+
+ ZSTD_seekable_CStream* const cstream = ZSTD_seekable_createCStream();
+ if (cstream==NULL) { fprintf(stderr, "ZSTD_seekable_createCStream() error \n"); exit(10); }
+ size_t const initResult = ZSTD_seekable_initCStream(cstream, cLevel, 1, frameSize);
+ if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_initCStream() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
+
+ size_t read, toRead = buffInSize;
+ while( (read = fread_orDie(buffIn, toRead, fin)) ) {
+ ZSTD_inBuffer input = { buffIn, read, 0 };
+ while (input.pos < input.size) {
+ ZSTD_outBuffer output = { buffOut, buffOutSize, 0 };
+ toRead = ZSTD_seekable_compressStream(cstream, &output , &input); /* toRead is guaranteed to be <= ZSTD_CStreamInSize() */
+ if (ZSTD_isError(toRead)) { fprintf(stderr, "ZSTD_seekable_compressStream() error : %s \n", ZSTD_getErrorName(toRead)); exit(12); }
+ if (toRead > buffInSize) toRead = buffInSize; /* Safely handle case when `buffInSize` is manually changed to a value < ZSTD_CStreamInSize()*/
+ fwrite_orDie(buffOut, output.pos, fout);
+ }
+ }
+
+ while (1) {
+ ZSTD_outBuffer output = { buffOut, buffOutSize, 0 };
+ size_t const remainingToFlush = ZSTD_seekable_endStream(cstream, &output); /* close stream */
+ if (ZSTD_isError(remainingToFlush)) { fprintf(stderr, "ZSTD_seekable_endStream() error : %s \n", ZSTD_getErrorName(remainingToFlush)); exit(13); }
+ fwrite_orDie(buffOut, output.pos, fout);
+ if (!remainingToFlush) break;
+ }
+
+ ZSTD_seekable_freeCStream(cstream);
+ fclose_orDie(fout);
+ fclose_orDie(fin);
+ free(buffIn);
+ free(buffOut);
+}
+
+static const char* createOutFilename_orDie(const char* filename)
+{
+ size_t const inL = strlen(filename);
+ size_t const outL = inL + 5;
+ void* outSpace = malloc_orDie(outL);
+ memset(outSpace, 0, outL);
+ strcat(outSpace, filename);
+ strcat(outSpace, ".zst");
+ return (const char*)outSpace;
+}
+
+int main(int argc, const char** argv) {
+ const char* const exeName = argv[0];
+ if (argc!=3) {
+ printf("wrong arguments\n");
+ printf("usage:\n");
+ printf("%s FILE FRAME_SIZE\n", exeName);
+ return 1;
+ }
+
+ { const char* const inFileName = argv[1];
+ unsigned const frameSize = (unsigned)atoi(argv[2]);
+
+ const char* const outFileName = createOutFilename_orDie(inFileName);
+ compressFile_orDie(inFileName, outFileName, 5, frameSize);
+ }
+
+ return 0;
+}
diff --git a/contrib/seekable_format/examples/seekable_decompression.c b/contrib/seekable_format/examples/seekable_decompression.c
new file mode 100644
index 000000000000..9cd232922636
--- /dev/null
+++ b/contrib/seekable_format/examples/seekable_decompression.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+
+#include <stdlib.h> // malloc, exit
+#include <stdio.h> // fprintf, perror, feof
+#include <string.h> // strerror
+#include <errno.h> // errno
+#define ZSTD_STATIC_LINKING_ONLY
+#include <zstd.h> // presumes zstd library is installed
+#include <zstd_errors.h>
+
+#include "zstd_seekable.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+static void* malloc_orDie(size_t size)
+{
+ void* const buff = malloc(size);
+ if (buff) return buff;
+ /* error */
+ perror("malloc");
+ exit(1);
+}
+
+static void* realloc_orDie(void* ptr, size_t size)
+{
+ ptr = realloc(ptr, size);
+ if (ptr) return ptr;
+ /* error */
+ perror("realloc");
+ exit(1);
+}
+
+static FILE* fopen_orDie(const char *filename, const char *instruction)
+{
+ FILE* const inFile = fopen(filename, instruction);
+ if (inFile) return inFile;
+ /* error */
+ perror(filename);
+ exit(3);
+}
+
+static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
+{
+ size_t const readSize = fread(buffer, 1, sizeToRead, file);
+ if (readSize == sizeToRead) return readSize; /* good */
+ if (feof(file)) return readSize; /* good, reached end of file */
+ /* error */
+ perror("fread");
+ exit(4);
+}
+
+static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
+{
+ size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
+ if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
+ /* error */
+ perror("fwrite");
+ exit(5);
+}
+
+static size_t fclose_orDie(FILE* file)
+{
+ if (!fclose(file)) return 0;
+ /* error */
+ perror("fclose");
+ exit(6);
+}
+
+static void fseek_orDie(FILE* file, long int offset, int origin) {
+ if (!fseek(file, offset, origin)) {
+ if (!fflush(file)) return;
+ }
+ /* error */
+ perror("fseek");
+ exit(7);
+}
+
+
+static void decompressFile_orDie(const char* fname, unsigned startOffset, unsigned endOffset)
+{
+ FILE* const fin = fopen_orDie(fname, "rb");
+ FILE* const fout = stdout;
+ size_t const buffOutSize = ZSTD_DStreamOutSize(); /* Guarantee to successfully flush at least one complete compressed block in all circumstances. */
+ void* const buffOut = malloc_orDie(buffOutSize);
+
+ ZSTD_seekable* const seekable = ZSTD_seekable_create();
+ if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); }
+
+ size_t const initResult = ZSTD_seekable_initFile(seekable, fin);
+ if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
+
+ while (startOffset < endOffset) {
+ size_t const result = ZSTD_seekable_decompress(seekable, buffOut, MIN(endOffset - startOffset, buffOutSize), startOffset);
+
+ if (ZSTD_isError(result)) {
+ fprintf(stderr, "ZSTD_seekable_decompress() error : %s \n",
+ ZSTD_getErrorName(result));
+ exit(12);
+ }
+ fwrite_orDie(buffOut, result, fout);
+ startOffset += result;
+ }
+
+ ZSTD_seekable_free(seekable);
+ fclose_orDie(fin);
+ fclose_orDie(fout);
+ free(buffOut);
+}
+
+
+int main(int argc, const char** argv)
+{
+ const char* const exeName = argv[0];
+
+ if (argc!=4) {
+ fprintf(stderr, "wrong arguments\n");
+ fprintf(stderr, "usage:\n");
+ fprintf(stderr, "%s FILE START END\n", exeName);
+ return 1;
+ }
+
+ {
+ const char* const inFilename = argv[1];
+ unsigned const startOffset = (unsigned) atoi(argv[2]);
+ unsigned const endOffset = (unsigned) atoi(argv[3]);
+ decompressFile_orDie(inFilename, startOffset, endOffset);
+ }
+
+ return 0;
+}
diff --git a/contrib/seekable_format/zstd_seekable.h b/contrib/seekable_format/zstd_seekable.h
new file mode 100644
index 000000000000..438ac20149f8
--- /dev/null
+++ b/contrib/seekable_format/zstd_seekable.h
@@ -0,0 +1,184 @@
+#ifndef SEEKABLE_H
+#define SEEKABLE_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+static const unsigned ZSTD_seekTableFooterSize = 9;
+
+#define ZSTD_SEEKABLE_MAGICNUMBER 0x8F92EAB1
+
+#define ZSTD_SEEKABLE_MAXFRAMES 0x8000000U
+
+/* Limit the maximum size to avoid any potential issues storing the compressed size */
+#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x80000000U
+
+/*-****************************************************************************
+* Seekable Format
+*
+* The seekable format splits the compressed data into a series of "frames",
+* each compressed individually so that decompression of a section in the
+* middle of an archive only requires zstd to decompress at most a frame's
+* worth of extra data, instead of the entire archive.
+******************************************************************************/
+
+typedef struct ZSTD_seekable_CStream_s ZSTD_seekable_CStream;
+typedef struct ZSTD_seekable_s ZSTD_seekable;
+
+/*-****************************************************************************
+* Seekable compression - HowTo
+* A ZSTD_seekable_CStream object is required to track streaming operations.
+* Use ZSTD_seekable_createCStream() and ZSTD_seekable_freeCStream() to create/
+* release resources.
+*
+* Streaming objects are reusable to avoid allocation and deallocation;
+* to start a new compression operation, call ZSTD_seekable_initCStream() on the
+* compressor.
+*
+* Data streamed to the seekable compressor will automatically be split into
+* frames of size `maxFrameSize` (provided in ZSTD_seekable_initCStream()),
+* or if none is provided, will be cut off whenever ZSTD_seekable_endFrame() is
+* called or when the default maximum frame size (2GB) is reached.
+*
+* Use ZSTD_seekable_initCStream() to initialize a ZSTD_seekable_CStream object
+* for a new compression operation.
+* `maxFrameSize` indicates the size at which to automatically start a new
+* seekable frame. `maxFrameSize == 0` implies the default maximum size.
+* `checksumFlag` indicates whether or not the seek table should include frame
+* checksums on the uncompressed data for verification.
+* @return : a size hint for input to provide for compression, or an error code
+* checkable with ZSTD_isError()
+*
+* Use ZSTD_seekable_compressStream() repetitively to consume input stream.
+* The function will automatically update both `pos` fields.
+* Note that it may not consume the entire input, in which case `pos < size`,
+* and it's up to the caller to present the remaining data again.
+* @return : a size hint, preferred nb of bytes to use as input for next
+* function call or an error code, which can be tested using
+* ZSTD_isError().
+* Note 1 : this is just a hint, to help latency a little; any other
+* value will work fine.
+*
+* At any time, call ZSTD_seekable_endFrame() to end the current frame and
+* start a new one.
+*
+* ZSTD_seekable_endStream() will end the current frame, and then write the seek
+* table so that decompressors can efficiently find compressed frames.
+* ZSTD_seekable_endStream() may return a number > 0 if it was unable to flush
+* all the necessary data to `output`. In this case, it should be called again
+* until all remaining data is flushed out and 0 is returned.
+******************************************************************************/
+
+/*===== Seekable compressor management =====*/
+ZSTDLIB_API ZSTD_seekable_CStream* ZSTD_seekable_createCStream(void);
+ZSTDLIB_API size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs);
+
+/*===== Seekable compression functions =====*/
+ZSTDLIB_API size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, int compressionLevel, int checksumFlag, unsigned maxFrameSize);
+ZSTDLIB_API size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDLIB_API size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output);
+ZSTDLIB_API size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output);
+
+/*= Raw seek table API
+ * These functions allow for the seek table to be constructed directly.
+ * This table can then be appended to a file of concatenated frames.
+ * This allows the frames to be compressed independently, even in parallel,
+ * and compiled together afterward into a seekable archive.
+ *
+ * Use ZSTD_seekable_createFrameLog() to allocate and initialize a tracking
+ * structure.
+ *
+ * Call ZSTD_seekable_logFrame() once for each frame in the archive.
+ * checksum is optional, and will not be used if checksumFlag was 0 when the
+ * frame log was created. If present, it should be the least significant 32
+ * bits of the XXH64 hash of the uncompressed data.
+ *
+ * Call ZSTD_seekable_writeSeekTable to serialize the data into a seek table.
+ * If the entire table was written, the return value will be 0. Otherwise,
+ * it will be equal to the number of bytes left to write. */
+typedef struct ZSTD_frameLog_s ZSTD_frameLog;
+ZSTDLIB_API ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag);
+ZSTDLIB_API size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl);
+ZSTDLIB_API size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, unsigned compressedSize, unsigned decompressedSize, unsigned checksum);
+ZSTDLIB_API size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output);
+
+/*-****************************************************************************
+* Seekable decompression - HowTo
+* A ZSTD_seekable object is required to track the seekTable.
+*
+* Call ZSTD_seekable_init* to initialize a ZSTD_seekable object with the
+* seek table provided in the input.
+* There are three modes for ZSTD_seekable_init:
+* - ZSTD_seekable_initBuff() : An in-memory API. The data contained in
+* `src` should be the entire seekable file, including the seek table.
+* `src` should be kept alive and unmodified until the ZSTD_seekable object
+* is freed or reset.
+* - ZSTD_seekable_initFile() : A simplified file API using stdio. fread and
+* fseek will be used to access the required data for building the seek
+* table and doing decompression operations. `src` should not be closed
+* or modified until the ZSTD_seekable object is freed or reset.
+* - ZSTD_seekable_initAdvanced() : A general API allowing the client to
+* provide its own read and seek callbacks.
+* + ZSTD_seekable_read() : read exactly `n` bytes into `buffer`.
+* Premature EOF should be treated as an error.
+* + ZSTD_seekable_seek() : seek the read head to `offset` from `origin`,
+* where origin is either SEEK_SET (beginning of
+* file), or SEEK_END (end of file).
+* Both functions should return a non-negative value in case of success, and a
+* negative value in case of failure. If implementing these callbacks with
+* stdio, be careful with fseek on files larger than 4GB. All of these
+* functions return an error code checkable with ZSTD_isError().
+*
+* Call ZSTD_seekable_decompress to decompress `dstSize` bytes at decompressed
+* offset `offset`. ZSTD_seekable_decompress may have to decompress the entire
+* prefix of the frame before the desired data if it has not already processed
+* this section. If ZSTD_seekable_decompress is called multiple times for a
+* consecutive range of data, it will efficiently retain the decompressor object
+* and avoid redecompressing frame prefixes. The return value is the number of
+* bytes decompressed, or an error code checkable with ZSTD_isError().
+*
+* The seek table access functions can be used to obtain the data contained
+* in the seek table. If frameIndex is larger than the value returned by
+* ZSTD_seekable_getNumFrames(), they will return error codes checkable with
+* ZSTD_isError(). Note that since the offset access functions return
+* unsigned long long instead of size_t, in this case they will instead return
+* the value ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE.
+******************************************************************************/
+
+/*===== Seekable decompressor management =====*/
+ZSTDLIB_API ZSTD_seekable* ZSTD_seekable_create(void);
+ZSTDLIB_API size_t ZSTD_seekable_free(ZSTD_seekable* zs);
+
+/*===== Seekable decompression functions =====*/
+ZSTDLIB_API size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src);
+ZSTDLIB_API size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned long long offset);
+ZSTDLIB_API size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex);
+
+#define ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE (0ULL-2)
+/*===== Seek Table access functions =====*/
+ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs);
+ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex);
+ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex);
+ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex);
+ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex);
+ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long offset);
+
+/*===== Seekable advanced I/O API =====*/
+typedef int(ZSTD_seekable_read)(void* opaque, void* buffer, size_t n);
+typedef int(ZSTD_seekable_seek)(void* opaque, long long offset, int origin);
+typedef struct {
+ void* opaque;
+ ZSTD_seekable_read* read;
+ ZSTD_seekable_seek* seek;
+} ZSTD_seekable_customFile;
+ZSTDLIB_API size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
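As a supplement to the decompression HowTo in the header above: the bundled examples exercise ZSTD_seekable_initFile() and ZSTD_seekable_initBuff(), but not ZSTD_seekable_initAdvanced(). The following is a minimal sketch, not part of the imported sources, of the custom read/seek callback contract, backed by a caller-owned memory buffer (which is essentially what ZSTD_seekable_initBuff() already does internally). The struct mem_src type, the helper names, and the reduced error handling are illustrative assumptions:

#include <stdio.h>   /* SEEK_SET, SEEK_CUR, SEEK_END */
#include <string.h>  /* memcpy */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include "zstd_seekable.h"

struct mem_src { const unsigned char* base; size_t size; size_t pos; };

static int mem_read(void* opaque, void* buffer, size_t n)
{
    struct mem_src* const m = (struct mem_src*)opaque;
    if (m->pos + n > m->size) return -1;   /* premature EOF is an error */
    memcpy(buffer, m->base + m->pos, n);
    m->pos += n;
    return 0;
}

static int mem_seek(void* opaque, long long offset, int origin)
{
    struct mem_src* const m = (struct mem_src*)opaque;
    long long const newPos = (origin == SEEK_SET) ? offset
                           : (origin == SEEK_CUR) ? (long long)m->pos + offset
                           :                        (long long)m->size + offset; /* SEEK_END */
    if (newPos < 0 || (unsigned long long)newPos > m->size) return -1;
    m->pos = (size_t)newPos;
    return 0;
}

/* Decompress `dstSize` bytes starting at decompressed offset `offset`
 * from a seekable archive held entirely in memory. */
static size_t decompress_range(const void* archive, size_t archiveSize,
                               void* dst, size_t dstSize, unsigned long long offset)
{
    struct mem_src m = { (const unsigned char*)archive, archiveSize, 0 };
    ZSTD_seekable_customFile src = { &m, mem_read, mem_seek };
    ZSTD_seekable* const zs = ZSTD_seekable_create();
    size_t result;
    if (zs == NULL) return (size_t)-1;                    /* generic error code */
    result = ZSTD_seekable_initAdvanced(zs, src);
    if (!ZSTD_isError(result))
        result = ZSTD_seekable_decompress(zs, dst, dstSize, offset);
    ZSTD_seekable_free(zs);
    return result;   /* bytes written, or an error code testable with ZSTD_isError() */
}

Note that the decompressor implementation below (zstdseek_decompress.c) maps any negative return from either callback to a seekableIO error code, which the caller then observes through ZSTD_isError().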
diff --git a/contrib/seekable_format/zstd_seekable_compression_format.md b/contrib/seekable_format/zstd_seekable_compression_format.md
new file mode 100644
index 000000000000..bf3080f7bbed
--- /dev/null
+++ b/contrib/seekable_format/zstd_seekable_compression_format.md
@@ -0,0 +1,116 @@
+# Zstandard Seekable Format
+
+### Notices
+
+Copyright (c) 2017-present Facebook, Inc.
+
+Permission is granted to copy and distribute this document
+for any purpose and without charge,
+including translations into other languages
+and incorporation into compilations,
+provided that the copyright notice and this notice are preserved,
+and that any substantive changes or deletions from the original
+are clearly marked.
+Distribution of this document is unlimited.
+
+### Version
+0.1.0 (11/04/17)
+
+## Introduction
+This document defines a format for compressed data to be stored so that subranges of the data can be efficiently decompressed without requiring the entire document to be decompressed.
+This is done by splitting up the input data into frames,
+each of which is compressed independently,
+and so can be decompressed independently.
+Decompression then takes advantage of a provided 'seek table', which allows the decompressor to immediately jump to the desired data. This is done in a way that is compatible with the original Zstandard format by placing the seek table in a Zstandard skippable frame.
+
+### Overall conventions
+In this document:
+- square brackets i.e. `[` and `]` are used to indicate optional fields or parameters.
+- the naming convention for identifiers is `Mixed_Case_With_Underscores`
+- All numeric fields are little-endian unless specified otherwise
+
+## Format
+
+The format consists of a number of frames (Zstandard compressed frames and skippable frames), followed by a final skippable frame at the end containing the seek table.
+
+### Seek Table Format
+The structure of the seek table frame is as follows:
+
+|`Skippable_Magic_Number`|`Frame_Size`|`[Seek_Table_Entries]`|`Seek_Table_Footer`|
+|------------------------|------------|----------------------|-------------------|
+| 4 bytes | 4 bytes | 8-12 bytes each | 9 bytes |
+
+__`Skippable_Magic_Number`__
+
+Value : 0x184D2A5E.
+This is for compatibility with [Zstandard skippable frames].
+Since it is legal for other Zstandard skippable frames to use the same
+magic number, it is not recommended for a decoder to identify frames
+solely by this magic number.
+
+__`Frame_Size`__
+
+The total size of the skippable frame, not including the `Skippable_Magic_Number` or `Frame_Size`.
+This is for compatibility with [Zstandard skippable frames].
+
+[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#skippable-frames
+
+#### `Seek_Table_Footer`
+The seek table footer format is as follows:
+
+|`Number_Of_Frames`|`Seek_Table_Descriptor`|`Seekable_Magic_Number`|
+|------------------|-----------------------|-----------------------|
+| 4 bytes | 1 byte | 4 bytes |
+
+__`Seekable_Magic_Number`__
+
+Value : 0x8F92EAB1.
+This value must be the last bytes present in the compressed file so that decoders
+can efficiently find it and determine if there is an actual seek table present.
+
+__`Number_Of_Frames`__
+
+The number of stored frames in the data.
+
+__`Seek_Table_Descriptor`__
+
+A bitfield describing the format of the seek table.
+
+| Bit number | Field name |
+| ---------- | ---------- |
+| 7 | `Checksum_Flag` |
+| 6-2 | `Reserved_Bits` |
+| 1-0 | `Unused_Bits` |
+
+While only `Checksum_Flag` currently exists, there are 7 other bits in this field that can be used for future changes to the format,
+for example the addition of inline dictionaries.
+
+__`Checksum_Flag`__
+
+If the checksum flag is set, each of the seek table entries contains a 4 byte checksum of the uncompressed data contained in its frame.
+
+`Reserved_Bits` are not currently used but may be used in the future for breaking changes, so a compliant decoder should ensure they are set to 0. `Unused_Bits` may be used in the future for non-breaking changes, so a compliant decoder should not interpret these bits.
+
+#### __`Seek_Table_Entries`__
+
+`Seek_Table_Entries` consists of `Number_Of_Frames` (one for each frame in the data, not including the seek table frame) entries of the following form, in sequence:
+
+|`Compressed_Size`|`Decompressed_Size`|`[Checksum]`|
+|-----------------|-------------------|------------|
+| 4 bytes | 4 bytes | 4 bytes |
+
+__`Compressed_Size`__
+
+The compressed size of the frame.
+The cumulative sum of the `Compressed_Size` fields of frames `0` to `i` gives the offset in the compressed file of frame `i+1`.
+
+__`Decompressed_Size`__
+
+The size of the decompressed data contained in the frame. For skippable or otherwise empty frames, this value is 0.
+
+__`Checksum`__
+
+Only present if `Checksum_Flag` is set in the `Seek_Table_Descriptor`. Value : the least significant 32 bits of the XXH64 digest of the uncompressed data, stored in little-endian format.
+
+## Version Changes
+- 0.1.0: initial version
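To make the byte layout above concrete, here is a small C sketch (not part of the specification or of the imported sources) that locates the seek table footer in a fully loaded seekable file and walks the entries to recover per-frame offsets. It assumes the whole file is in memory, performs no validation beyond the magic number, and spells out the little-endian reads by hand; the function names are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t read_le32(const unsigned char* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Walk the seek table of a seekable archive held in buf/size.
 * Returns the number of frames found, or 0 if no seek table is present. */
static uint32_t list_frames(const unsigned char* buf, size_t size)
{
    if (size < 9) return 0;
    const unsigned char* const footer = buf + size - 9;    /* Seek_Table_Footer */
    if (read_le32(footer + 5) != 0x8F92EAB1U) return 0;    /* Seekable_Magic_Number */
    uint32_t const numFrames = read_le32(footer);          /* Number_Of_Frames */
    int const checksumFlag = (footer[4] >> 7) & 1;         /* Seek_Table_Descriptor */
    size_t const entrySize = checksumFlag ? 12 : 8;
    /* entries sit immediately before the footer; the 8-byte skippable frame
       header precedes them but is not needed to walk the entries */
    const unsigned char* entry = footer - (size_t)numFrames * entrySize;

    uint64_t cOffset = 0, dOffset = 0;
    uint32_t i;
    for (i = 0; i < numFrames; i++) {
        uint32_t const cSize = read_le32(entry);            /* Compressed_Size */
        uint32_t const dSize = read_le32(entry + 4);        /* Decompressed_Size */
        printf("frame %u: compressed offset %llu, decompressed offset %llu\n",
               i, (unsigned long long)cOffset, (unsigned long long)dOffset);
        cOffset += cSize;
        dOffset += dSize;
        entry += entrySize;
    }
    return numFrames;
}

The cumulative sums computed in the loop are exactly the per-frame compressed and decompressed offsets that the reference decoder builds in its internal seek table.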
diff --git a/contrib/seekable_format/zstdseek_compress.c b/contrib/seekable_format/zstdseek_compress.c
new file mode 100644
index 000000000000..df2074988e63
--- /dev/null
+++ b/contrib/seekable_format/zstdseek_compress.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include <stdlib.h> /* malloc, free */
+
+#define XXH_STATIC_LINKING_ONLY
+#define XXH_NAMESPACE ZSTD_
+#include "xxhash.h"
+
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#include "zstd_errors.h"
+#include "mem.h"
+#include "zstd_seekable.h"
+
+#define CHECK_Z(f) { size_t const ret = (f); if (ret != 0) return ret; }
+
+#undef ERROR
+#define ERROR(name) ((size_t)-ZSTD_error_##name)
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+typedef struct {
+ U32 cSize;
+ U32 dSize;
+ U32 checksum;
+} framelogEntry_t;
+
+struct ZSTD_frameLog_s {
+ framelogEntry_t* entries;
+ U32 size;
+ U32 capacity;
+
+ int checksumFlag;
+
+ /* for use when streaming out the seek table */
+ U32 seekTablePos;
+ U32 seekTableIndex;
+};
+
+struct ZSTD_seekable_CStream_s {
+ ZSTD_CStream* cstream;
+ ZSTD_frameLog framelog;
+
+ U32 frameCSize;
+ U32 frameDSize;
+
+ XXH64_state_t xxhState;
+
+ U32 maxFrameSize;
+
+ int writingSeekTable;
+};
+
+size_t ZSTD_seekable_frameLog_allocVec(ZSTD_frameLog* fl)
+{
+ /* allocate some initial space */
+ size_t const FRAMELOG_STARTING_CAPACITY = 16;
+ fl->entries = (framelogEntry_t*)malloc(
+ sizeof(framelogEntry_t) * FRAMELOG_STARTING_CAPACITY);
+ if (fl->entries == NULL) return ERROR(memory_allocation);
+ fl->capacity = FRAMELOG_STARTING_CAPACITY;
+
+ return 0;
+}
+
+size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl)
+{
+ if (fl != NULL) free(fl->entries);
+ return 0;
+}
+
+ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag)
+{
+ ZSTD_frameLog* fl = malloc(sizeof(ZSTD_frameLog));
+ if (fl == NULL) return NULL;
+
+ if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(fl))) {
+ free(fl);
+ return NULL;
+ }
+
+ fl->checksumFlag = checksumFlag;
+ fl->seekTablePos = 0;
+ fl->seekTableIndex = 0;
+ fl->size = 0;
+
+ return fl;
+}
+
+size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl)
+{
+ ZSTD_seekable_frameLog_freeVec(fl);
+ free(fl);
+ return 0;
+}
+
+ZSTD_seekable_CStream* ZSTD_seekable_createCStream()
+{
+ ZSTD_seekable_CStream* zcs = malloc(sizeof(ZSTD_seekable_CStream));
+
+ if (zcs == NULL) return NULL;
+
+ memset(zcs, 0, sizeof(*zcs));
+
+ zcs->cstream = ZSTD_createCStream();
+ if (zcs->cstream == NULL) goto failed1;
+
+ if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(&zcs->framelog))) goto failed2;
+
+ return zcs;
+
+failed2:
+ ZSTD_freeCStream(zcs->cstream);
+failed1:
+ free(zcs);
+ return NULL;
+}
+
+size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs)
+{
+ if (zcs == NULL) return 0; /* support free on null */
+ ZSTD_freeCStream(zcs->cstream);
+ ZSTD_seekable_frameLog_freeVec(&zcs->framelog);
+ free(zcs);
+
+ return 0;
+}
+
+size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs,
+ int compressionLevel,
+ int checksumFlag,
+ U32 maxFrameSize)
+{
+ zcs->framelog.size = 0;
+ zcs->frameCSize = 0;
+ zcs->frameDSize = 0;
+
+ /* make sure maxFrameSize has a reasonable value */
+ if (maxFrameSize > ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE) {
+ return ERROR(compressionParameter_unsupported);
+ }
+
+ zcs->maxFrameSize = maxFrameSize
+ ? maxFrameSize
+ : ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE;
+
+ zcs->framelog.checksumFlag = checksumFlag;
+ if (zcs->framelog.checksumFlag) {
+ XXH64_reset(&zcs->xxhState, 0);
+ }
+
+ zcs->framelog.seekTablePos = 0;
+ zcs->framelog.seekTableIndex = 0;
+ zcs->writingSeekTable = 0;
+
+ return ZSTD_initCStream(zcs->cstream, compressionLevel);
+}
+
+size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl,
+ unsigned compressedSize,
+ unsigned decompressedSize,
+ unsigned checksum)
+{
+ if (fl->size == ZSTD_SEEKABLE_MAXFRAMES)
+ return ERROR(frameIndex_tooLarge);
+
+ /* grow the buffer if required */
+ if (fl->size == fl->capacity) {
+ /* exponential size increase for constant amortized runtime */
+ size_t const newCapacity = fl->capacity * 2;
+ framelogEntry_t* const newEntries = realloc(fl->entries,
+ sizeof(framelogEntry_t) * newCapacity);
+
+ if (newEntries == NULL) return ERROR(memory_allocation);
+
+ fl->entries = newEntries;
+ fl->capacity = newCapacity;
+ }
+
+ fl->entries[fl->size] = (framelogEntry_t){
+ compressedSize, decompressedSize, checksum
+ };
+ fl->size++;
+
+ return 0;
+}
+
+size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output)
+{
+ size_t const prevOutPos = output->pos;
+ /* end the frame */
+ size_t ret = ZSTD_endStream(zcs->cstream, output);
+
+ zcs->frameCSize += output->pos - prevOutPos;
+
+ /* need to flush before doing the rest */
+ if (ret) return ret;
+
+ /* frame done */
+
+ /* store the frame data for later */
+ ret = ZSTD_seekable_logFrame(
+ &zcs->framelog, zcs->frameCSize, zcs->frameDSize,
+ zcs->framelog.checksumFlag
+ ? XXH64_digest(&zcs->xxhState) & 0xFFFFFFFFU
+ : 0);
+ if (ret) return ret;
+
+ /* reset for the next frame */
+ zcs->frameCSize = 0;
+ zcs->frameDSize = 0;
+
+ ZSTD_resetCStream(zcs->cstream, 0);
+ if (zcs->framelog.checksumFlag)
+ XXH64_reset(&zcs->xxhState, 0);
+
+ return 0;
+}
+
+size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ const BYTE* const inBase = (const BYTE*) input->src + input->pos;
+ size_t inLen = input->size - input->pos;
+
+ inLen = MIN(inLen, (size_t)(zcs->maxFrameSize - zcs->frameDSize));
+
+ /* if we haven't finished flushing the last frame, don't start writing a new one */
+ if (inLen > 0) {
+ ZSTD_inBuffer inTmp = { inBase, inLen, 0 };
+ size_t const prevOutPos = output->pos;
+
+ size_t const ret = ZSTD_compressStream(zcs->cstream, output, &inTmp);
+
+ if (zcs->framelog.checksumFlag) {
+ XXH64_update(&zcs->xxhState, inBase, inTmp.pos);
+ }
+
+ zcs->frameCSize += output->pos - prevOutPos;
+ zcs->frameDSize += inTmp.pos;
+
+ input->pos += inTmp.pos;
+
+ if (ZSTD_isError(ret)) return ret;
+ }
+
+ if (zcs->maxFrameSize == zcs->frameDSize) {
+ /* log the frame and start over */
+ size_t const ret = ZSTD_seekable_endFrame(zcs, output);
+ if (ZSTD_isError(ret)) return ret;
+
+ /* get the client ready for the next frame */
+ return (size_t)zcs->maxFrameSize;
+ }
+
+ return (size_t)(zcs->maxFrameSize - zcs->frameDSize);
+}
+
+static inline size_t ZSTD_seekable_seekTableSize(const ZSTD_frameLog* fl)
+{
+ size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0);
+ size_t const seekTableLen = ZSTD_skippableHeaderSize +
+ sizePerFrame * fl->size +
+ ZSTD_seekTableFooterSize;
+
+ return seekTableLen;
+}
+
+static inline size_t ZSTD_stwrite32(ZSTD_frameLog* fl,
+ ZSTD_outBuffer* output, U32 const value,
+ U32 const offset)
+{
+ if (fl->seekTablePos < offset + 4) {
+ BYTE tmp[4]; /* so that we can work with buffers too small to write a whole word to */
+ size_t const lenWrite =
+ MIN(output->size - output->pos, offset + 4 - fl->seekTablePos);
+ MEM_writeLE32(tmp, value);
+ memcpy((BYTE*)output->dst + output->pos,
+ tmp + (fl->seekTablePos - offset), lenWrite);
+ output->pos += lenWrite;
+ fl->seekTablePos += lenWrite;
+
+ if (lenWrite < 4) return ZSTD_seekable_seekTableSize(fl) - fl->seekTablePos;
+ }
+ return 0;
+}
+
+size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output)
+{
+ /* seekTableIndex: the current index in the table and
+ * seekTablePos: the amount of the table written so far
+ *
+ * This function is written this way so that if it has to return early
+ * because of a small buffer, it can keep going where it left off.
+ */
+
+ size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0);
+ size_t const seekTableLen = ZSTD_seekable_seekTableSize(fl);
+
+ CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_MAGIC_SKIPPABLE_START | 0xE, 0));
+ CHECK_Z(ZSTD_stwrite32(fl, output, seekTableLen - ZSTD_skippableHeaderSize,
+ 4));
+
+ while (fl->seekTableIndex < fl->size) {
+ CHECK_Z(ZSTD_stwrite32(fl, output,
+ fl->entries[fl->seekTableIndex].cSize,
+ ZSTD_skippableHeaderSize +
+ sizePerFrame * fl->seekTableIndex + 0));
+
+ CHECK_Z(ZSTD_stwrite32(fl, output,
+ fl->entries[fl->seekTableIndex].dSize,
+ ZSTD_skippableHeaderSize +
+ sizePerFrame * fl->seekTableIndex + 4));
+
+ if (fl->checksumFlag) {
+ CHECK_Z(ZSTD_stwrite32(
+ fl, output, fl->entries[fl->seekTableIndex].checksum,
+ ZSTD_skippableHeaderSize +
+ sizePerFrame * fl->seekTableIndex + 8));
+ }
+
+ fl->seekTableIndex++;
+ }
+
+ CHECK_Z(ZSTD_stwrite32(fl, output, fl->size,
+ seekTableLen - ZSTD_seekTableFooterSize));
+
+ if (output->size - output->pos < 1) return seekTableLen - fl->seekTablePos;
+ if (fl->seekTablePos < seekTableLen - 4) {
+ BYTE sfd = 0;
+ sfd |= (fl->checksumFlag) << 7;
+
+ ((BYTE*)output->dst)[output->pos] = sfd;
+ output->pos++;
+ fl->seekTablePos++;
+ }
+
+ CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_SEEKABLE_MAGICNUMBER,
+ seekTableLen - 4));
+
+ if (fl->seekTablePos != seekTableLen) return ERROR(GENERIC);
+ return 0;
+}
+
+size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output)
+{
+ if (!zcs->writingSeekTable && zcs->frameDSize) {
+ const size_t endFrame = ZSTD_seekable_endFrame(zcs, output);
+ if (ZSTD_isError(endFrame)) return endFrame;
+ /* return an accurate size hint */
+ if (endFrame) return endFrame + ZSTD_seekable_seekTableSize(&zcs->framelog);
+ }
+
+ zcs->writingSeekTable = 1;
+
+ return ZSTD_seekable_writeSeekTable(&zcs->framelog, output);
+}
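parallel_compression.c above shows the intended use of the frame log when the frames are produced in-process. As a further illustration of the raw seek table API implemented in this file, here is a hypothetical sketch that retrofits a seek table onto an existing file of concatenated zstd frames. It assumes the whole file fits in memory, that every frame records its content size, and that checksums are not wanted; append_seek_table and its minimal error handling are illustrative assumptions, and ZSTD_findFrameCompressedSize()/ZSTD_getFrameContentSize() come from zstd's static-linking-only API:

#include <stdio.h>
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_findFrameCompressedSize() */
#include <zstd.h>
#include "zstd_seekable.h"

/* Hypothetical helper: append a seek table to `fname`, assumed to contain
 * one or more complete zstd frames, each with its content size recorded. */
static int append_seek_table(const char* fname)
{
    FILE* const f = fopen(fname, "rb+");
    if (f == NULL) return -1;
    fseek(f, 0, SEEK_END);
    long const fileSize = ftell(f);
    fseek(f, 0, SEEK_SET);

    void* const data = malloc((size_t)fileSize);
    if (data == NULL || fread(data, 1, (size_t)fileSize, f) != (size_t)fileSize) {
        free(data); fclose(f); return -1;
    }

    ZSTD_frameLog* const fl = ZSTD_seekable_createFrameLog(0); /* no checksums */
    if (fl == NULL) { free(data); fclose(f); return -1; }

    {   size_t pos = 0;
        while (pos < (size_t)fileSize) {
            size_t const cSize = ZSTD_findFrameCompressedSize((const char*)data + pos,
                                                              (size_t)fileSize - pos);
            if (ZSTD_isError(cSize)) break;
            {   unsigned long long const dSize =
                    ZSTD_getFrameContentSize((const char*)data + pos, cSize);
                if (dSize == ZSTD_CONTENTSIZE_UNKNOWN || dSize == ZSTD_CONTENTSIZE_ERROR) break;
                ZSTD_seekable_logFrame(fl, (unsigned)cSize, (unsigned)dSize, 0);
            }
            pos += cSize;
        }
    }

    fseek(f, 0, SEEK_END);   /* switch the update stream from reading to appending */
    {   unsigned char buff[1024];
        size_t remaining;
        do {
            ZSTD_outBuffer out = { buff, sizeof(buff), 0 };
            remaining = ZSTD_seekable_writeSeekTable(fl, &out);
            if (ZSTD_isError(remaining)) break;
            fwrite(buff, 1, out.pos, f);
        } while (remaining != 0);
    }

    ZSTD_seekable_freeFrameLog(fl);
    free(data);
    fclose(f);
    return 0;
}

Because the resulting seek table lives in an ordinary skippable frame, the rewritten file remains a valid zstd stream for regular decompressors, which simply skip the table.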
diff --git a/contrib/seekable_format/zstdseek_decompress.c b/contrib/seekable_format/zstdseek_decompress.c
new file mode 100644
index 000000000000..d740e16be465
--- /dev/null
+++ b/contrib/seekable_format/zstdseek_decompress.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2017-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* *********************************************************
+* Turn on Large Files support (>4GB) for 32-bit Linux/Unix
+***********************************************************/
+#if !defined(__64BIT__) || defined(__MINGW32__) /* No point defining Large file for 64 bit but MinGW-w64 requires it */
+# if !defined(_FILE_OFFSET_BITS)
+# define _FILE_OFFSET_BITS 64 /* turn off_t into a 64-bit type for ftello, fseeko */
+# endif
+# if !defined(_LARGEFILE_SOURCE) /* obsolete macro, replaced with _FILE_OFFSET_BITS */
+# define _LARGEFILE_SOURCE 1 /* Large File Support extension (LFS) - fseeko, ftello */
+# endif
+# if defined(_AIX) || defined(__hpux)
+# define _LARGE_FILES /* Large file support on 32-bits AIX and HP-UX */
+# endif
+#endif
+
+/* ************************************************************
+* Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW
+***************************************************************/
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+# define LONG_SEEK _fseeki64
+#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
+# define LONG_SEEK fseeko
+#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
+# define LONG_SEEK fseeko64
+#elif defined(_WIN32) && !defined(__DJGPP__)
+# include <windows.h>
+ static int LONG_SEEK(FILE* file, __int64 offset, int origin) {
+ LARGE_INTEGER off;
+ DWORD method;
+ off.QuadPart = offset;
+ if (origin == SEEK_END)
+ method = FILE_END;
+ else if (origin == SEEK_CUR)
+ method = FILE_CURRENT;
+ else
+ method = FILE_BEGIN;
+
+ if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method))
+ return 0;
+ else
+ return -1;
+ }
+#else
+# define LONG_SEEK fseek
+#endif
+
+#include <stdlib.h> /* malloc, free */
+#include <stdio.h> /* FILE* */
+
+#define XXH_STATIC_LINKING_ONLY
+#define XXH_NAMESPACE ZSTD_
+#include "xxhash.h"
+
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#include "zstd_errors.h"
+#include "mem.h"
+#include "zstd_seekable.h"
+
+#undef ERROR
+#define ERROR(name) ((size_t)-ZSTD_error_##name)
+
+#define CHECK_IO(f) { int const errcod = (f); if (errcod < 0) return ERROR(seekableIO); }
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* Special-case callbacks for FILE* and in-memory modes, so that we can treat
+ * them the same way as the advanced API */
+static int ZSTD_seekable_read_FILE(void* opaque, void* buffer, size_t n)
+{
+ size_t const result = fread(buffer, 1, n, (FILE*)opaque);
+ if (result != n) {
+ return -1;
+ }
+ return 0;
+}
+
+static int ZSTD_seekable_seek_FILE(void* opaque, S64 offset, int origin)
+{
+ int const ret = LONG_SEEK((FILE*)opaque, offset, origin);
+ if (ret) return ret;
+ return fflush((FILE*)opaque);
+}
+
+typedef struct {
+ const void *ptr;
+ size_t size;
+ size_t pos;
+} buffWrapper_t;
+
+static int ZSTD_seekable_read_buff(void* opaque, void* buffer, size_t n)
+{
+ buffWrapper_t* buff = (buffWrapper_t*) opaque;
+ if (buff->pos + n > buff->size) return -1; /* not enough data left */
+ memcpy(buffer, (const BYTE*)buff->ptr + buff->pos, n);
+ buff->pos += n;
+ return 0;
+}
+
+static int ZSTD_seekable_seek_buff(void* opaque, S64 offset, int origin)
+{
+ buffWrapper_t* buff = (buffWrapper_t*) opaque;
+ long long newOffset;
+ switch (origin) {
+ case SEEK_SET:
+ newOffset = offset;
+ break;
+ case SEEK_CUR:
+ newOffset = (long long)buff->pos + offset;
+ break;
+ case SEEK_END:
+ newOffset = (long long)buff->size + offset; /* offset is typically <= 0 here */
+ break;
+ default:
+ return -1;
+ }
+ if (newOffset < 0 || (unsigned long long)newOffset > buff->size) {
+ return -1;
+ }
+ buff->pos = newOffset;
+ return 0;
+}
+
+typedef struct {
+ U64 cOffset;
+ U64 dOffset;
+ U32 checksum;
+} seekEntry_t;
+
+typedef struct {
+ seekEntry_t* entries;
+ size_t tableLen;
+
+ int checksumFlag;
+} seekTable_t;
+
+#define SEEKABLE_BUFF_SIZE ZSTD_BLOCKSIZE_ABSOLUTEMAX
+
+struct ZSTD_seekable_s {
+ ZSTD_DStream* dstream;
+ seekTable_t seekTable;
+ ZSTD_seekable_customFile src;
+
+ U64 decompressedOffset;
+ U32 curFrame;
+
+ BYTE inBuff[SEEKABLE_BUFF_SIZE]; /* need to do our own input buffering */
+ BYTE outBuff[SEEKABLE_BUFF_SIZE]; /* so we can efficiently decompress the
+ starts of chunks before we get to the
+ desired section */
+ ZSTD_inBuffer in; /* maintain continuity across ZSTD_seekable_decompress operations */
+ buffWrapper_t buffWrapper; /* for `src.opaque` in in-memory mode */
+
+ XXH64_state_t xxhState;
+};
+
+ZSTD_seekable* ZSTD_seekable_create(void)
+{
+ ZSTD_seekable* zs = malloc(sizeof(ZSTD_seekable));
+
+ if (zs == NULL) return NULL;
+
+ /* also initializes stage to zsds_init */
+ memset(zs, 0, sizeof(*zs));
+
+ zs->dstream = ZSTD_createDStream();
+ if (zs->dstream == NULL) {
+ free(zs);
+ return NULL;
+ }
+
+ return zs;
+}
+
+size_t ZSTD_seekable_free(ZSTD_seekable* zs)
+{
+ if (zs == NULL) return 0; /* support free on null */
+ ZSTD_freeDStream(zs->dstream);
+ free(zs->seekTable.entries);
+ free(zs);
+
+ return 0;
+}
+
+/** ZSTD_seekable_offsetToFrameIndex() :
+ * Performs a binary search to find the last frame with a decompressed offset
+ * <= pos
+ * @return : the frame's index */
+U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, U64 pos)
+{
+ U32 lo = 0;
+ U32 hi = zs->seekTable.tableLen;
+
+ if (pos >= zs->seekTable.entries[zs->seekTable.tableLen].dOffset) {
+ return zs->seekTable.tableLen;
+ }
+
+ while (lo + 1 < hi) {
+ U32 const mid = lo + ((hi - lo) >> 1);
+ if (zs->seekTable.entries[mid].dOffset <= pos) {
+ lo = mid;
+ } else {
+ hi = mid;
+ }
+ }
+ return lo;
+}
+
+U32 ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
+{
+ return zs->seekTable.tableLen;
+}
+
+U64 ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+{
+ if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
+ return zs->seekTable.entries[frameIndex].cOffset;
+}
+
+U64 ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+{
+ if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
+ return zs->seekTable.entries[frameIndex].dOffset;
+}
+
+size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+{
+ if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
+ return zs->seekTable.entries[frameIndex + 1].cOffset -
+ zs->seekTable.entries[frameIndex].cOffset;
+}
+
+size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
+ return zs->seekTable.entries[frameIndex + 1].dOffset -
+ zs->seekTable.entries[frameIndex].dOffset;
+}
+
+static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs)
+{
+ int checksumFlag;
+ ZSTD_seekable_customFile src = zs->src;
+ /* read the footer, fixed size */
+ CHECK_IO(src.seek(src.opaque, -(int)ZSTD_seekTableFooterSize, SEEK_END));
+ CHECK_IO(src.read(src.opaque, zs->inBuff, ZSTD_seekTableFooterSize));
+
+ if (MEM_readLE32(zs->inBuff + 5) != ZSTD_SEEKABLE_MAGICNUMBER) {
+ return ERROR(prefix_unknown);
+ }
+
+    { BYTE const sfd = zs->inBuff[4];
+        checksumFlag = sfd >> 7;
+
+        /* check reserved bits : bits 2-6 of the descriptor byte must be zero */
+        if ((sfd >> 2) & 0x1f) {
+            return ERROR(corruption_detected);
+        }
+    }
+
+ { U32 const numFrames = MEM_readLE32(zs->inBuff);
+ U32 const sizePerEntry = 8 + (checksumFlag?4:0);
+ U32 const tableSize = sizePerEntry * numFrames;
+ U32 const frameSize = tableSize + ZSTD_seekTableFooterSize + ZSTD_skippableHeaderSize;
+
+ U32 remaining = frameSize - ZSTD_seekTableFooterSize; /* don't need to re-read footer */
+ {
+ U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
+
+ CHECK_IO(src.seek(src.opaque, -(S64)frameSize, SEEK_END));
+ CHECK_IO(src.read(src.opaque, zs->inBuff, toRead));
+
+ remaining -= toRead;
+ }
+
+ if (MEM_readLE32(zs->inBuff) != (ZSTD_MAGIC_SKIPPABLE_START | 0xE)) {
+ return ERROR(prefix_unknown);
+ }
+ if (MEM_readLE32(zs->inBuff+4) + ZSTD_skippableHeaderSize != frameSize) {
+ return ERROR(prefix_unknown);
+ }
+
+ { /* Allocate an extra entry at the end so that we can do size
+ * computations on the last element without special case */
+ seekEntry_t* entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1));
+ const BYTE* tableBase = zs->inBuff + ZSTD_skippableHeaderSize;
+
+ U32 idx = 0;
+ U32 pos = 8;
+
+
+ U64 cOffset = 0;
+ U64 dOffset = 0;
+
+ if (!entries) {
+ free(entries);
+ return ERROR(memory_allocation);
+ }
+
+ /* compute cumulative positions */
+ for (; idx < numFrames; idx++) {
+ if (pos + sizePerEntry > SEEKABLE_BUFF_SIZE) {
+                U32 const offset = SEEKABLE_BUFF_SIZE - pos;
+                U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE - offset); /* only fill the space after the moved bytes */
+ memmove(zs->inBuff, zs->inBuff + pos, offset); /* move any data we haven't read yet */
+ CHECK_IO(src.read(src.opaque, zs->inBuff+offset, toRead));
+ remaining -= toRead;
+ pos = 0;
+ }
+ entries[idx].cOffset = cOffset;
+ entries[idx].dOffset = dOffset;
+
+ cOffset += MEM_readLE32(zs->inBuff + pos);
+ pos += 4;
+ dOffset += MEM_readLE32(zs->inBuff + pos);
+ pos += 4;
+ if (checksumFlag) {
+ entries[idx].checksum = MEM_readLE32(zs->inBuff + pos);
+ pos += 4;
+ }
+ }
+ entries[numFrames].cOffset = cOffset;
+ entries[numFrames].dOffset = dOffset;
+
+ zs->seekTable.entries = entries;
+ zs->seekTable.tableLen = numFrames;
+ zs->seekTable.checksumFlag = checksumFlag;
+ return 0;
+ }
+ }
+}
+
+size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize)
+{
+ zs->buffWrapper = (buffWrapper_t){src, srcSize, 0};
+ { ZSTD_seekable_customFile srcFile = {&zs->buffWrapper,
+ &ZSTD_seekable_read_buff,
+ &ZSTD_seekable_seek_buff};
+ return ZSTD_seekable_initAdvanced(zs, srcFile); }
+}
+
+size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src)
+{
+ ZSTD_seekable_customFile srcFile = {src, &ZSTD_seekable_read_FILE,
+ &ZSTD_seekable_seek_FILE};
+ return ZSTD_seekable_initAdvanced(zs, srcFile);
+}
+
+size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src)
+{
+ zs->src = src;
+
+ { const size_t seekTableInit = ZSTD_seekable_loadSeekTable(zs);
+ if (ZSTD_isError(seekTableInit)) return seekTableInit; }
+
+ zs->decompressedOffset = (U64)-1;
+ zs->curFrame = (U32)-1;
+
+ { const size_t dstreamInit = ZSTD_initDStream(zs->dstream);
+ if (ZSTD_isError(dstreamInit)) return dstreamInit; }
+ return 0;
+}
+
+size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, U64 offset)
+{
+ U32 targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, offset);
+ do {
+ /* check if we can continue from a previous decompress job */
+ if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) {
+ zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset;
+ zs->curFrame = targetFrame;
+
+ CHECK_IO(zs->src.seek(zs->src.opaque,
+ zs->seekTable.entries[targetFrame].cOffset,
+ SEEK_SET));
+ zs->in = (ZSTD_inBuffer){zs->inBuff, 0, 0};
+ XXH64_reset(&zs->xxhState, 0);
+ ZSTD_resetDStream(zs->dstream);
+ }
+
+ while (zs->decompressedOffset < offset + len) {
+ size_t toRead;
+ ZSTD_outBuffer outTmp;
+ size_t prevOutPos;
+ if (zs->decompressedOffset < offset) {
+ /* dummy decompressions until we get to the target offset */
+ outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0};
+ } else {
+ outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset};
+ }
+
+ prevOutPos = outTmp.pos;
+ toRead = ZSTD_decompressStream(zs->dstream, &outTmp, &zs->in);
+ if (ZSTD_isError(toRead)) {
+ return toRead;
+ }
+
+ if (zs->seekTable.checksumFlag) {
+ XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos,
+ outTmp.pos - prevOutPos);
+ }
+ zs->decompressedOffset += outTmp.pos - prevOutPos;
+
+ if (toRead == 0) {
+ /* frame complete */
+
+ /* verify checksum */
+ if (zs->seekTable.checksumFlag &&
+ (XXH64_digest(&zs->xxhState) & 0xFFFFFFFFU) !=
+ zs->seekTable.entries[targetFrame].checksum) {
+ return ERROR(corruption_detected);
+ }
+
+ if (zs->decompressedOffset < offset + len) {
+ /* go back to the start and force a reset of the stream */
+ targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, zs->decompressedOffset);
+ }
+ break;
+ }
+
+ /* read in more data if we're done with this buffer */
+ if (zs->in.pos == zs->in.size) {
+ toRead = MIN(toRead, SEEKABLE_BUFF_SIZE);
+ CHECK_IO(zs->src.read(zs->src.opaque, zs->inBuff, toRead));
+ zs->in.size = toRead;
+ zs->in.pos = 0;
+ }
+ }
+ } while (zs->decompressedOffset != offset + len);
+
+ return len;
+}
+
+size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, U32 frameIndex)
+{
+ if (frameIndex >= zs->seekTable.tableLen) {
+ return ERROR(frameIndex_tooLarge);
+ }
+
+ {
+ size_t const decompressedSize =
+ zs->seekTable.entries[frameIndex + 1].dOffset -
+ zs->seekTable.entries[frameIndex].dOffset;
+ if (dstSize < decompressedSize) {
+ return ERROR(dstSize_tooSmall);
+ }
+ return ZSTD_seekable_decompress(
+ zs, dst, decompressedSize,
+ zs->seekTable.entries[frameIndex].dOffset);
+ }
+}
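
For orientation, here is a minimal usage sketch of the seekable API implemented above. The helper name read_range and its error handling are illustrative only; it assumes contrib/seekable_format/zstd_seekable.h is on the include path.

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>              /* ZSTD_isError */
    #include "zstd_seekable.h"     /* contrib/seekable_format */

    /* Read `len` decompressed bytes starting at offset `off` from a seekable archive,
     * without decompressing the frames that precede the target. */
    static int read_range(const char* path, unsigned long long off, size_t len)
    {
        FILE* const fin = fopen(path, "rb");
        ZSTD_seekable* const zs = ZSTD_seekable_create();
        void* const buf = malloc(len);
        int err = 1;

        if (fin && zs && buf
            && !ZSTD_isError(ZSTD_seekable_initFile(zs, fin))            /* loads the seek table footer */
            && !ZSTD_isError(ZSTD_seekable_decompress(zs, buf, len, off))) {
            fwrite(buf, 1, len, stdout);
            err = 0;
        }

        free(buf);
        ZSTD_seekable_free(zs);    /* accepts NULL */
        if (fin) fclose(fin);
        return err;
    }

ZSTD_seekable_initBuff() can be substituted for ZSTD_seekable_initFile() when the archive already sits in memory.
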
diff --git a/doc/educational_decoder/Makefile b/doc/educational_decoder/Makefile
new file mode 100644
index 000000000000..ace1294f8e73
--- /dev/null
+++ b/doc/educational_decoder/Makefile
@@ -0,0 +1,34 @@
+HARNESS_FILES=*.c
+
+MULTITHREAD_LDFLAGS = -pthread
+DEBUGFLAGS= -g -DZSTD_DEBUG=1
+CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
+ -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
+CFLAGS ?= -O3
+CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
+ -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
+ -Wstrict-prototypes -Wundef -Wformat-security \
+ -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
+ -Wredundant-decls
+CFLAGS += $(DEBUGFLAGS)
+CFLAGS += $(MOREFLAGS)
+FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS)
+
+harness: $(HARNESS_FILES)
+ $(CC) $(FLAGS) $^ -o $@
+
+clean:
+ @$(RM) -f harness
+ @$(RM) -rf harness.dSYM
+
+test: harness
+ @zstd README.md -o tmp.zst
+ @./harness tmp.zst tmp
+ @diff -s tmp README.md
+ @$(RM) -f tmp*
+ @zstd --train harness.c zstd_decompress.c zstd_decompress.h README.md
+ @zstd -D dictionary README.md -o tmp.zst
+ @./harness tmp.zst tmp dictionary
+ @diff -s tmp README.md
+ @$(RM) -f tmp* dictionary
+ @make clean
diff --git a/doc/educational_decoder/harness.c b/doc/educational_decoder/harness.c
index 982e066e28f0..47882b168953 100644
--- a/doc/educational_decoder/harness.c
+++ b/doc/educational_decoder/harness.c
@@ -2,9 +2,9 @@
* Copyright (c) 2017-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include <stdio.h>
diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c
index af10db528d2a..bea0e0ce1f45 100644
--- a/doc/educational_decoder/zstd_decompress.c
+++ b/doc/educational_decoder/zstd_decompress.c
@@ -2,9 +2,9 @@
* Copyright (c) 2017-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/// Zstandard educational decoder implementation
@@ -1289,7 +1289,7 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
// Copy any leftover literals
{
size_t len = IO_istream_len(&litstream);
- copy_literals(len, &litstream, out);
+ copy_literals(len, &litstream, out);
total_output += len;
}
diff --git a/doc/educational_decoder/zstd_decompress.h b/doc/educational_decoder/zstd_decompress.h
index 41009909bfa1..a01fde331fb8 100644
--- a/doc/educational_decoder/zstd_decompress.h
+++ b/doc/educational_decoder/zstd_decompress.h
@@ -1,10 +1,10 @@
/*
- * Copyright (c) 2017-present, Facebook, Inc.
+ * Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/******* EXPOSED TYPES ********************************************************/
diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html
index c166e7258d32..b4720adac263 100644
--- a/doc/zstd_manual.html
+++ b/doc/zstd_manual.html
@@ -1,10 +1,10 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
-<title>zstd 1.3.1 Manual</title>
+<title>zstd 1.3.2 Manual</title>
</head>
<body>
-<h1>zstd 1.3.1 Manual</h1>
+<h1>zstd 1.3.2 Manual</h1>
<hr>
<a name="Contents"></a><h2>Contents</h2>
<ol>
@@ -27,7 +27,8 @@
<li><a href="#Chapter17">Buffer-less and synchronous inner streaming functions</a></li>
<li><a href="#Chapter18">Buffer-less streaming compression (synchronous mode)</a></li>
<li><a href="#Chapter19">Buffer-less streaming decompression (synchronous mode)</a></li>
-<li><a href="#Chapter20">Block functions</a></li>
+<li><a href="#Chapter20">New advanced API (experimental)</a></li>
+<li><a href="#Chapter21">Block level API</a></li>
</ol>
<hr>
<a name="Chapter1"></a><h2>Introduction</h2><pre>
@@ -110,10 +111,11 @@ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
@return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise.
</p></pre><BR>
-<h3>Helper functions</h3><pre></pre><b><pre>int ZSTD_maxCLevel(void); </b>/*!< maximum compression level available */<b>
+<h3>Helper functions</h3><pre></pre><b><pre>#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < 128 KB) ? ((128 KB - (srcSize)) >> 11) </b>/* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */<b>
size_t ZSTD_compressBound(size_t srcSize); </b>/*!< maximum compressed size in worst case scenario */<b>
unsigned ZSTD_isError(size_t code); </b>/*!< tells if a `size_t` function result is an error code */<b>
const char* ZSTD_getErrorName(size_t code); </b>/*!< provides readable string from an error code */<b>
+int ZSTD_maxCLevel(void); </b>/*!< maximum compression level available */<b>
</pre></b><BR>
<a name="Chapter4"></a><h2>Explicit memory management</h2><pre></pre>
@@ -398,29 +400,33 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
</b><p> These functions give the current memory usage of selected object.
- Object memory usage can evolve if it's re-used multiple times.
+ Object memory usage can evolve when re-used multiple times.
</p></pre><BR>
<pre><b>size_t ZSTD_estimateCCtxSize(int compressionLevel);
-size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams);
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
size_t ZSTD_estimateDCtxSize(void);
</b><p> These functions make it possible to estimate memory usage
of a future {D,C}Ctx, before its creation.
ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
It will also consider src size to be arbitrarily "large", which is worst case.
- If srcSize is known to always be small, ZSTD_estimateCCtxSize_advanced() can provide a tighter estimation.
- ZSTD_estimateCCtxSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
+ ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbThreads is > 1.
Note : CCtx estimation is only correct for single-threaded compression
</p></pre><BR>
<pre><b>size_t ZSTD_estimateCStreamSize(int compressionLevel);
-size_t ZSTD_estimateCStreamSize_advanced(ZSTD_compressionParameters cParams);
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
size_t ZSTD_estimateDStreamSize(size_t windowSize);
size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
</b><p> ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
It will also consider src size to be arbitrarily "large", which is worst case.
- If srcSize is known to always be small, ZSTD_estimateCStreamSize_advanced() can provide a tighter estimation.
- ZSTD_estimateCStreamSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbThreads is set to a value > 1.
Note : CStream estimation is only correct for single-threaded compression.
ZSTD_DStream memory budget depends on window Size.
This information can be passed manually, using ZSTD_estimateDStreamSize,
@@ -430,12 +436,18 @@ size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
In this case, get total size by adding ZSTD_estimate?DictSize
</p></pre><BR>
+<pre><b>typedef enum {
+ ZSTD_dlm_byCopy = 0, </b>/**< Copy dictionary content internally */<b>
+ ZSTD_dlm_byRef, </b>/**< Reference dictionary content -- the dictionary buffer must outlive its users. */<b>
+} ZSTD_dictLoadMethod_e;
+</b></pre><BR>
<pre><b>size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
-size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, unsigned byReference);
-size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
+size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
</b><p> ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
- ZSTD_estimateCStreamSize_advanced() makes it possible to control precisely compression parameters, like ZSTD_createCDict_advanced().
- Note : dictionary created "byReference" are smaller
+ ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ Note : dictionaries created by reference (ZSTD_dlm_byRef) are smaller
+
</p></pre><BR>
<a name="Chapter14"></a><h2>Advanced compression functions</h2><pre></pre>
@@ -461,16 +473,6 @@ size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
</p></pre><BR>
-<pre><b>typedef enum {
- ZSTD_p_forceWindow, </b>/* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */<b>
- ZSTD_p_forceRawDict </b>/* Force loading dictionary in "content-only" mode (no header analysis) */<b>
-} ZSTD_CCtxParameter;
-</b></pre><BR>
-<pre><b>size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
-</b><p> Set advanced parameters, selected through enum ZSTD_CCtxParameter
- @result : 0, or an error code (which can be tested with ZSTD_isError())
-</p></pre><BR>
-
<pre><b>ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
</b><p> Create a digested dictionary for compression
Dictionary content is simply referenced, and therefore stays in dictBuffer.
@@ -483,7 +485,8 @@ size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
} ZSTD_dictMode_e;
</b></pre><BR>
<pre><b>ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams,
ZSTD_customMem customMem);
</b><p> Create a ZSTD_CDict using external alloc and free, and customized compression parameters
@@ -492,7 +495,7 @@ size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
<pre><b>ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams);
</b><p> Generate a digested dictionary in provided memory area.
workspace: The memory area to emplace the dictionary into.
@@ -580,13 +583,14 @@ size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_customMem customMem);
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_customMem customMem);
</b><p> Create a ZSTD_DDict using external alloc and free, optionally by reference
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
- unsigned byReference);
+ ZSTD_dictLoadMethod_e dictLoadMethod);
</b><p> Generate a digested dictionary in provided memory area.
workspace: The memory area to emplace the dictionary into.
Provided pointer must 8-bytes aligned.
@@ -628,9 +632,9 @@ size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
<h3>Advanced Streaming compression functions</h3><pre></pre><b><pre>ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); </b>/**< same as ZSTD_initStaticCCtx() */<b>
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize must be correct, a size of 0 means unknown. for a frame size of 0 use initCStream_advanced */<b>
-size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); </b>/**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. */<b>
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); </b>/**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/<b>
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
- ZSTD_parameters params, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */<b>
+ ZSTD_parameters params, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */<b>
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); </b>/**< note : cdict will just be referenced, and must outlive compression session */<b>
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); </b>/**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */<b>
</pre></b><BR>
@@ -643,18 +647,18 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict*
@return : 0, or an error code (which can be tested using ZSTD_isError())
</p></pre><BR>
-<h3>Advanced Streaming decompression functions</h3><pre></pre><b><pre>typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
-ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+<h3>Advanced Streaming decompression functions</h3><pre></pre><b><pre>ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); </b>/**< same as ZSTD_initStaticDCtx() */<b>
-size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
-size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); </b>/**< note: a dict will not be used if dict == NULL or dictSize < 8 */<b>
-size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); </b>/**< note : ddict will just be referenced, and must outlive decompression session */<b>
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
+size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue); </b>/* obsolete : this API will be removed in a future version */<b>
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); </b>/**< note: no dictionary will be used if dict == NULL or dictSize < 8 */<b>
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); </b>/**< note : ddict is referenced, it must outlive decompression session */<b>
size_t ZSTD_resetDStream(ZSTD_DStream* zds); </b>/**< re-use decompression parameters from previous init; saves dictionary loading */<b>
</pre></b><BR>
<a name="Chapter17"></a><h2>Buffer-less and synchronous inner streaming functions</h2><pre>
This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
- But it's also a complex one, with many restrictions (documented below).
- Prefer using normal streaming API for an easier experience
+ But it's also a complex one, with several restrictions, documented below.
+ Prefer normal streaming API for an easier experience.
<BR></pre>
@@ -670,8 +674,8 @@ size_t ZSTD_resetDStream(ZSTD_DStream* zds); </b>/**< re-use decompression para
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
- - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffer only.
- - Interface is synchronous : input is consumed entirely and produce 1+ (or more) compressed blocks.
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
Worst case evaluation is provided by ZSTD_compressBound().
ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
@@ -682,9 +686,9 @@ size_t ZSTD_resetDStream(ZSTD_DStream* zds); </b>/**< re-use decompression para
Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
- Without last block mark, frames will be considered unfinished (corrupted) by decoders.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new frame.
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
<BR></pre>
<h3>Buffer-less streaming compression functions</h3><pre></pre><b><pre>size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
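
A rough sketch of the begin/continue/end flow described above, compressing a whole buffer in one call. The helper name and the choice of level 3 are illustrative; ZSTD_STATIC_LINKING_ONLY is required for this advanced API.

    #define ZSTD_STATIC_LINKING_ONLY   /* buffer-less API is in the advanced section */
    #include <zstd.h>

    /* Compress one in-memory buffer into a single frame with the buffer-less API.
     * dstCapacity should be at least ZSTD_compressBound(srcSize). */
    static size_t bufferless_compress(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
    {
        size_t const initErr = ZSTD_compressBegin(cctx, 3 /* compression level */);
        if (ZSTD_isError(initErr)) return initErr;

        /* the whole input is passed in one call here; a real streaming user would
         * feed chunks through ZSTD_compressContinue() before the final ZSTD_compressEnd() */
        return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
    }
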
@@ -700,40 +704,53 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned lo
A ZSTD_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
- such as minimum rolling buffer size to allocate to decompress data (`windowSize`),
- and the dictionary ID in use.
- (Note : content size is optional, it may not be present. 0 means : content size unknown).
- Note that these values could be wrong, either because of data malformation, or because an attacker is spoofing deliberate false information.
- As a consequence, check that values remain within valid application range, especially `windowSize`, before allocation.
- Each application can set its own limit, depending on local restrictions.
- For extended interoperability, it is recommended to support windowSize of at least 8 MB.
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
- `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- Start decompression, with ZSTD_decompressBegin().
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+  ZSTD_decompressContinue() is very sensitive to contiguity :
+  if 2 blocks don't follow each other in memory, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if required value is too large for current system (in 32-bits mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+  whose maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At which point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are setup, start decompression, with ZSTD_decompressBegin().
If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
- Alternatively, you can copy a prepared context, using ZSTD_copyDCtx().
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
- It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some metadata item.
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
- ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
- They should preferably be located contiguously, prior to current block.
- Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
- ZSTD_decompressContinue() is very sensitive to contiguity,
- if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
- or that previous contiguous segment is large enough to properly handle maximum back-reference.
-
A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
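
A sketch of that decode loop for a frame held entirely in memory. It assumes dst can hold the whole regenerated content, so the contiguity rules above are trivially met; the error value used for truncated input is illustrative.

    #define ZSTD_STATIC_LINKING_ONLY   /* buffer-less API lives in the advanced section */
    #include <zstd.h>

    /* Decode one complete frame held in memory, assuming dst is large enough for the
     * whole regenerated content. */
    static size_t bufferless_decode_frame(ZSTD_DCtx* dctx,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
    {
        const char* ip = (const char*)src;
        const char* const iend = ip + srcSize;
        char* op = (char*)dst;
        char* const oend = op + dstCapacity;
        ZSTD_frameHeader zfh;

        {   size_t const hErr = ZSTD_getFrameHeader(&zfh, src, srcSize);
            if (hErr != 0) return hErr;            /* error code, or "need more input" hint */
            /* a real application must validate zfh.windowSize before allocating anything */
        }

        {   size_t const bErr = ZSTD_decompressBegin(dctx);
            if (ZSTD_isError(bErr)) return bErr;
        }

        while (1) {
            size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
            size_t produced;
            if (toRead == 0) break;                               /* frame fully decoded */
            if (toRead > (size_t)(iend - ip)) return (size_t)-1;  /* truncated input (illustrative error value) */
            produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
            if (ZSTD_isError(produced)) return produced;
            ip += toRead;
            op += produced;    /* may be 0 when a metadata item was decoded */
        }
        return (size_t)(op - (char*)dst);                         /* total regenerated size */
    }
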
@@ -743,44 +760,62 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned lo
== Special case : skippable frames
Skippable frames allow integration of user-defined data into a flow of concatenated frames.
- Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frames is as follows :
+ Skippable frames will be ignored (skipped) by decompressor.
+ The format of skippable frames is as follows :
a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
- For skippable frames ZSTD_decompressContinue() always returns 0.
- For skippable frames ZSTD_getFrameHeader() returns fparamsPtr->windowLog==0 what means that a frame is skippable.
- Note : If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might actually be a Zstd encoded frame with no content.
- For purposes of decompression, it is valid in both cases to skip the frame using
- ZSTD_findFrameCompressedSize to find its size in bytes.
- It also returns Frame Size as fparamsPtr->frameContentSize.
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
<BR></pre>
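
For illustration, a small helper that emits one such skippable frame by hand. The helper names are hypothetical; any magic value in the stated 0x184D2A50..0x184D2A5F range is accepted.

    #include <stdio.h>
    #include <stdint.h>

    /* Little-endian 32-bit write, as required by the skippable frame format. */
    static void write_le32(FILE* f, uint32_t v)
    {
        unsigned char b[4] = { (unsigned char)v, (unsigned char)(v >> 8),
                               (unsigned char)(v >> 16), (unsigned char)(v >> 24) };
        fwrite(b, 1, 4, f);
    }

    /* Emit one skippable frame : magic, LE32 content size, then the user payload. */
    static void write_skippable_frame(FILE* f, const void* userData, uint32_t userSize)
    {
        write_le32(f, 0x184D2A50U);        /* Skippable frame ID */
        write_le32(f, userSize);           /* Frame Size */
        fwrite(userData, 1, userSize, f);  /* Frame Content */
    }
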
<h3>Buffer-less streaming decompression functions</h3><pre></pre><b><pre>typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
typedef struct {
- unsigned long long frameContentSize; </b>/* ZSTD_CONTENTSIZE_UNKNOWN means this field is not available. 0 means "empty" */<b>
+ unsigned long long frameContentSize; </b>/* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */<b>
unsigned long long windowSize; </b>/* can be very large, up to <= frameContentSize */<b>
+ unsigned blockSizeMax;
ZSTD_frameType_e frameType; </b>/* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */<b>
unsigned headerSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameHeader;
size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); </b>/**< doesn't consume input */<b>
-size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); </b>/**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */<b>
</pre></b><BR>
<pre><b>typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
</b></pre><BR>
-<h3>New advanced API (experimental, and compression only)</h3><pre></pre><b><pre></pre></b><BR>
+<a name="Chapter20"></a><h2>New advanced API (experimental)</h2><pre></pre>
+
+<pre><b>typedef enum {
+ </b>/* Question : should we have a format ZSTD_f_auto ?<b>
+ * For the time being, it would mean exactly the same as ZSTD_f_zstd1.
+ * But, in the future, should several formats be supported,
+ * on the compression side, it would mean "default format".
+ * On the decompression side, it would mean "multi format",
+ * and ZSTD_f_zstd1 could be reserved to mean "accept *only* zstd frames".
+ * Since meaning is a little different, another option could be to define different enums for compression and decompression.
+ * This question could be kept for later, when there are actually multiple formats to support,
+ * but there is also the question of pinning enum values, and pinning value `0` is especially important */
+ ZSTD_f_zstd1 = 0, </b>/* zstd frame format, specified in zstd_compression_format.md (default) */<b>
+ ZSTD_f_zstd1_magicless, </b>/* Variant of zstd frame format, without initial 4-bytes magic number.<b>
+ * Useful to save 4 bytes per generated frame.
+                                 * Decoder cannot recognise this format automatically ; it must be told to expect it. */
+} ZSTD_format_e;
+</b></pre><BR>
<pre><b>typedef enum {
+ </b>/* compression format */<b>
+ ZSTD_p_format = 10, </b>/* See ZSTD_format_e enum definition.<b>
+ * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */
+
</b>/* compression parameters */<b>
ZSTD_p_compressionLevel=100, </b>/* Update all compression parameters according to pre-defined cLevel table<b>
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means "do not change cLevel". */
ZSTD_p_windowLog, </b>/* Maximum allowed back-reference distance, expressed as power of 2.<b>
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
- * Special: value 0 means "do not change windowLog". */
+ * Special: value 0 means "do not change windowLog".
+ * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
+ * requires setting the maximum window size at least as large during decompression. */
ZSTD_p_hashLog, </b>/* Size of the probe table, as a power of 2.<b>
* Resulting table size is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
@@ -819,12 +854,6 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
ZSTD_p_checksumFlag, </b>/* A 32-bits checksum of content is written at end of frame (default:0) */<b>
ZSTD_p_dictIDFlag, </b>/* When applicable, dictID of dictionary is provided in frame header (default:1) */<b>
- </b>/* dictionary parameters (must be set before ZSTD_CCtx_loadDictionary) */<b>
- ZSTD_p_dictMode=300, </b>/* Select how dictionary content must be interpreted. Value must be from type ZSTD_dictMode_e.<b>
- * default : 0==auto : dictionary will be "full" if it respects specification, otherwise it will be "rawContent" */
- ZSTD_p_refDictContent, </b>/* Dictionary content will be referenced, instead of copied (default:0==byCopy).<b>
- * It requires that dictionary buffer outlives its users */
-
</b>/* multi-threading parameters */<b>
ZSTD_p_nbThreads=400, </b>/* Select how many threads a compression job can spawn (default:1)<b>
* More threads improve speed, but also increase memory usage.
@@ -840,6 +869,35 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
</b>/* advanced parameters - may not remain available after API update */<b>
ZSTD_p_forceMaxWindow=1100, </b>/* Force back-reference distances to remain < windowSize,<b>
* even when referencing into Dictionary content (default:0) */
+ ZSTD_p_enableLongDistanceMatching=1200, </b>/* Enable long distance matching.<b>
+ * This parameter is designed to improve the compression
+ * ratio for large inputs with long distance matches.
+ * This increases the memory usage as well as window size.
+ * Note: setting this parameter sets all the LDM parameters
+ * as well as ZSTD_p_windowLog. It should be set after
+ * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and
+ * other LDM parameters. Setting the compression level
+ * after this parameter overrides the window log, though LDM
+ * will remain enabled until explicitly disabled. */
+ ZSTD_p_ldmHashLog, </b>/* Size of the table for long distance matching, as a power of 2.<b>
+ * Larger values increase memory usage and compression ratio, but decrease
+ * compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * (default: windowlog - 7). */
+ ZSTD_p_ldmMinMatch, </b>/* Minimum size of searched matches for long distance matcher.<b>
+ * Larger/too small values usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN
+ * and ZSTD_LDM_MINMATCH_MAX (default: 64). */
+ ZSTD_p_ldmBucketSizeLog, </b>/* Log size of each bucket in the LDM hash table for collision resolution.<b>
+ * Larger values usually improve collision resolution but may decrease
+ * compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */
+ ZSTD_p_ldmHashEveryLog, </b>/* Frequency of inserting/looking up entries in the LDM hash table.<b>
+ * The default is MAX(0, (windowLog - ldmHashLog)) to
+ * optimize hash table usage.
+ * Larger values improve compression speed. Deviating far from the
+ * default value will likely result in a decrease in compression ratio.
+ * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */
} ZSTD_cParameter;
</b></pre><BR>
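
A compact single-shot example of this parameter API, which is experimental in this release. The selected parameter values are arbitrary and the helper name is hypothetical.

    #define ZSTD_STATIC_LINKING_ONLY   /* this API is experimental in v1.3.2 */
    #include <zstd.h>

    /* Single-shot compression through the parameter API. */
    static size_t compress_with_params(void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_inBuffer  input  = { src, srcSize, 0 };
        ZSTD_outBuffer output = { dst, dstCapacity, 0 };
        size_t remaining;
        if (cctx == NULL) return (size_t)-1;   /* illustrative error value */

        ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_enableLongDistanceMatching, 1);  /* also raises windowLog */

        remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
        ZSTD_freeCCtx(cctx);
        if (ZSTD_isError(remaining)) return remaining;
        /* remaining != 0 would mean dst was too small to hold the complete frame */
        return output.pos;
    }
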
@@ -861,18 +919,25 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode);
</b><p> Create an internal CDict from dict buffer.
Decompression will have to use same buffer.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
meaning "return to no-dictionary mode".
- Note 1 : `dict` content will be copied internally,
- except if ZSTD_p_refDictContent is set before loading.
+ Note 1 : `dict` content will be copied internally. Use
+ ZSTD_CCtx_loadDictionary_byReference() to reference dictionary
+ content instead. The dictionary buffer must then outlive its
+ users.
Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
For this reason, compression parameters cannot be changed anymore after loading a dictionary.
It's also a CPU-heavy operation, with non-negligible impact on latency.
Note 3 : Dictionary will be used for all future compression jobs.
- To return to "no-dictionary" situation, load a NULL dictionary
+ To return to "no-dictionary" situation, load a NULL dictionary
+   Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() to select how dictionary
+ content will be interpreted.
+
</p></pre><BR>
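
A short sketch of the by-reference path described above. The helper name is hypothetical; dictBuf must outlive the cctx, and parameters are set before the dictionary is loaded, as Note 2 requires.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Attach a dictionary by reference : dictBuf must stay valid for as long as
     * the cctx keeps using it. */
    static size_t setup_dictionary(ZSTD_CCtx* cctx, const void* dictBuf, size_t dictSize)
    {
        size_t const err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 3);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_loadDictionary_byReference(cctx, dictBuf, dictSize);
    }
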
<pre><b>size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
@@ -889,6 +954,7 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
+size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode);
</b><p> Reference a prefix (single-usage dictionary) for next compression job.
Decompression need same prefix to properly regenerate data.
Prefix is **only used once**. Tables are discarded at end of compression job.
@@ -899,13 +965,15 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
Note 1 : Prefix buffer is referenced. It must outlive compression job.
Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
It's a CPU-heavy operation, with non-negligible impact on latency.
- Note 3 : it's possible to alter ZSTD_p_dictMode using ZSTD_CCtx_setParameter()
+ Note 3 : By default, the prefix is treated as raw content
+ (ZSTD_dm_rawContent). Use ZSTD_CCtx_refPrefix_advanced() to alter
+ dictMode.
</p></pre><BR>
<pre><b>typedef enum {
ZSTD_e_continue=0, </b>/* collect more data, encoder transparently decides when to output result, for optimal conditions */<b>
ZSTD_e_flush, </b>/* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */<b>
- ZSTD_e_end </b>/* flush any remaining data and ends current frame. Any future compression starts a new frame. */<b>
+ ZSTD_e_end </b>/* flush any remaining data and close current frame. Any additional data starts a new frame. */<b>
} ZSTD_EndDirective;
</b></pre><BR>
<pre><b>size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
@@ -915,8 +983,8 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
</b><p> Behave about the same as ZSTD_compressStream. To note :
- Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
- Compression parameters cannot be changed once compression is started.
- - *dstPos must be <= dstCapacity, *srcPos must be <= srcSize
- - *dspPos and *srcPos will be updated. They are guaranteed to remain below their respective limit.
+  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
- @return provides the minimum amount of data still to flush from internal buffers
or an error code, which can be tested using ZSTD_isError().
if @return != 0, flush is not fully completed, there is some data left within internal buffers.
@@ -932,6 +1000,7 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
Useful after an error, or to interrupt an ongoing compression job and start a new one.
Any internal data not yet flushed is cancelled.
Dictionary (if any) is dropped.
+ All parameters are back to default values.
It's possible to modify compression parameters after a reset.
</p></pre><BR>
@@ -943,15 +1012,163 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
ZSTD_EndDirective endOp);
</b><p> Same as ZSTD_compress_generic(),
but using only integral types as arguments.
- Argument list is larger and less expressive than ZSTD_{in,out}Buffer,
+ Argument list is larger than ZSTD_{in,out}Buffer,
+ but can be helpful for binders from dynamic languages
+  which have trouble handling structures containing memory pointers.
+
+</p></pre><BR>
+
+<pre><b>ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+</b><p> Quick howto :
+ - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
+ an existing ZSTD_CCtx_params structure.
+ This is similar to
+ ZSTD_CCtx_setParameter().
+ - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ an existing CCtx.
+ These parameters will be applied to
+ all subsequent compression jobs.
+ - ZSTD_compress_generic() : Do compression using the CCtx.
+ - ZSTD_freeCCtxParams() : Free the memory.
+
+  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
+ for static allocation for single-threaded compression.
+
+</p></pre><BR>
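
The quick howto above, expressed as code. The helper name is hypothetical and the parameter choices are arbitrary.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Build a reusable parameter set once, then apply it to any number of CCtx. */
    static size_t apply_shared_params(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        size_t err;
        if (params == NULL) return (size_t)-1;   /* illustrative error value */

        ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 9);
        ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);

        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  /* before loading any dictionary */
        ZSTD_freeCCtxParams(params);
        return err;
    }
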
+
+<pre><b>size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params);
+</b><p> Reset params to default, with the default compression level.
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+</b><p> Initializes the compression parameters of cctxParams according to
+ compression level. All other parameters are reset to their default values.
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+</b><p> Initializes the compression and frame parameters of cctxParams according to
+ params. All other parameters are reset to their default values.
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);
+</b><p> Similar to ZSTD_CCtx_setParameter.
+ Set one compression parameter, selected by enum ZSTD_cParameter.
+ Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
+ Note : when `value` is an enum, cast it to unsigned for proper type checking.
+ @result : 0, or an error code (which can be tested with ZSTD_isError()).
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
+</b><p> Apply a set of ZSTD_CCtx_params to the compression context.
+ This must be done before the dictionary is loaded.
+ The pledgedSrcSize is treated as unknown.
+ Multithreading parameters are applied only if nbThreads > 1.
+
+</p></pre><BR>
+
+<h3>Advanced parameters for decompression API</h3><pre></pre><b><pre></pre></b><BR>
+<pre><b>size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); </b>/* not implemented */<b>
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); </b>/* not implemented */<b>
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode); </b>/* not implemented */<b>
+</b><p> Create an internal DDict from dict buffer,
+ to be used to decompress next frames.
+ @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ meaning "return to no-dictionary mode".
+ Note 1 : `dict` content will be copied internally.
+ Use ZSTD_DCtx_loadDictionary_byReference()
+ to reference dictionary content instead.
+ In which case, the dictionary buffer must outlive its users.
+ Note 2 : Loading a dictionary involves building tables,
+ which has a non-negligible impact on CPU usage and latency.
+ Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select
+ how dictionary content will be interpreted and loaded.
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); </b>/* not implemented */<b>
+</b><p> Reference a prepared dictionary, to be used to decompress next frames.
+ The dictionary remains active for decompression of future frames using same DCtx.
+ @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ Note 1 : Currently, only one dictionary can be managed.
+ Referencing a new dictionary effectively "discards" any previous one.
+ Special : adding a NULL DDict means "return to no-dictionary mode".
+ Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize); </b>/* not implemented */<b>
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode); </b>/* not implemented */<b>
+</b><p> Reference a prefix (single-usage dictionary) for next decompression job.
+ Prefix is **only used once**. It must be explicitly referenced before each frame.
+ If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_DDict instead.
+ @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+   Note 2 : Prefix buffer is referenced. It must outlive decompression job.
+ Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
+             Use ZSTD_DCtx_refPrefix_advanced() to alter dictMode.
+ Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+</b><p> Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
+ This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario).
+ This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode.
+ By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX)
+ @return : 0, or an error code (which can be tested using ZSTD_isError()).
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+</b><p> Instruct the decoder context about what kind of data to decode next.
+ This instruction is mandatory to decode data without a fully-formed header,
+  such as ZSTD_f_zstd1_magicless for example.
+ @return : 0, or an error code (which can be tested using ZSTD_isError()).
+
+</p></pre><BR>
+
+<pre><b>size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input);
+</b><p> Behave the same as ZSTD_decompressStream.
+ Decompression parameters cannot be changed once decompression is started.
+ @return : an error code, which can be tested using ZSTD_isError()
+ if >0, a hint, nb of expected input bytes for next invocation.
+ `0` means : a frame has just been fully decoded and flushed.
+
+</p></pre><BR>
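
A sketch of a file-to-file decompression loop built on ZSTD_decompress_generic(). The helper is hypothetical; work buffers are supplied by the caller.

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompress_generic() is experimental in v1.3.2 */
    #include <stdio.h>
    #include <zstd.h>

    /* Decompress fin into fout using caller-provided work buffers. */
    static int decompress_stream(ZSTD_DCtx* dctx, FILE* fin, FILE* fout,
                                 void* inBuf, size_t inCap, void* outBuf, size_t outCap)
    {
        size_t readSize;
        while ((readSize = fread(inBuf, 1, inCap, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const hint = ZSTD_decompress_generic(dctx, &output, &input);
                if (ZSTD_isError(hint)) return 1;
                fwrite(outBuf, 1, output.pos, fout);
                /* hint == 0 : a frame has just been fully decoded and flushed */
            }
        }
        return 0;
    }
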
+
+<pre><b>size_t ZSTD_decompress_generic_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+</b><p> Same as ZSTD_decompress_generic(),
+ but using only integral types as arguments.
+ Argument list is larger than ZSTD_{in,out}Buffer,
but can be helpful for binders from dynamic languages
which have troubles handling structures containing memory pointers.
</p></pre><BR>
-<a name="Chapter20"></a><h2>Block functions</h2><pre>
- Block functions produce and decode raw zstd blocks, without frame metadata.
- Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+<pre><b>void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);
+</b><p> Return a DCtx to clean state.
+ If a decompression was ongoing, any internal data not yet flushed is cancelled.
+ All parameters are back to default values, including sticky ones.
+ Dictionary (if any) is dropped.
+ Parameters can be modified again after a reset.
+
+</p></pre><BR>
+
+<a name="Chapter21"></a><h2>Block level API</h2><pre></pre>
+
+<pre><b></b><p> Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
User will have to take in charge required information to regenerate data, such as compressed and content sizes.
A few rules to respect :
@@ -961,7 +1178,7 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ copyCCtx() and copyDCtx() can be used too
- - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
@@ -972,12 +1189,12 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+ In case of multiple successive blocks, should some of them be uncompressed,
decoder must be informed of their existence in order to follow proper history.
Use ZSTD_insertBlock() for such a case.
-<BR></pre>
+</p></pre><BR>
<h3>Raw zstd block functions</h3><pre></pre><b><pre>size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); </b>/**< insert block into `dctx` history. Useful for uncompressed blocks */<b>
+size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); </b>/**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression */<b>
</pre></b><BR>
</html>
</body>
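
A minimal raw-block round-trip following the rules listed in the block-level chapter above. The helper is hypothetical, and sizes are kept in-process purely for illustration; a real user must transmit them out of band.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    /* Compress then decompress one raw block (no frame header). */
    static int block_roundtrip(const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        size_t const cap = ZSTD_compressBound(srcSize);
        void* const cBuf = malloc(cap);
        void* const dBuf = malloc(srcSize);
        int ok = 0;

        if (cctx && dctx && cBuf && dBuf
            && !ZSTD_isError(ZSTD_compressBegin(cctx, 3))          /* init is mandatory */
            && srcSize <= ZSTD_getBlockSize(cctx)) {               /* block size limit */
            size_t const cSize = ZSTD_compressBlock(cctx, cBuf, cap, src, srcSize);
            if (!ZSTD_isError(cSize) && cSize > 0) {               /* 0 means the block was not compressible */
                ZSTD_decompressBegin(dctx);
                {   size_t const dSize = ZSTD_decompressBlock(dctx, dBuf, srcSize, cBuf, cSize);
                    ok = !ZSTD_isError(dSize) && dSize == srcSize
                         && memcmp(src, dBuf, srcSize) == 0;
                }
            }
        }
        free(cBuf); free(dBuf);
        ZSTD_freeCCtx(cctx); ZSTD_freeDCtx(dctx);
        return ok;
    }
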
diff --git a/lib/.gitignore b/lib/.gitignore
index b43a8543a199..4cd50ac61e52 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -1,2 +1,3 @@
# make install artefact
libzstd.pc
+libzstd-nomt
diff --git a/lib/Makefile b/lib/Makefile
index 5845cf170e71..cdfdc5cdfd46 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -1,13 +1,11 @@
-# ##########################################################################
-# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+# ################################################################
+# Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
# All rights reserved.
#
-# This Makefile is validated for Linux, macOS, *BSD, Hurd, Solaris, MSYS2 targets
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-# ##########################################################################
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
# Version numbers
LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h`
@@ -31,7 +29,7 @@ CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS)
-ZSTD_FILES := $(wildcard common/*.c compress/*.c decompress/*.c dictBuilder/*.c deprecated/*.c)
+ZSTD_FILES := $(sort $(wildcard common/*.c compress/*.c decompress/*.c dictBuilder/*.c deprecated/*.c))
ZSTD_LEGACY_SUPPORT ?= 4
@@ -103,10 +101,19 @@ lib-release lib-release-mt: DEBUGFLAGS :=
lib-release: lib
lib-release-mt: lib-mt
+# Special case : building library in single-thread mode _and_ without zstdmt_compress.c
+ZSTDMT_FILES = compress/zstdmt_compress.c
+ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(ZSTD_FILES))
+libzstd-nomt: LDFLAGS += -shared -fPIC -fvisibility=hidden
+libzstd-nomt: $(ZSTD_NOMT_FILES)
+ @echo compiling single-thread dynamic library $(LIBVER)
+ @echo files : $(ZSTD_NOMT_FILES)
+ @$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@
+
clean:
@$(RM) -r *.dSYM # Mac OS-X specific
@$(RM) core *.o *.a *.gcda *.$(SHARED_EXT) *.$(SHARED_EXT).* libzstd.pc
- @$(RM) dll/libzstd.dll dll/libzstd.lib
+ @$(RM) dll/libzstd.dll dll/libzstd.lib libzstd-nomt*
@$(RM) common/*.o compress/*.o decompress/*.o dictBuilder/*.o legacy/*.o deprecated/*.o
@echo Cleaning library completed
@@ -115,16 +122,17 @@ clean:
#-----------------------------------------------------------------------------
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))
-ifneq (,$(filter $(shell uname),SunOS))
-INSTALL ?= ginstall
-else
-INSTALL ?= install
-endif
-
-PREFIX ?= /usr/local
-DESTDIR ?=
-LIBDIR ?= $(PREFIX)/lib
-INCLUDEDIR ?= $(PREFIX)/include
+DESTDIR ?=
+# directory variables : GNU conventions prefer lowercase
+# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html
+# support both lower and uppercase (BSD), use uppercase in script
+prefix ?= /usr/local
+PREFIX ?= $(prefix)
+exec_prefix ?= $(PREFIX)
+libdir ?= $(exec_prefix)/lib
+LIBDIR ?= $(libdir)
+includedir ?= $(PREFIX)/include
+INCLUDEDIR ?= $(includedir)
ifneq (,$(filter $(shell uname),OpenBSD FreeBSD NetBSD DragonFly))
PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig
@@ -132,8 +140,14 @@ else
PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig
endif
-INSTALL_LIB ?= $(INSTALL) -m 755
-INSTALL_DATA ?= $(INSTALL) -m 644
+ifneq (,$(filter $(shell uname),SunOS))
+INSTALL ?= ginstall
+else
+INSTALL ?= install
+endif
+
+INSTALL_PROGRAM ?= $(INSTALL)
+INSTALL_DATA ?= $(INSTALL) -m 644
libzstd.pc:
@@ -150,9 +164,9 @@ install: libzstd.a libzstd libzstd.pc
@$(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/
@echo Installing libraries
@$(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR)
- @$(INSTALL_LIB) libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)
- @ln -sf libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR)
- @ln -sf libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT)
+ @$(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR)
+ @ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR)
+ @ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT)
@echo Installing includes
@$(INSTALL_DATA) zstd.h $(DESTDIR)$(INCLUDEDIR)
@$(INSTALL_DATA) common/zstd_errors.h $(DESTDIR)$(INCLUDEDIR)
@@ -164,7 +178,7 @@ uninstall:
@$(RM) $(DESTDIR)$(LIBDIR)/libzstd.a
@$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT)
@$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR)
- @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_VER)
+ @$(RM) $(DESTDIR)$(LIBDIR)/$(LIBZSTD)
@$(RM) $(DESTDIR)$(PKGCONFIGDIR)/libzstd.pc
@$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd.h
@$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd_errors.h
diff --git a/lib/README.md b/lib/README.md
index 79b6fd50014d..df136c4867d2 100644
--- a/lib/README.md
+++ b/lib/README.md
@@ -1,57 +1,78 @@
Zstandard library files
================================
-The __lib__ directory contains several directories.
-Depending on target use case, it's enough to include only files from relevant directories.
+The __lib__ directory is split into several sub-directories,
+in order to make it easier to select or exclude specific features.
+
+
+#### Building
+
+A `Makefile` script is provided, supporting the standard set of commands,
+directories, and variables (see https://www.gnu.org/prep/standards/html_node/Command-Variables.html).
+- `make` : generates both static and dynamic libraries
+- `make install` : installs libraries in default system directories
#### API
-Zstandard's stable API is exposed within [zstd.h](zstd.h),
-at the root of `lib` directory.
+Zstandard's stable API is exposed within [lib/zstd.h](zstd.h).
#### Advanced API
-Some additional API may be useful if you're looking into advanced features :
-- common/error_public.h : transforms `size_t` function results into an `enum`,
- for precise error handling.
-- ZSTD_STATIC_LINKING_ONLY : if you define this macro _before_ including `zstd.h`,
- it will give access to advanced and experimental API.
+Optional advanced features are exposed via :
+
+- `lib/common/zstd_errors.h` : translates `size_t` function results
+ into a `ZSTD_ErrorCode`, for accurate error handling.
+- `ZSTD_STATIC_LINKING_ONLY` : if this macro is defined _before_ including `zstd.h`,
+ it unlocks access to the advanced and experimental API,
+ exposed in the second part of `zstd.h`.
These APIs shall ___never be used with a dynamic library___ !
They are not "stable", their definition may change in the future.
Only static linking is allowed.
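
As an illustration of the precise error handling mentioned above, a hedged sketch (assuming the headers installed by `make install`) :

    #include <stdio.h>
    #include <zstd.h>
    #include <zstd_errors.h>   /* ZSTD_getErrorCode(), ZSTD_ErrorCode */

    /* Compress, and report a precise error code instead of a generic failure. */
    static size_t compress_or_report(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize)
    {
        size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
        if (ZSTD_isError(r)) {
            if (ZSTD_getErrorCode(r) == ZSTD_error_dstSize_tooSmall)
                fprintf(stderr, "dst too small : size it with ZSTD_compressBound(srcSize)\n");
            else
                fprintf(stderr, "compression error : %s\n", ZSTD_getErrorName(r));
        }
        return r;
    }
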
-#### ZSTDMT API
-
-To enable multithreaded compression within the library, invoke `make lib-mt` target.
-Prototypes are defined in header file `compress/zstdmt_compress.h`.
-When linking a program that uses ZSTDMT API against libzstd.a on a POSIX system,
-`-pthread` flag must be provided to the compiler and linker.
-Note : ZSTDMT prototypes can still be used with a library built without multithread support,
-but in this case, they will be single threaded only.
#### Modular build
-Directory `common/` is required in all circumstances.
-You can select to support compression only, by just adding files from the `compress/` directory,
-In a similar way, you can build a decompressor-only library with the `decompress/` directory.
-
-Other optional functionalities provided are :
-
-- `dictBuilder/` : source files to create dictionaries.
- The API can be consulted in `dictBuilder/zdict.h`.
- This module also depends on `common/` and `compress/` .
-
-- `legacy/` : source code to decompress previous versions of zstd, starting from `v0.1`.
- This module also depends on `common/` and `decompress/` .
- Library compilation must include directive `ZSTD_LEGACY_SUPPORT = 1` .
- The main API can be consulted in `legacy/zstd_legacy.h`.
- Advanced API from each version can be found in their relevant header file.
- For example, advanced API for version `v0.4` is in `legacy/zstd_v04.h` .
-
-
-#### Using MinGW+MSYS to create DLL
+- Directory `lib/common` is always required, for all variants.
+- Compression source code lies in `lib/compress`
+- Decompression source code lies in `lib/decompress`
+- It's possible to include only `compress` or only `decompress`; they don't depend on each other.
+- `lib/dictBuilder` : makes it possible to generate dictionaries from a set of samples.
+ The API is exposed in `lib/dictBuilder/zdict.h`.
+ This module depends on both `lib/common` and `lib/compress` .
+- `lib/legacy` : source code to decompress older zstd formats, starting from `v0.1`.
+ This module depends on `lib/common` and `lib/decompress`.
+ To enable this feature, it's necessary to define `ZSTD_LEGACY_SUPPORT = 1` during compilation.
+ Typically, with `gcc`, add argument `-DZSTD_LEGACY_SUPPORT=1`.
+ Using a higher number limits the number of versions supported.
+ For example, `ZSTD_LEGACY_SUPPORT=2` means : "support legacy formats starting from v0.2+".
+ The API is exposed in `lib/legacy/zstd_legacy.h`.
+ Each version also provides a (dedicated) set of advanced API.
+ For example, advanced API for version `v0.4` is exposed in `lib/legacy/zstd_v04.h` .
+
+
+#### Multithreading support
+
+Multithreading is disabled by default when building with `make`.
+Enabling multithreading requires 2 conditions :
+- set macro `ZSTD_MULTITHREAD`
+- on POSIX systems : compile with pthread (`-pthread` compilation flag for `gcc` for example)
+
+Both conditions are automatically triggered by invoking `make lib-mt` target.
+Note that, when linking a POSIX program with a multithreaded version of `libzstd`,
+it's necessary to pass the `-pthread` flag during the link stage.
+
+Multithreading capabilities are exposed via :
+- private API `lib/compress/zstdmt_compress.h`.
+ Symbols defined in this header are currently exposed in `libzstd`, hence usable.
+ Note however that this API is planned to be locked and remain strictly internal in the future.
+- advanced API `ZSTD_compress_generic()`, defined in `lib/zstd.h`, experimental section.
+ This API is still considered experimental, but is designed to be labelled "stable" at some point in the future.
+ It's the recommended entry point for multi-threading operations; a minimal usage sketch follows below.
+
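
A minimal multithreaded compression sketch through `ZSTD_compress_generic()`, assuming the library was built with `make lib-mt` and that the parameter names (`ZSTD_p_nbThreads`, `ZSTD_e_end`) are those of the experimental section of `zstd.h` in this version :

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compress_generic() is experimental in this version */
    #include <zstd.h>

    /* One-shot multithreaded compression through the generic API. */
    static size_t compress_mt(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              int level, unsigned nbThreads)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t ret;
        if (cctx == NULL) return (size_t)-1;   /* treated as an error by ZSTD_isError() */
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, (unsigned)level);
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, nbThreads);   /* >1 needs a -mt build */
        ret = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        ZSTD_freeCCtx(cctx);
        if (ZSTD_isError(ret)) return ret;
        return (ret == 0) ? out.pos : (size_t)-1;   /* non-zero : dst too small to flush the frame */
    }
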
+
+#### Windows : using MinGW+MSYS to create DLL
DLL can be created using MinGW+MSYS with the `make libzstd` command.
This command creates `dll\libzstd.dll` and the import library `dll\libzstd.lib`.
@@ -67,19 +88,21 @@ file it should be linked with `dll\libzstd.dll`. For example:
The compiled executable will require ZSTD DLL which is available at `dll\libzstd.dll`.
-#### Obsolete streaming API
+#### Deprecated API
-Streaming is now provided within `zstd.h`.
-Older streaming API is still available within `deprecated/zbuff.h`.
-It will be removed in a future version.
-Consider migrating code towards newer streaming API in `zstd.h`.
+Obsolete APIs on their way out are stored in the `lib/deprecated` directory.
+At this stage, it contains older streaming prototypes, in `lib/deprecated/zbuff.h`.
+Presence in this directory is temporary.
+These prototypes will be removed in some future version.
+Consider migrating code towards the supported streaming API exposed in `zstd.h`, as sketched below.
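
A minimal sketch of the supported streaming API that replaces `ZBUFF_*`, assuming the caller already created the stream with `ZSTD_createCStream()` / `ZSTD_initCStream()` and that `dst` is large enough (e.g. `ZSTD_compressBound(srcSize)`) :

    #include <zstd.h>

    /* Push one chunk through an already-initialized ZSTD_CStream ;
     * returns the number of bytes written into dst, or an error code. */
    static size_t stream_compress_chunk(ZSTD_CStream* zcs,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        int lastChunk)
    {
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        while (in.pos < in.size) {
            size_t const r = ZSTD_compressStream(zcs, &out, &in);
            if (ZSTD_isError(r)) return r;
        }
        if (lastChunk) {
            size_t const remaining = ZSTD_endStream(zcs, &out);
            if (ZSTD_isError(remaining)) return remaining;
            /* a non-zero `remaining` would mean dst was too small for the frame epilogue */
        }
        return out.pos;
    }
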
#### Miscellaneous
The other files are not source code. They are :
- - LICENSE : contains the BSD license text
- - Makefile : script to compile or install zstd library (static and dynamic)
- - libzstd.pc.in : for pkg-config (`make install`)
- - README.md : this file
+ - `LICENSE` : contains the BSD license text
+ - `Makefile` : `make` script to build and install zstd library (static and dynamic)
+ - `BUCK` : support for `buck` build system (https://buckbuild.com/)
+ - `libzstd.pc.in` : for `pkg-config` (used in `make install`)
+ - `README.md` : this file
diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h
index 06121f21c5b3..2094823fe240 100644
--- a/lib/common/bitstream.h
+++ b/lib/common/bitstream.h
@@ -169,33 +169,39 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (register U32 val)
{
+ assert(val != 0);
+ {
# if defined(_MSC_VER) /* Visual */
- unsigned long r=0;
- _BitScanReverse ( &r, val );
- return (unsigned) r;
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return 31 - __builtin_clz (val);
+ return 31 - __builtin_clz (val);
# else /* Software version */
- static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
- 11, 14, 16, 18, 22, 25, 3, 30,
- 8, 12, 20, 28, 15, 17, 24, 7,
- 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
# endif
+ }
}
/*===== Local Constants =====*/
-static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
- 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
- 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
- 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF }; /* up to 26 bits */
-
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
/*-**************************************************************
* bitStream encoding
@@ -217,11 +223,14 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
}
/*! BIT_addBits() :
- * can add up to 26 bits into `bitC`.
+ * can add up to 31 bits into `bitC`.
* Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
size_t value, unsigned nbBits)
{
+ MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
bitC->bitPos += nbBits;
}
@@ -232,6 +241,7 @@ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
size_t value, unsigned nbBits)
{
assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
bitC->bitContainer |= value << bitC->bitPos;
bitC->bitPos += nbBits;
}
@@ -242,7 +252,7 @@ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
- assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) );
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
assert(bitC->ptr <= bitC->endPtr);
@@ -258,7 +268,7 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
- assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) );
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
@@ -350,12 +360,14 @@ MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 co
# endif
return _bextr_u32(bitContainer, start, nbBits);
#else
+ assert(nbBits < BIT_MASK_SIZE);
return (bitContainer >> start) & BIT_mask[nbBits];
#endif
}
MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
+ assert(nbBits < BIT_MASK_SIZE);
return bitContainer & BIT_mask[nbBits];
}
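
For reference, the software fallback above computes the position of the highest set bit ; a self-contained restatement (sketch only, same De Bruijn table as the patched code) :

    #include <assert.h>
    #include <stdint.h>

    /* Position of the highest set bit :
     * e.g. highbit32_sw(1) == 0, highbit32_sw(32) == 5, highbit32_sw(0x80000000u) == 31. */
    static unsigned highbit32_sw(uint32_t val)
    {
        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
                                                 11, 14, 16, 18, 22, 25,  3, 30,
                                                  8, 12, 20, 28, 15, 17, 24,  7,
                                                 19, 27, 23,  6, 26,  5,  4, 31 };
        uint32_t v = val;
        assert(val != 0);   /* zero is invalid input, as asserted in the patched version */
        v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
        return DeBruijnClz[(uint32_t)(v * 0x07C4ACDDU) >> 27];
    }
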
diff --git a/lib/common/compiler.h b/lib/common/compiler.h
index d7225c443e91..3a7553c3807e 100644
--- a/lib/common/compiler.h
+++ b/lib/common/compiler.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_COMPILER_H
diff --git a/lib/common/error_private.c b/lib/common/error_private.c
index b5b14b509cf1..11f7cdab1c60 100644
--- a/lib/common/error_private.c
+++ b/lib/common/error_private.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* The purpose of this file is to have a single list of error strings embedded in binary */
@@ -29,14 +30,15 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
- case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
- case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ /* following error codes are not stable and may be removed or changed in a future version */
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(maxCode):
diff --git a/lib/common/error_private.h b/lib/common/error_private.h
index 9dd9a87cfac8..0d2fa7e34b01 100644
--- a/lib/common/error_private.h
+++ b/lib/common/error_private.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* Note : this module is expected to remain private, do not expose it */
@@ -48,10 +49,9 @@ typedef ZSTD_ErrorCode ERR_enum;
/*-****************************************
* Error codes handling
******************************************/
-#ifdef ERROR
-# undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
-#endif
-#define ERROR(name) ((size_t)-PREFIX(name))
+#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
diff --git a/lib/common/fse.h b/lib/common/fse.h
index 1c44f8375078..afd78019635a 100644
--- a/lib/common/fse.h
+++ b/lib/common/fse.h
@@ -184,7 +184,7 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const sh
/*! Constructor and Destructor of FSE_CTable.
Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned tableLog, unsigned maxSymbolValue);
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
/*! FSE_buildCTable():
diff --git a/lib/common/huf.h b/lib/common/huf.h
index 2b3015a84c19..522bf9b6c003 100644
--- a/lib/common/huf.h
+++ b/lib/common/huf.h
@@ -242,7 +242,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
/** HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
-size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize);
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
/*
diff --git a/lib/common/mem.h b/lib/common/mem.h
index df85404fb869..23335c3146d7 100644
--- a/lib/common/mem.h
+++ b/lib/common/mem.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef MEM_H_MODULE
diff --git a/lib/common/pool.c b/lib/common/pool.c
index a227044f7f1c..1b0fe1035d2d 100644
--- a/lib/common/pool.c
+++ b/lib/common/pool.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -25,13 +26,14 @@
/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
- POOL_function function;
- void *opaque;
+ POOL_function function;
+ void *opaque;
} POOL_job;
struct POOL_ctx_s {
+ ZSTD_customMem customMem;
/* Keep track of the threads */
- pthread_t *threads;
+ ZSTD_pthread_t *threads;
size_t numThreads;
/* The queue is a circular buffer */
@@ -46,11 +48,11 @@ struct POOL_ctx_s {
int queueEmpty;
/* The mutex protects the queue */
- pthread_mutex_t queueMutex;
+ ZSTD_pthread_mutex_t queueMutex;
/* Condition variable for pushers to wait on when the queue is full */
- pthread_cond_t queuePushCond;
+ ZSTD_pthread_cond_t queuePushCond;
/* Condition variables for poppers to wait on when the queue is empty */
- pthread_cond_t queuePopCond;
+ ZSTD_pthread_cond_t queuePopCond;
/* Indicates if the queue is shutting down */
int shutdown;
};
@@ -65,14 +67,14 @@ static void* POOL_thread(void* opaque) {
if (!ctx) { return NULL; }
for (;;) {
/* Lock the mutex and wait for a non-empty queue or until shutdown */
- pthread_mutex_lock(&ctx->queueMutex);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
while (ctx->queueEmpty && !ctx->shutdown) {
- pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+ ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
}
/* empty => shutting down: so stop */
if (ctx->queueEmpty) {
- pthread_mutex_unlock(&ctx->queueMutex);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
return opaque;
}
/* Pop a job off the queue */
@@ -81,28 +83,32 @@ static void* POOL_thread(void* opaque) {
ctx->numThreadsBusy++;
ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
/* Unlock the mutex, signal a pusher, and run the job */
- pthread_mutex_unlock(&ctx->queueMutex);
- pthread_cond_signal(&ctx->queuePushCond);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ ZSTD_pthread_cond_signal(&ctx->queuePushCond);
job.function(job.opaque);
/* If the intended queue size was 0, signal after finishing job */
if (ctx->queueSize == 1) {
- pthread_mutex_lock(&ctx->queueMutex);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
ctx->numThreadsBusy--;
- pthread_mutex_unlock(&ctx->queueMutex);
- pthread_cond_signal(&ctx->queuePushCond);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ ZSTD_pthread_cond_signal(&ctx->queuePushCond);
} }
} /* for (;;) */
/* Unreachable */
}
-POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
- POOL_ctx *ctx;
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+ return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+ POOL_ctx* ctx;
/* Check the parameters */
if (!numThreads) { return NULL; }
/* Allocate the context and zero initialize */
- ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx));
+ ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
if (!ctx) { return NULL; }
/* Initialize the job queue.
* It needs one extra space since one space is wasted to differentiate empty
@@ -114,19 +120,20 @@ POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
ctx->queueTail = 0;
ctx->numThreadsBusy = 0;
ctx->queueEmpty = 1;
- (void)pthread_mutex_init(&ctx->queueMutex, NULL);
- (void)pthread_cond_init(&ctx->queuePushCond, NULL);
- (void)pthread_cond_init(&ctx->queuePopCond, NULL);
+ (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
+ (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
+ (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
ctx->shutdown = 0;
/* Allocate space for the thread handles */
- ctx->threads = (pthread_t*)malloc(numThreads * sizeof(pthread_t));
+ ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
ctx->numThreads = 0;
+ ctx->customMem = customMem;
/* Check for errors */
if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
/* Initialize the threads */
{ size_t i;
for (i = 0; i < numThreads; ++i) {
- if (pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+ if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
ctx->numThreads = i;
POOL_free(ctx);
return NULL;
@@ -139,37 +146,37 @@ POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
/*! POOL_join() :
Shutdown the queue, wake any sleeping threads, and join all of the threads.
*/
-static void POOL_join(POOL_ctx *ctx) {
+static void POOL_join(POOL_ctx* ctx) {
/* Shut down the queue */
- pthread_mutex_lock(&ctx->queueMutex);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
ctx->shutdown = 1;
- pthread_mutex_unlock(&ctx->queueMutex);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
/* Wake up sleeping threads */
- pthread_cond_broadcast(&ctx->queuePushCond);
- pthread_cond_broadcast(&ctx->queuePopCond);
+ ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
+ ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
/* Join all of the threads */
{ size_t i;
for (i = 0; i < ctx->numThreads; ++i) {
- pthread_join(ctx->threads[i], NULL);
+ ZSTD_pthread_join(ctx->threads[i], NULL);
} }
}
void POOL_free(POOL_ctx *ctx) {
if (!ctx) { return; }
POOL_join(ctx);
- pthread_mutex_destroy(&ctx->queueMutex);
- pthread_cond_destroy(&ctx->queuePushCond);
- pthread_cond_destroy(&ctx->queuePopCond);
- if (ctx->queue) free(ctx->queue);
- if (ctx->threads) free(ctx->threads);
- free(ctx);
+ ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
+ ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
+ ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
+ ZSTD_free(ctx->queue, ctx->customMem);
+ ZSTD_free(ctx->threads, ctx->customMem);
+ ZSTD_free(ctx, ctx->customMem);
}
size_t POOL_sizeof(POOL_ctx *ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
return sizeof(*ctx)
+ ctx->queueSize * sizeof(POOL_job)
- + ctx->numThreads * sizeof(pthread_t);
+ + ctx->numThreads * sizeof(ZSTD_pthread_t);
}
/**
@@ -191,12 +198,12 @@ void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
if (!ctx) { return; }
- pthread_mutex_lock(&ctx->queueMutex);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
{ POOL_job const job = {function, opaque};
/* Wait until there is space in the queue for the new job */
while (isQueueFull(ctx) && !ctx->shutdown) {
- pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+ ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
}
/* The queue is still going => there is space */
if (!ctx->shutdown) {
@@ -205,8 +212,8 @@ void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
}
}
- pthread_mutex_unlock(&ctx->queueMutex);
- pthread_cond_signal(&ctx->queuePopCond);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}
#else /* ZSTD_MULTITHREAD not defined */
@@ -214,26 +221,34 @@ void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
/* We don't need any data, but if it is empty malloc() might return NULL. */
struct POOL_ctx_s {
- int data;
+ int dummy;
};
+static POOL_ctx g_ctx;
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
- (void)numThreads;
- (void)queueSize;
- return (POOL_ctx*)malloc(sizeof(POOL_ctx));
+ return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+ (void)numThreads;
+ (void)queueSize;
+ (void)customMem;
+ return &g_ctx;
}
void POOL_free(POOL_ctx* ctx) {
- free(ctx);
+ assert(!ctx || ctx == &g_ctx);
+ (void)ctx;
}
void POOL_add(void* ctx, POOL_function function, void* opaque) {
- (void)ctx;
- function(opaque);
+ (void)ctx;
+ function(opaque);
}
size_t POOL_sizeof(POOL_ctx* ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
+ assert(ctx == &g_ctx);
return sizeof(*ctx);
}
diff --git a/lib/common/pool.h b/lib/common/pool.h
index 264c5c9ca7ea..08c63715aaa6 100644
--- a/lib/common/pool.h
+++ b/lib/common/pool.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef POOL_H
@@ -16,6 +17,7 @@ extern "C" {
#include <stddef.h> /* size_t */
+#include "zstd_internal.h" /* ZSTD_customMem */
typedef struct POOL_ctx_s POOL_ctx;
@@ -27,6 +29,8 @@ typedef struct POOL_ctx_s POOL_ctx;
*/
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
+POOL_ctx *POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
+
/*! POOL_free() :
Free a thread pool returned by POOL_create().
*/
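
A hedged usage sketch of this internal thread pool, assuming it is compiled within the library tree (the header is private and not installed) :

    #include <stdio.h>
    #include "pool.h"   /* lib/common/pool.h : private API */

    static void print_job(void* opaque)
    {
        printf("job %d done\n", *(int*)opaque);
    }

    int main(void)
    {
        int ids[4] = { 0, 1, 2, 3 };
        int i;
        POOL_ctx* const pool = POOL_create(2 /*threads*/, 4 /*queue slots*/);
        if (pool == NULL) return 1;
        for (i = 0; i < 4; i++)
            POOL_add(pool, print_job, &ids[i]);   /* blocks only while the queue is full */
        POOL_free(pool);   /* shuts the queue down and joins the workers */
        return 0;
    }
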
diff --git a/lib/common/threading.c b/lib/common/threading.c
index 141376c56199..8be8c8da948a 100644
--- a/lib/common/threading.c
+++ b/lib/common/threading.c
@@ -2,9 +2,9 @@
* Copyright (c) 2016 Tino Reichardt
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
@@ -14,12 +14,8 @@
* This file will hold wrapper for systems, which do not support pthreads
*/
-/* When ZSTD_MULTITHREAD is not defined, this file would become an empty translation unit.
-* Include some ISO C header code to prevent this and portably avoid related warnings.
-* (Visual C++: C4206 / GCC: -Wpedantic / Clang: -Wempty-translation-unit)
-*/
-#include <stddef.h>
-
+/* create a fake symbol to avoid an empty translation unit warning */
+int g_ZSTD_threading_useles_symbol;
#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
@@ -39,12 +35,12 @@
static unsigned __stdcall worker(void *arg)
{
- pthread_t* const thread = (pthread_t*) arg;
+ ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
thread->arg = thread->start_routine(thread->arg);
return 0;
}
-int pthread_create(pthread_t* thread, const void* unused,
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
void* (*start_routine) (void*), void* arg)
{
(void)unused;
@@ -58,16 +54,16 @@ int pthread_create(pthread_t* thread, const void* unused,
return 0;
}
-int _pthread_join(pthread_t * thread, void **value_ptr)
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
{
DWORD result;
- if (!thread->handle) return 0;
+ if (!thread.handle) return 0;
- result = WaitForSingleObject(thread->handle, INFINITE);
+ result = WaitForSingleObject(thread.handle, INFINITE);
switch (result) {
case WAIT_OBJECT_0:
- if (value_ptr) *value_ptr = thread->arg;
+ if (value_ptr) *value_ptr = thread.arg;
return 0;
case WAIT_ABANDONED:
return EINVAL;
diff --git a/lib/common/threading.h b/lib/common/threading.h
index ab09977a86dc..197770db27a8 100644
--- a/lib/common/threading.h
+++ b/lib/common/threading.h
@@ -2,9 +2,9 @@
* Copyright (c) 2016 Tino Reichardt
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
@@ -37,35 +37,38 @@ extern "C" {
# define WIN32_LEAN_AND_MEAN
#endif
+#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
#include <windows.h>
+#undef ERROR
+#define ERROR(name) ZSTD_ERROR(name)
+
/* mutex */
-#define pthread_mutex_t CRITICAL_SECTION
-#define pthread_mutex_init(a,b) (InitializeCriticalSection((a)), 0)
-#define pthread_mutex_destroy(a) DeleteCriticalSection((a))
-#define pthread_mutex_lock(a) EnterCriticalSection((a))
-#define pthread_mutex_unlock(a) LeaveCriticalSection((a))
+#define ZSTD_pthread_mutex_t CRITICAL_SECTION
+#define ZSTD_pthread_mutex_init(a, b) (InitializeCriticalSection((a)), 0)
+#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a))
+#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a))
+#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a))
/* condition variable */
-#define pthread_cond_t CONDITION_VARIABLE
-#define pthread_cond_init(a, b) (InitializeConditionVariable((a)), 0)
-#define pthread_cond_destroy(a) /* No delete */
-#define pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
-#define pthread_cond_signal(a) WakeConditionVariable((a))
-#define pthread_cond_broadcast(a) WakeAllConditionVariable((a))
-
-/* pthread_create() and pthread_join() */
+#define ZSTD_pthread_cond_t CONDITION_VARIABLE
+#define ZSTD_pthread_cond_init(a, b) (InitializeConditionVariable((a)), 0)
+#define ZSTD_pthread_cond_destroy(a) /* No delete */
+#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
+#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a))
+#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
+
+/* ZSTD_pthread_create() and ZSTD_pthread_join() */
typedef struct {
HANDLE handle;
void* (*start_routine)(void*);
void* arg;
-} pthread_t;
+} ZSTD_pthread_t;
-int pthread_create(pthread_t* thread, const void* unused,
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
void* (*start_routine) (void*), void* arg);
-#define pthread_join(a, b) _pthread_join(&(a), (b))
-int _pthread_join(pthread_t* thread, void** value_ptr);
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
/**
* add here more wrappers as required
@@ -76,23 +79,40 @@ int _pthread_join(pthread_t* thread, void** value_ptr);
/* === POSIX Systems === */
# include <pthread.h>
+#define ZSTD_pthread_mutex_t pthread_mutex_t
+#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
+#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
+#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
+#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
+
+#define ZSTD_pthread_cond_t pthread_cond_t
+#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
+#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
+#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
+#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
+#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
+
+#define ZSTD_pthread_t pthread_t
+#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
+#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+
#else /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */
-#define pthread_mutex_t int /* #define rather than typedef, because sometimes pthread support is implicit, resulting in duplicated symbols */
-#define pthread_mutex_init(a,b) ((void)a, 0)
-#define pthread_mutex_destroy(a)
-#define pthread_mutex_lock(a)
-#define pthread_mutex_unlock(a)
+typedef int ZSTD_pthread_mutex_t;
+#define ZSTD_pthread_mutex_init(a, b) ((void)a, 0)
+#define ZSTD_pthread_mutex_destroy(a)
+#define ZSTD_pthread_mutex_lock(a)
+#define ZSTD_pthread_mutex_unlock(a)
-#define pthread_cond_t int
-#define pthread_cond_init(a,b) ((void)a, 0)
-#define pthread_cond_destroy(a)
-#define pthread_cond_wait(a,b)
-#define pthread_cond_signal(a)
-#define pthread_cond_broadcast(a)
+typedef int ZSTD_pthread_cond_t;
+#define ZSTD_pthread_cond_init(a, b) ((void)a, 0)
+#define ZSTD_pthread_cond_destroy(a)
+#define ZSTD_pthread_cond_wait(a, b)
+#define ZSTD_pthread_cond_signal(a)
+#define ZSTD_pthread_cond_broadcast(a)
-/* do not use pthread_t */
+/* do not use ZSTD_pthread_t */
#endif /* ZSTD_MULTITHREAD */
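
A small sketch of the intent of these wrappers : the same code builds with or without `ZSTD_MULTITHREAD`, since the macros reduce to no-ops in single-thread mode (illustrative only) :

    #include "threading.h"   /* lib/common/threading.h : private wrapper */

    typedef struct {
        ZSTD_pthread_mutex_t lock;
        unsigned counter;
    } shared_t;

    static void shared_init(shared_t* s)
    {
        (void)ZSTD_pthread_mutex_init(&s->lock, NULL);
        s->counter = 0;
    }

    static void shared_bump(shared_t* s)
    {
        ZSTD_pthread_mutex_lock(&s->lock);    /* no-op when ZSTD_MULTITHREAD is not defined */
        s->counter += 1;
        ZSTD_pthread_mutex_unlock(&s->lock);
    }

    static void shared_destroy(shared_t* s)
    {
        ZSTD_pthread_mutex_destroy(&s->lock);
    }
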
diff --git a/lib/common/zstd_common.c b/lib/common/zstd_common.c
index 08384cabf59b..c2041053be7f 100644
--- a/lib/common/zstd_common.c
+++ b/lib/common/zstd_common.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -15,8 +16,7 @@
#include <stdlib.h> /* malloc, calloc, free */
#include <string.h> /* memset */
#include "error_private.h"
-#define ZSTD_STATIC_LINKING_ONLY
-#include "zstd.h"
+#include "zstd_internal.h"
/*-****************************************
diff --git a/lib/common/zstd_errors.h b/lib/common/zstd_errors.h
index a69387b714a8..4bcb7769fe77 100644
--- a/lib/common/zstd_errors.h
+++ b/lib/common/zstd_errors.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_ERRORS_H_398273423
@@ -62,9 +63,10 @@ typedef enum {
ZSTD_error_memory_allocation = 64,
ZSTD_error_dstSize_tooSmall = 70,
ZSTD_error_srcSize_wrong = 72,
+ /* following error codes are not stable and may be removed or changed in a future version */
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
- ZSTD_error_maxCode = 120 /* never EVER use this value directly, it may change in future versions! Use ZSTD_isError() instead */
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;
/*! ZSTD_getErrorCode() :
diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h
index 2610528608d4..e91cd20baa79 100644
--- a/lib/common/zstd_internal.h
+++ b/lib/common/zstd_internal.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_CCOMMON_H_MODULE
@@ -29,6 +30,11 @@
#include "xxhash.h" /* XXH_reset, update, digest */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
/*-*************************************
* Debug
***************************************/
@@ -96,9 +102,13 @@ static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
#define BIT0 1
#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */
static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+#define ZSTD_FRAMEIDSIZE 4
+static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE; /* magic number size */
+
#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
@@ -117,7 +127,8 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
#define MaxLit ((1<<Litbits) - 1)
#define MaxML 52
#define MaxLL 35
-#define MaxOff 28
+#define DefaultMaxOff 28
+#define MaxOff 31
#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog 9
#define LLFSELog 9
@@ -143,8 +154,8 @@ static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1,
#define ML_DEFAULTNORMLOG 6 /* for static allocation */
static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
+static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
#define OF_DEFAULTNORMLOG 5 /* for static allocation */
static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
@@ -244,6 +255,26 @@ typedef struct {
} optState_t;
typedef struct {
+ U32 offset;
+ U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
+ ldmEntry_t* hashTable;
+ BYTE* bucketOffsets; /* Next position in bucket to insert entry */
+ U64 hashPower; /* Used to compute the rolling hash.
+ * Depends on ldmParams.minMatchLength */
+} ldmState_t;
+
+typedef struct {
+ U32 enableLdm; /* 1 if long distance matching is enabled */
+ U32 hashLog; /* Log size of hashTable */
+ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
+ U32 minMatchLength; /* Minimum match length */
+ U32 hashEveryLog; /* Log number of entries to skip */
+} ldmParams_t;
+
+typedef struct {
U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
@@ -255,6 +286,28 @@ typedef struct {
FSE_repeat litlength_repeatMode;
} ZSTD_entropyCTables_t;
+struct ZSTD_CCtx_params_s {
+ ZSTD_format_e format;
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+
+ int compressionLevel;
+ U32 forceWindow; /* force back-references to respect limit of
+ * 1<<wLog, even for dictionary */
+
+ /* Multithreading: used to pass parameters to mtctx */
+ U32 nbThreads;
+ unsigned jobSize;
+ unsigned overlapSizeLog;
+
+ /* Long distance matching parameters */
+ ldmParams_t ldmParams;
+
+ /* For use with createCCtxParams() and freeCCtxParams() only */
+ ZSTD_customMem customMem;
+
+}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);
@@ -268,24 +321,27 @@ void ZSTD_free(void* ptr, ZSTD_customMem customMem);
MEM_STATIC U32 ZSTD_highbit32(U32 val)
{
+ assert(val != 0);
+ {
# if defined(_MSC_VER) /* Visual */
- unsigned long r=0;
- _BitScanReverse(&r, val);
- return (unsigned)r;
+ unsigned long r=0;
+ _BitScanReverse(&r, val);
+ return (unsigned)r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
- return 31 - __builtin_clz(val);
+ return 31 - __builtin_clz(val);
# else /* Software version */
- static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- int r;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
- return r;
+ static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ int r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
+ return r;
# endif
+ }
}
@@ -306,7 +362,7 @@ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
const ZSTD_CDict* cdict,
- ZSTD_parameters params, unsigned long long pledgedSrcSize);
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
/*! ZSTD_compressStream_generic() :
* Private use only. To be called from zstdmt_compress.c in single-thread mode. */
@@ -315,10 +371,25 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode);
-/*! ZSTD_getParamsFromCDict() :
+/*! ZSTD_getCParamsFromCDict() :
* as the name implies */
-ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict);
-
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictMode_e dictMode,
+ ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_CCtx_params params);
typedef struct {
blockType_e blockType;
@@ -331,5 +402,8 @@ typedef struct {
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr);
+#if defined (__cplusplus)
+}
+#endif
#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/compress/fse_compress.c b/lib/compress/fse_compress.c
index cc9fa73514ad..549c115d4238 100644
--- a/lib/compress/fse_compress.c
+++ b/lib/compress/fse_compress.c
@@ -461,6 +461,7 @@ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
return minBits;
}
@@ -469,6 +470,7 @@ unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsi
U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
@@ -580,7 +582,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
- { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
U64 const scale = 62 - tableLog;
U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
diff --git a/lib/compress/huf_compress.c b/lib/compress/huf_compress.c
index 2a47c1820526..5692d56e003d 100644
--- a/lib/compress/huf_compress.c
+++ b/lib/compress/huf_compress.c
@@ -167,7 +167,7 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
}
-size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, size_t srcSize)
+size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
@@ -179,7 +179,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, si
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
- if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
@@ -208,9 +208,10 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, si
min >>= 1;
} }
/* assign value within rank, symbol order */
- { U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
+ { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
}
+ *maxSymbolValuePtr = nbSymbols - 1;
return readSize;
}
diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
index 0322c03eb316..2c46c79f1c16 100644
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -25,25 +26,19 @@
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#include "zstdmt_compress.h"
-
-
-/*-*************************************
-* Constants
-***************************************/
-static const U32 g_searchStrength = 8; /* control skip over incompressible data */
-#define HASH_READ_SIZE 8
-typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+#include "zstd_compress.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
/*-*************************************
* Helper functions
***************************************/
size_t ZSTD_compressBound(size_t srcSize) {
- size_t const lowLimit = 256 KB;
- size_t const margin = (srcSize < lowLimit) ? (lowLimit-srcSize) >> 12 : 0; /* from 64 to 0 */
- return srcSize + (srcSize >> 8) + margin;
+ return ZSTD_COMPRESSBOUND(srcSize);
}
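
A short sketch of the intended use of `ZSTD_compressBound()` for sizing the destination buffer (illustrative only) :

    #include <stdlib.h>
    #include <zstd.h>

    /* Allocate a worst-case buffer with ZSTD_compressBound() so that
     * ZSTD_compress() cannot fail with dstSize_tooSmall. */
    static void* compress_alloc(const void* src, size_t srcSize,
                                size_t* cSizePtr, int level)
    {
        size_t const bound = ZSTD_compressBound(srcSize);
        void* const dst = malloc(bound);
        if (dst == NULL) return NULL;
        *cSizePtr = ZSTD_compress(dst, bound, src, srcSize, level);
        if (ZSTD_isError(*cSizePtr)) { free(dst); return NULL; }
        return dst;
    }
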
@@ -61,8 +56,6 @@ static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
/*-*************************************
* Context memory management
***************************************/
-typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
-
struct ZSTD_CDict_s {
void* dictBuffer;
const void* dictContent;
@@ -70,65 +63,6 @@ struct ZSTD_CDict_s {
ZSTD_CCtx* refContext;
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
-struct ZSTD_CCtx_s {
- const BYTE* nextSrc; /* next block here to continue on current prefix */
- const BYTE* base; /* All regular indexes relative to this position */
- const BYTE* dictBase; /* extDict indexes relative to this position */
- U32 dictLimit; /* below that point, need extDict */
- U32 lowLimit; /* below that point, no more data */
- U32 nextToUpdate; /* index from which to continue dictionary update */
- U32 nextToUpdate3; /* index from which to continue dictionary update */
- U32 hashLog3; /* dispatch table : larger == faster, more memory */
- U32 loadedDictEnd; /* index of end of dictionary */
- U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */
- ZSTD_compressionStage_e stage;
- U32 dictID;
- int compressionLevel;
- ZSTD_parameters requestedParams;
- ZSTD_parameters appliedParams;
- void* workSpace;
- size_t workSpaceSize;
- size_t blockSize;
- U64 pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
- U64 consumedSrcSize;
- XXH64_state_t xxhState;
- ZSTD_customMem customMem;
- size_t staticSize;
-
- seqStore_t seqStore; /* sequences storage ptrs */
- optState_t optState;
- U32* hashTable;
- U32* hashTable3;
- U32* chainTable;
- ZSTD_entropyCTables_t* entropy;
-
- /* streaming */
- char* inBuff;
- size_t inBuffSize;
- size_t inToCompress;
- size_t inBuffPos;
- size_t inBuffTarget;
- char* outBuff;
- size_t outBuffSize;
- size_t outBuffContentSize;
- size_t outBuffFlushedSize;
- ZSTD_cStreamStage streamStage;
- U32 frameEnded;
-
- /* Dictionary */
- ZSTD_dictMode_e dictMode; /* select restricting dictionary to "rawContent" or "fullDict" only */
- U32 dictContentByRef;
- ZSTD_CDict* cdictLocal;
- const ZSTD_CDict* cdict;
- const void* prefix;
- size_t prefixSize;
-
- /* Multi-threading */
- U32 nbThreads;
- ZSTDMT_CCtx* mtctx;
-};
-
-
ZSTD_CCtx* ZSTD_createCCtx(void)
{
return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
@@ -143,7 +77,7 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
cctx = (ZSTD_CCtx*) ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
cctx->customMem = customMem;
- cctx->compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
return cctx;
@@ -151,7 +85,7 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
{
- ZSTD_CCtx* cctx = (ZSTD_CCtx*) workspace;
+ ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? */
@@ -161,7 +95,7 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
/* entropy space (never moves) */
if (cctx->workSpaceSize < sizeof(ZSTD_entropyCTables_t)) return NULL;
- assert(((size_t)cctx->workSpace & 7) == 0); /* ensure correct alignment */
+ assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
cctx->entropy = (ZSTD_entropyCTables_t*)cctx->workSpace;
return cctx;
@@ -175,23 +109,36 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
cctx->workSpace = NULL;
ZSTD_freeCDict(cctx->cdictLocal);
cctx->cdictLocal = NULL;
+#ifdef ZSTD_MULTITHREAD
ZSTDMT_freeCCtx(cctx->mtctx);
cctx->mtctx = NULL;
+#endif
ZSTD_free(cctx, cctx->customMem);
return 0; /* reserved as a potential error code in the future */
}
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ return ZSTDMT_sizeof_CCtx(cctx->mtctx);
+#else
+ (void) cctx;
+ return 0;
+#endif
+}
+
+
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support sizeof on NULL */
- DEBUGLOG(5, "sizeof(*cctx) : %u", (U32)sizeof(*cctx));
- DEBUGLOG(5, "workSpaceSize : %u", (U32)cctx->workSpaceSize);
- DEBUGLOG(5, "streaming buffers : %u", (U32)(cctx->outBuffSize + cctx->inBuffSize));
- DEBUGLOG(5, "inner MTCTX : %u", (U32)ZSTDMT_sizeof_CCtx(cctx->mtctx));
+ DEBUGLOG(3, "sizeof(*cctx) : %u", (U32)sizeof(*cctx));
+ DEBUGLOG(3, "workSpaceSize (including streaming buffers): %u", (U32)cctx->workSpaceSize);
+ DEBUGLOG(3, "inner cdict : %u", (U32)ZSTD_sizeof_CDict(cctx->cdictLocal));
+ DEBUGLOG(3, "inner MTCTX : %u", (U32)ZSTD_sizeof_mtctx(cctx));
return sizeof(*cctx) + cctx->workSpaceSize
+ ZSTD_sizeof_CDict(cctx->cdictLocal)
- + cctx->outBuffSize + cctx->inBuffSize
- + ZSTDMT_sizeof_CCtx(cctx->mtctx);
+ + ZSTD_sizeof_mtctx(cctx);
}
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
@@ -202,29 +149,99 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
-static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx) { return cctx->appliedParams; }
+#define ZSTD_CLEVEL_CUSTOM 999
-/* older variant; will be deprecated */
-size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value)
+static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ ZSTD_CCtx_params params, U64 srcSizeHint, size_t dictSize)
{
- switch(param)
- {
- case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0;
- ZSTD_STATIC_ASSERT(ZSTD_dm_auto==0);
- ZSTD_STATIC_ASSERT(ZSTD_dm_rawContent==1);
- case ZSTD_p_forceRawDict : cctx->dictMode = (ZSTD_dictMode_e)(value>0); return 0;
- default: return ERROR(parameter_unsupported);
- }
+ return (params.compressionLevel == ZSTD_CLEVEL_CUSTOM ?
+ params.cParams :
+ ZSTD_getCParams(params.compressionLevel, srcSizeHint, dictSize));
}
+static void ZSTD_cLevelToCCtxParams_srcSize(ZSTD_CCtx_params* params, U64 srcSize)
+{
+ params->cParams = ZSTD_getCParamsFromCCtxParams(*params, srcSize, 0);
+ params->compressionLevel = ZSTD_CLEVEL_CUSTOM;
+}
-#define ZSTD_CLEVEL_CUSTOM 999
static void ZSTD_cLevelToCParams(ZSTD_CCtx* cctx)
{
- if (cctx->compressionLevel==ZSTD_CLEVEL_CUSTOM) return;
- cctx->requestedParams.cParams = ZSTD_getCParams(cctx->compressionLevel,
- cctx->pledgedSrcSizePlusOne-1, 0);
- cctx->compressionLevel = ZSTD_CLEVEL_CUSTOM;
+ ZSTD_cLevelToCCtxParams_srcSize(
+ &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1);
+}
+
+static void ZSTD_cLevelToCCtxParams(ZSTD_CCtx_params* params)
+{
+ ZSTD_cLevelToCCtxParams_srcSize(params, 0);
+}
+
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params cctxParams;
+ memset(&cctxParams, 0, sizeof(cctxParams));
+ cctxParams.cParams = cParams;
+ cctxParams.compressionLevel = ZSTD_CLEVEL_CUSTOM;
+ return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params* params;
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+ params = (ZSTD_CCtx_params*)ZSTD_calloc(
+ sizeof(ZSTD_CCtx_params), customMem);
+ if (!params) { return NULL; }
+ params->customMem = customMem;
+ params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+ return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+ if (params == NULL) { return 0; }
+ ZSTD_free(params, params->customMem);
+ return 0;
+}
+
+size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params)
+{
+ return ZSTD_initCCtxParams(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+ if (!cctxParams) { return ERROR(GENERIC); }
+ memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->compressionLevel = compressionLevel;
+ return 0;
+}
+
+size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
+ if (!cctxParams) { return ERROR(GENERIC); }
+ CHECK_F( ZSTD_checkCParams(params.cParams) );
+ memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->cParams = params.cParams;
+ cctxParams->fParams = params.fParams;
+ cctxParams->compressionLevel = ZSTD_CLEVEL_CUSTOM;
+ return 0;
+}
+
+static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
+ ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
+{
+ ZSTD_CCtx_params ret = cctxParams;
+ ret.cParams = params.cParams;
+ ret.fParams = params.fParams;
+ ret.compressionLevel = ZSTD_CLEVEL_CUSTOM;
+ return ret;
}
#define CLAMPCHECK(val,min,max) { \
@@ -238,167 +255,285 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
switch(param)
{
- case ZSTD_p_compressionLevel :
- if ((int)value > ZSTD_maxCLevel()) value = ZSTD_maxCLevel(); /* cap max compression level */
+ case ZSTD_p_format :
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_compressionLevel:
if (value == 0) return 0; /* special value : 0 means "don't change anything" */
if (cctx->cdict) return ERROR(stage_wrong);
- cctx->compressionLevel = value;
- return 0;
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_windowLog:
+ case ZSTD_p_hashLog:
+ case ZSTD_p_chainLog:
+ case ZSTD_p_searchLog:
+ case ZSTD_p_minMatch:
+ case ZSTD_p_targetLength:
+ case ZSTD_p_compressionStrategy:
+ if (value == 0) return 0; /* special value : 0 means "don't change anything" */
+ if (cctx->cdict) return ERROR(stage_wrong);
+ ZSTD_cLevelToCParams(cctx); /* Can optimize if srcSize is known */
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_windowLog :
- DEBUGLOG(5, "setting ZSTD_p_windowLog = %u (cdict:%u)",
- value, (cctx->cdict!=NULL));
+ case ZSTD_p_contentSizeFlag:
+ case ZSTD_p_checksumFlag:
+ case ZSTD_p_dictIDFlag:
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize,
+ * even when referencing into Dictionary content
+ * default : 0 when using a CDict, 1 when using a Prefix */
+ cctx->loadedDictEnd = 0;
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_nbThreads:
+ if (value==0) return 0;
+ DEBUGLOG(5, " setting nbThreads : %u", value);
+ if (value > 1 && cctx->staticSize) {
+ return ERROR(parameter_unsupported); /* MT not compatible with static alloc */
+ }
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_jobSize:
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_overlapSizeLog:
+ DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->requestedParams.nbThreads);
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_enableLongDistanceMatching:
+ if (cctx->cdict) return ERROR(stage_wrong);
+ if (value != 0) {
+ ZSTD_cLevelToCParams(cctx);
+ }
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_ldmHashLog:
+ case ZSTD_p_ldmMinMatch:
if (value == 0) return 0; /* special value : 0 means "don't change anything" */
if (cctx->cdict) return ERROR(stage_wrong);
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_p_ldmBucketSizeLog:
+ case ZSTD_p_ldmHashEveryLog:
+ if (cctx->cdict) return ERROR(stage_wrong);
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ default: return ERROR(parameter_unsupported);
+ }
+}
+
+size_t ZSTD_CCtxParam_setParameter(
+ ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value)
+{
+ switch(param)
+ {
+ case ZSTD_p_format :
+ if (value > (unsigned)ZSTD_f_zstd1_magicless)
+ return ERROR(parameter_unsupported);
+ params->format = (ZSTD_format_e)value;
+ return 0;
+
+ case ZSTD_p_compressionLevel :
+ if ((int)value > ZSTD_maxCLevel()) value = ZSTD_maxCLevel();
+ if (value == 0) return 0;
+ params->compressionLevel = value;
+ return 0;
+
+ case ZSTD_p_windowLog :
+ if (value == 0) return 0;
CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.windowLog = value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.windowLog = value;
return 0;
case ZSTD_p_hashLog :
- if (value == 0) return 0; /* special value : 0 means "don't change anything" */
- if (cctx->cdict) return ERROR(stage_wrong);
+ if (value == 0) return 0;
CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.hashLog = value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.hashLog = value;
return 0;
case ZSTD_p_chainLog :
- if (value == 0) return 0; /* special value : 0 means "don't change anything" */
- if (cctx->cdict) return ERROR(stage_wrong);
+ if (value == 0) return 0;
CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.chainLog = value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.chainLog = value;
return 0;
case ZSTD_p_searchLog :
- if (value == 0) return 0; /* special value : 0 means "don't change anything" */
- if (cctx->cdict) return ERROR(stage_wrong);
+ if (value == 0) return 0;
CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.searchLog = value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.searchLog = value;
return 0;
case ZSTD_p_minMatch :
- if (value == 0) return 0; /* special value : 0 means "don't change anything" */
- if (cctx->cdict) return ERROR(stage_wrong);
+ if (value == 0) return 0;
CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.searchLength = value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.searchLength = value;
return 0;
case ZSTD_p_targetLength :
- if (value == 0) return 0; /* special value : 0 means "don't change anything" */
- if (cctx->cdict) return ERROR(stage_wrong);
+ if (value == 0) return 0;
CLAMPCHECK(value, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.targetLength = value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.targetLength = value;
return 0;
case ZSTD_p_compressionStrategy :
- if (value == 0) return 0; /* special value : 0 means "don't change anything" */
- if (cctx->cdict) return ERROR(stage_wrong);
+ if (value == 0) return 0;
CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
- ZSTD_cLevelToCParams(cctx);
- cctx->requestedParams.cParams.strategy = (ZSTD_strategy)value;
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.strategy = (ZSTD_strategy)value;
return 0;
case ZSTD_p_contentSizeFlag :
- DEBUGLOG(5, "set content size flag = %u", (value>0));
/* Content size written in frame header _when known_ (default:1) */
- cctx->requestedParams.fParams.contentSizeFlag = value>0;
+ DEBUGLOG(5, "set content size flag = %u", (value>0));
+ params->fParams.contentSizeFlag = value > 0;
return 0;
case ZSTD_p_checksumFlag :
/* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
- cctx->requestedParams.fParams.checksumFlag = value>0;
+ params->fParams.checksumFlag = value > 0;
return 0;
case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(5, "set dictIDFlag = %u", (value>0));
- cctx->requestedParams.fParams.noDictIDFlag = (value==0);
+ params->fParams.noDictIDFlag = (value == 0);
return 0;
- /* Dictionary parameters */
- case ZSTD_p_dictMode :
- if (cctx->cdict) return ERROR(stage_wrong); /* must be set before loading */
- /* restrict dictionary mode, to "rawContent" or "fullDict" only */
- ZSTD_STATIC_ASSERT((U32)ZSTD_dm_fullDict > (U32)ZSTD_dm_rawContent);
- if (value > (unsigned)ZSTD_dm_fullDict)
- return ERROR(parameter_outOfBound);
- cctx->dictMode = (ZSTD_dictMode_e)value;
+ case ZSTD_p_forceMaxWindow :
+ params->forceWindow = value > 0;
return 0;
- case ZSTD_p_refDictContent :
- if (cctx->cdict) return ERROR(stage_wrong); /* must be set before loading */
- /* dictionary content will be referenced, instead of copied */
- cctx->dictContentByRef = value>0;
+ case ZSTD_p_nbThreads :
+ if (value == 0) return 0;
+#ifndef ZSTD_MULTITHREAD
+ if (value > 1) return ERROR(parameter_unsupported);
return 0;
+#else
+ return ZSTDMT_initializeCCtxParameters(params, value);
+#endif
- case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize,
- * even when referencing into Dictionary content
- * default : 0 when using a CDict, 1 when using a Prefix */
- cctx->forceWindow = value>0;
- cctx->loadedDictEnd = 0;
- return 0;
+ case ZSTD_p_jobSize :
+#ifndef ZSTD_MULTITHREAD
+ return ERROR(parameter_unsupported);
+#else
+ if (params->nbThreads <= 1) return ERROR(parameter_unsupported);
+ return ZSTDMT_CCtxParam_setMTCtxParameter(params, ZSTDMT_p_sectionSize, value);
+#endif
- case ZSTD_p_nbThreads:
- if (value==0) return 0;
- DEBUGLOG(5, " setting nbThreads : %u", value);
+ case ZSTD_p_overlapSizeLog :
#ifndef ZSTD_MULTITHREAD
- if (value > 1) return ERROR(parameter_unsupported);
+ return ERROR(parameter_unsupported);
+#else
+ if (params->nbThreads <= 1) return ERROR(parameter_unsupported);
+ return ZSTDMT_CCtxParam_setMTCtxParameter(params, ZSTDMT_p_overlapSectionLog, value);
#endif
- if ((value>1) && (cctx->nbThreads != value)) {
- if (cctx->staticSize) /* MT not compatible with static alloc */
- return ERROR(parameter_unsupported);
- ZSTDMT_freeCCtx(cctx->mtctx);
- cctx->nbThreads = 1;
- cctx->mtctx = ZSTDMT_createCCtx_advanced(value, cctx->customMem);
- if (cctx->mtctx == NULL) return ERROR(memory_allocation);
+
+ case ZSTD_p_enableLongDistanceMatching :
+ if (value != 0) {
+ ZSTD_cLevelToCCtxParams(params);
+ params->cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
}
- cctx->nbThreads = value;
+ return ZSTD_ldm_initializeParameters(&params->ldmParams, value);
+
+ case ZSTD_p_ldmHashLog :
+ if (value == 0) return 0;
+ CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+ params->ldmParams.hashLog = value;
return 0;
- case ZSTD_p_jobSize:
- if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported);
- assert(cctx->mtctx != NULL);
- return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_sectionSize, value);
+ case ZSTD_p_ldmMinMatch :
+ if (value == 0) return 0;
+ CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
+ params->ldmParams.minMatchLength = value;
+ return 0;
- case ZSTD_p_overlapSizeLog:
- DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->nbThreads);
- if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported);
- assert(cctx->mtctx != NULL);
- return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_overlapSectionLog, value);
+ case ZSTD_p_ldmBucketSizeLog :
+ if (value > ZSTD_LDM_BUCKETSIZELOG_MAX) {
+ return ERROR(parameter_outOfBound);
+ }
+ params->ldmParams.bucketSizeLog = value;
+ return 0;
+
+ case ZSTD_p_ldmHashEveryLog :
+ if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) {
+ return ERROR(parameter_outOfBound);
+ }
+ params->ldmParams.hashEveryLog = value;
+ return 0;
default: return ERROR(parameter_unsupported);
}
}
+/**
+ * This function should be updated whenever ZSTD_CCtx_params is updated.
+ * Parameters are copied manually before the dictionary is loaded.
+ * The multithreading parameters jobSize and overlapSizeLog are set only if
+ * nbThreads > 1.
+ *
+ * Pledged srcSize is treated as unknown.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
+ if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+ if (cctx->cdict) return ERROR(stage_wrong);
+
+ /* Assume the compression and frame parameters are validated */
+ cctx->requestedParams.cParams = params->cParams;
+ cctx->requestedParams.fParams = params->fParams;
+ cctx->requestedParams.compressionLevel = params->compressionLevel;
+
+ /* Set force window explicitly since it sets cctx->loadedDictEnd */
+ CHECK_F( ZSTD_CCtx_setParameter(
+ cctx, ZSTD_p_forceMaxWindow, params->forceWindow) );
+
+ /* Set multithreading parameters explicitly */
+ CHECK_F( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, params->nbThreads) );
+ if (params->nbThreads > 1) {
+ CHECK_F( ZSTD_CCtx_setParameter(cctx, ZSTD_p_jobSize, params->jobSize) );
+ CHECK_F( ZSTD_CCtx_setParameter(
+ cctx, ZSTD_p_overlapSizeLog, params->overlapSizeLog) );
+ }
+
+ /* Copy long distance matching parameters */
+ cctx->requestedParams.ldmParams = params->ldmParams;
+
+ /* customMem is used only for create/free params and can be ignored */
+ return 0;
+}
+
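The group of functions above is meant to be used as a set: a caller stages settings on a ZSTD_CCtx_params object with ZSTD_CCtxParam_setParameter(), applies the whole set to a context in one call, and then releases the params object. A minimal sketch, assuming these entry points are exposed through the experimental (ZSTD_STATIC_LINKING_ONLY) section of zstd.h in this release; the helper name and the chosen settings are illustrative only:

    #define ZSTD_STATIC_LINKING_ONLY   /* expose the experimental CCtx_params API */
    #include <zstd.h>

    /* Hypothetical helper: stage two settings, then apply them to a fresh cctx. */
    static size_t apply_staged_params(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        size_t err;
        if (params == NULL) return (size_t)-1;                        /* allocation failure */
        ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 19);
        ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);
        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);   /* rejected once a frame has started */
        ZSTD_freeCCtxParams(params);   /* safe: the cctx keeps its own copy of the settings */
        return err;
    }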
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
- DEBUGLOG(5, " setting pledgedSrcSize to %u", (U32)pledgedSrcSize);
+ DEBUGLOG(4, " setting pledgedSrcSize to %u", (U32)pledgedSrcSize);
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
return 0;
}
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+size_t ZSTD_CCtx_loadDictionary_advanced(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode)
{
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */
- DEBUGLOG(5, "load dictionary of size %u", (U32)dictSize);
+ DEBUGLOG(4, "load dictionary of size %u", (U32)dictSize);
ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */
if (dict==NULL || dictSize==0) { /* no dictionary mode */
cctx->cdictLocal = NULL;
cctx->cdict = NULL;
} else {
ZSTD_compressionParameters const cParams =
- cctx->compressionLevel == ZSTD_CLEVEL_CUSTOM ?
- cctx->requestedParams.cParams :
- ZSTD_getCParams(cctx->compressionLevel, 0, dictSize);
+ ZSTD_getCParamsFromCCtxParams(cctx->requestedParams, 0, dictSize);
cctx->cdictLocal = ZSTD_createCDict_advanced(
dict, dictSize,
- cctx->dictContentByRef, cctx->dictMode,
+ dictLoadMethod, dictMode,
cParams, cctx->customMem);
cctx->cdict = cctx->cdictLocal;
if (cctx->cdictLocal == NULL)
@@ -407,21 +542,41 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s
return 0;
}
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dm_auto);
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dm_auto);
+}
+
+
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
cctx->cdict = cdict;
- cctx->prefix = NULL; /* exclusive */
- cctx->prefixSize = 0;
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */
return 0;
}
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
+ return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dm_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+ ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode)
+{
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
cctx->cdict = NULL; /* prefix discards any prior cdict */
- cctx->prefix = prefix;
- cctx->prefixSize = prefixSize;
+ cctx->prefixDict.dict = prefix;
+ cctx->prefixDict.dictSize = prefixSize;
+ cctx->prefixDict.dictMode = dictMode;
return 0;
}
@@ -484,28 +639,37 @@ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
/** ZSTD_adjustCParams_internal() :
optimize `cPar` for a given input (`srcSize` and `dictSize`).
- mostly downsizing to reduce memory consumption and initialization.
- Both `srcSize` and `dictSize` are optional (use 0 if unknown),
- but if both are 0, no optimization can be done.
- Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
+ mostly downsizing to reduce memory consumption and initialization latency.
+ Both `srcSize` and `dictSize` are optional (use 0 if unknown).
+ Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
{
+ static const U64 minSrcSize = 513; /* (1<<9) + 1 */
+ static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
- if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */
-
- /* resize params, to use less memory when necessary */
- { U32 const minSrcSize = (srcSize==0) ? 500 : 0;
- U64 const rSize = srcSize + dictSize + minSrcSize;
- if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) {
- U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
- if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
- } }
+
+ if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
+ srcSize = minSrcSize; /* presumed small when there is a dictionary */
+ else if (srcSize == 0)
+ srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */
+
+ /* resize windowLog if input is small enough, to use less memory */
+ if ( (srcSize < maxWindowResize)
+ && (dictSize < maxWindowResize) ) {
+ U32 const tSize = (U32)(srcSize + dictSize);
+ static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+ U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+ ZSTD_highbit32(tSize-1) + 1;
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ }
if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
{ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
- if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog);
+ if (cycleLog > cPar.windowLog)
+ cPar.chainLog -= (cycleLog - cPar.windowLog);
}
- if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
return cPar;
}
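A hand-worked illustration of the windowLog adjustment above, assuming a 10 000-byte input with no dictionary (so the hashSizeMin branch is not taken):

    tSize   = srcSize + dictSize            = 10000
    srcLog  = ZSTD_highbit32(tSize - 1) + 1 = 13 + 1 = 14

Any cPar.windowLog above 14 is therefore reduced to 14; hashLog is then capped at the new windowLog, and chainLog shrinks if its cycle log exceeds the window.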
@@ -516,56 +680,81 @@ ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, u
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
}
-
-size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams)
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
- U32 const divider = (cParams.searchLength==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11*maxNbSeq;
+ /* Estimating the CCtx size is supported for single-threaded compression only. */
+ if (params->nbThreads > 1) { return ERROR(GENERIC); }
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(*params, 0, 0);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ U32 const divider = (cParams.searchLength==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = blockSize + 11*maxNbSeq;
+ size_t const chainSize =
+ (cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams.chainLog);
+ size_t const hSize = ((size_t)1) << cParams.hashLog;
+ U32 const hashLog3 = (cParams.searchLength>3) ?
+ 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
+ size_t const h3Size = ((size_t)1) << hashLog3;
+ size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+
+ size_t const optBudget =
+ ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
+ + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+ size_t const optSpace = ((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btultra)) ? optBudget : 0;
- size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
- size_t const hSize = ((size_t)1) << cParams.hashLog;
- U32 const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
- size_t const h3Size = ((size_t)1) << hashLog3;
- size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+ size_t const ldmSpace = params->ldmParams.enableLdm ?
+ ZSTD_ldm_getTableSize(params->ldmParams.hashLog,
+ params->ldmParams.bucketSizeLog) : 0;
- size_t const optBudget = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
- + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
- size_t const optSpace = ((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btultra)) ? optBudget : 0;
- size_t const neededSpace = entropySpace + tableSpace + tokenSpace + optSpace;
+ size_t const neededSpace = entropySpace + tableSpace + tokenSpace +
+ optSpace + ldmSpace;
- DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
- DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
- return sizeof(ZSTD_CCtx) + neededSpace;
+ DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
+ DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
+ return sizeof(ZSTD_CCtx) + neededSpace;
+ }
+}
+
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
}
size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
- return ZSTD_estimateCCtxSize_advanced(cParams);
+ return ZSTD_estimateCCtxSize_usingCParams(cParams);
}
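The value returned here is the workspace budget expected by ZSTD_initStaticCCtx() earlier in this file, and, per the comment in ZSTD_estimateCCtxSize_usingCCtxParams(), it only covers single-threaded use. A minimal sketch of the pairing, assuming the experimental static-allocation API from zstd.h; the level and buffer handling are illustrative:

    #define ZSTD_STATIC_LINKING_ONLY   /* expose ZSTD_initStaticCCtx() */
    #include <stdlib.h>
    #include <zstd.h>

    int main(void)
    {
        /* Size the workspace from the level you intend to use, then place the
         * context inside it.  The buffer must stay allocated for the cctx's
         * lifetime and must be 8-byte aligned (malloc() satisfies this). */
        size_t const wkspSize = ZSTD_estimateCCtxSize(3);   /* level 3, single-threaded */
        void* const wksp = malloc(wkspSize);
        ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
        if (cctx == NULL) { free(wksp); return 1; }
        /* ... compress with cctx ... */
        free(wksp);                                         /* releases the cctx as well */
        return 0;
    }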
-size_t ZSTD_estimateCStreamSize_advanced(ZSTD_compressionParameters cParams)
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
- size_t const CCtxSize = ZSTD_estimateCCtxSize_advanced(cParams);
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
- size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
- size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
- size_t const streamingSize = inBuffSize + outBuffSize;
+ if (params->nbThreads > 1) { return ERROR(GENERIC); }
+ { size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
+ size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
+ size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
+ size_t const streamingSize = inBuffSize + outBuffSize;
- return CCtxSize + streamingSize;
+ return CCtxSize + streamingSize;
+ }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
}
size_t ZSTD_estimateCStreamSize(int compressionLevel) {
ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
- return ZSTD_estimateCStreamSize_advanced(cParams);
+ return ZSTD_estimateCStreamSize_usingCParams(cParams);
}
-
-static U32 ZSTD_equivalentParams(ZSTD_compressionParameters cParams1,
- ZSTD_compressionParameters cParams2)
+static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
{
U32 bslog1 = MIN(cParams1.windowLog, ZSTD_BLOCKSIZELOG_MAX);
U32 bslog2 = MIN(cParams2.windowLog, ZSTD_BLOCKSIZELOG_MAX);
@@ -576,18 +765,39 @@ static U32 ZSTD_equivalentParams(ZSTD_compressionParameters cParams1,
& ((cParams1.searchLength==3) == (cParams2.searchLength==3)); /* hashlog3 space */
}
+/** Two sets of LDM parameters are considered equivalent if LDM is disabled
+ * in both, or if every LDM field (including the enable flag) matches. */
+static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
+ ldmParams_t ldmParams2)
+{
+ return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
+ (ldmParams1.enableLdm == ldmParams2.enableLdm &&
+ ldmParams1.hashLog == ldmParams2.hashLog &&
+ ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
+ ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
+ ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
+}
+
+/** Equivalence for resetCCtx purposes */
+static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
+ ZSTD_CCtx_params params2)
+{
+ return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
+ ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams);
+}
+
/*! ZSTD_continueCCtx() :
* reuse CCtx without reset (note : requires no dictionary) */
-static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 pledgedSrcSize)
+static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
{
U32 const end = (U32)(cctx->nextSrc - cctx->base);
- DEBUGLOG(5, "continue mode");
+ DEBUGLOG(4, "continue mode");
cctx->appliedParams = params;
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
cctx->consumedSrcSize = 0;
if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
cctx->appliedParams.fParams.contentSizeFlag = 0;
- DEBUGLOG(5, "pledged content size : %u ; flag : %u",
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
cctx->lowLimit = end;
cctx->dictLimit = end;
@@ -607,15 +817,19 @@ typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
/*! ZSTD_resetCCtx_internal() :
note : `params` are assumed fully validated at this stage */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
- ZSTD_parameters params, U64 pledgedSrcSize,
+ ZSTD_CCtx_params params, U64 pledgedSrcSize,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal");
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ DEBUGLOG(4, "pledgedSrcSize: %u", (U32)pledgedSrcSize);
if (crp == ZSTDcrp_continue) {
- if (ZSTD_equivalentParams(params.cParams, zc->appliedParams.cParams)) {
- DEBUGLOG(5, "ZSTD_equivalentParams()==1");
+ if (ZSTD_equivalentParams(params, zc->appliedParams)) {
+ DEBUGLOG(4, "ZSTD_equivalentParams()==1");
+ assert(!(params.ldmParams.enableLdm &&
+ params.ldmParams.hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET));
zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
zc->entropy->offcode_repeatMode = FSE_repeat_none;
zc->entropy->matchlength_repeatMode = FSE_repeat_none;
@@ -623,12 +837,21 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
} }
+ if (params.ldmParams.enableLdm) {
+ /* Adjust long distance matching parameters */
+ ZSTD_ldm_adjustParameters(&params.ldmParams, params.cParams.windowLog);
+ assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+ assert(params.ldmParams.hashEveryLog < 32);
+ zc->ldmState.hashPower =
+ ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+ }
+
{ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params.cParams.windowLog);
U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const tokenSpace = blockSize + 11*maxNbSeq;
size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ?
- 0 : (1 << params.cParams.chainLog);
+ 0 : ((size_t)1 << params.cParams.chainLog);
size_t const hSize = ((size_t)1) << params.cParams.hashLog;
U32 const hashLog3 = (params.cParams.searchLength>3) ?
0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
@@ -646,10 +869,13 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
|| (params.cParams.strategy == ZSTD_btultra)) ?
optPotentialSpace : 0;
size_t const bufferSpace = buffInSize + buffOutSize;
- size_t const neededSpace = entropySpace + optSpace + tableSpace
- + tokenSpace + bufferSpace;
+ size_t const ldmSpace = params.ldmParams.enableLdm
+ ? ZSTD_ldm_getTableSize(params.ldmParams.hashLog, params.ldmParams.bucketSizeLog)
+ : 0;
+ size_t const neededSpace = entropySpace + optSpace + ldmSpace +
+ tableSpace + tokenSpace + bufferSpace;
- if (zc->workSpaceSize < neededSpace) { /* too small : resize /*/
+ if (zc->workSpaceSize < neededSpace) { /* too small : resize */
DEBUGLOG(5, "Need to update workSpaceSize from %uK to %uK \n",
(unsigned)zc->workSpaceSize>>10,
(unsigned)neededSpace>>10);
@@ -714,6 +940,16 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ptr = zc->optState.priceTable + ZSTD_OPT_NUM+1;
}
+ /* ldm hash table */
+ /* initialize bucketOffsets table later for pointer alignment */
+ if (params.ldmParams.enableLdm) {
+ size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
+ memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
+ assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
+ zc->ldmState.hashTable = (ldmEntry_t*)ptr;
+ ptr = zc->ldmState.hashTable + ldmHSize;
+ }
+
/* table Space */
if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */
assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
@@ -731,6 +967,16 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
ptr = zc->seqStore.litStart + blockSize;
+ /* ldm bucketOffsets table */
+ if (params.ldmParams.enableLdm) {
+ size_t const ldmBucketSize =
+ ((size_t)1) << (params.ldmParams.hashLog -
+ params.ldmParams.bucketSizeLog);
+ memset(ptr, 0, ldmBucketSize);
+ zc->ldmState.bucketOffsets = (BYTE*)ptr;
+ ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
+ }
+
/* buffers */
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ptr;
@@ -753,6 +999,8 @@ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
/*! ZSTD_copyCCtx_internal() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * The "context", in this case, refers to the hash and chain tables, entropy
+ * tables, and dictionary offsets.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
* pledgedSrcSize=0 means "empty" if fParams.contentSizeFlag=1
* @return : 0, or an error code */
@@ -766,14 +1014,16 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
- { ZSTD_parameters params = srcCCtx->appliedParams;
+ { ZSTD_CCtx_params params = dstCCtx->requestedParams;
+ /* Copy only compression parameters related to tables. */
+ params.cParams = srcCCtx->appliedParams.cParams;
params.fParams = fParams;
ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
ZSTDcrp_noMemset, zbuff);
}
/* copy tables */
- { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->appliedParams.cParams.chainLog);
+ { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
@@ -826,18 +1076,36 @@ static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reduce
}
}
+/*! ZSTD_ldm_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+ U32 const reducerValue)
+{
+ U32 u;
+ for (u = 0; u < size; u++) {
+ if (table[u].offset < reducerValue) table[u].offset = 0;
+ else table[u].offset -= reducerValue;
+ }
+}
+
/*! ZSTD_reduceIndex() :
* rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
{
- { U32 const hSize = 1 << zc->appliedParams.cParams.hashLog;
+ { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); }
- { U32 const chainSize = (zc->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->appliedParams.cParams.chainLog);
+ { U32 const chainSize = (zc->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((U32)1 << zc->appliedParams.cParams.chainLog);
ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); }
- { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
+ { U32 const h3Size = (zc->hashLog3) ? (U32)1 << zc->hashLog3 : 0;
ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); }
+
+ { if (zc->appliedParams.ldmParams.enableLdm) {
+ U32 const ldmHSize = (U32)1 << zc->appliedParams.ldmParams.hashLog;
+ ZSTD_ldm_reduceTable(zc->ldmState.hashTable, ldmHSize, reducerValue);
+ }
+ }
}
@@ -976,24 +1244,6 @@ static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t * entropy,
return lhSize+cLitSize;
}
-static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 16, 17, 17, 18, 18, 19, 19,
- 20, 20, 20, 20, 21, 21, 21, 21,
- 22, 22, 22, 22, 22, 22, 22, 22,
- 23, 23, 23, 23, 23, 23, 23, 23,
- 24, 24, 24, 24, 24, 24, 24, 24,
- 24, 24, 24, 24, 24, 24, 24, 24 };
-
-static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
- 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
- 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
- 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
-
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
@@ -1018,20 +1268,30 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
-MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMode,
- size_t const mostFrequent, size_t nbSeq, U32 defaultNormLog)
+typedef enum {
+ ZSTD_defaultDisallowed = 0,
+ ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
+MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
+ U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
{
#define MIN_SEQ_FOR_DYNAMIC_FSE 64
#define MAX_SEQ_FOR_STATIC_FSE 1000
-
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+ if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
*repeatMode = FSE_repeat_check;
return set_rle;
}
- if ((*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+ if (isDefaultAllowed && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
return set_repeat;
}
- if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) {
+ if (isDefaultAllowed && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1))))) {
*repeatMode = FSE_repeat_valid;
return set_basic;
}
@@ -1067,6 +1327,7 @@ MEM_STATIC size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
count[codeTable[nbSeq-1]]--;
nbSeq_1--;
}
+ assert(nbSeq_1 > 1);
CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return NCountSize;
@@ -1131,7 +1392,7 @@ MEM_STATIC size_t ZSTD_encodeSequences(void* dst, size_t dstCapacity,
BIT_addBits(&blockStream, sequences[n].litLength, llBits);
if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
- if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
if (longOffsets) {
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
@@ -1204,7 +1465,7 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
/* CTable for Literal Lengths */
{ U32 max = MaxLL;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, entropy->workspace);
- LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog);
+ LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
entropy->workspace, sizeof(entropy->workspace));
@@ -1214,9 +1475,11 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
/* CTable for Offsets */
{ U32 max = MaxOff;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, entropy->workspace);
- Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog);
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = max <= DefaultMaxOff ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
- count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, MaxOff,
+ count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
entropy->workspace, sizeof(entropy->workspace));
if (ZSTD_isError(countSize)) return countSize;
op += countSize;
@@ -1224,7 +1487,7 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
/* CTable for MatchLengths */
{ U32 max = MaxML;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, entropy->workspace);
- MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog);
+ MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
entropy->workspace, sizeof(entropy->workspace));
@@ -1279,1528 +1542,11 @@ MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
return cSize;
}
-
-/*! ZSTD_storeSeq() :
- Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
- `offsetCode` : distance to match, or 0 == repCode.
- `matchCode` : matchLength - MINMATCH
-*/
-MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
-{
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
- static const BYTE* g_start = NULL;
- U32 const pos = (U32)((const BYTE*)literals - g_start);
- if (g_start==NULL) g_start = (const BYTE*)literals;
- if ((pos > 0) && (pos < 1000000000))
- DEBUGLOG(6, "Cpos %6u :%5u literals & match %3u bytes at distance %6u",
- pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
-#endif
- /* copy Literals */
- assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
- ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
- seqStorePtr->lit += litLength;
-
- /* literal Length */
- if (litLength>0xFFFF) {
- seqStorePtr->longLengthID = 1;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
- seqStorePtr->sequences[0].offset = offsetCode + 1;
-
- /* match Length */
- if (matchCode>0xFFFF) {
- seqStorePtr->longLengthID = 2;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].matchLength = (U16)matchCode;
-
- seqStorePtr->sequences++;
-}
-
-
-/*-*************************************
-* Match length counter
-***************************************/
-static unsigned ZSTD_NbCommonBytes (register size_t val)
-{
- if (MEM_isLittleEndian()) {
- if (MEM_64bits()) {
-# if defined(_MSC_VER) && defined(_WIN64)
- unsigned long r = 0;
- _BitScanForward64( &r, (U64)val );
- return (unsigned)(r>>3);
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
- return (__builtin_ctzll((U64)val) >> 3);
-# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
- 0, 3, 1, 3, 1, 4, 2, 7,
- 0, 2, 3, 6, 1, 5, 3, 5,
- 1, 3, 4, 4, 2, 5, 6, 7,
- 7, 0, 1, 2, 3, 3, 4, 6,
- 2, 6, 5, 5, 3, 4, 5, 6,
- 7, 1, 2, 4, 6, 4, 4, 5,
- 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-# endif
- } else { /* 32 bits */
-# if defined(_MSC_VER)
- unsigned long r=0;
- _BitScanForward( &r, (U32)val );
- return (unsigned)(r>>3);
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
- return (__builtin_ctz((U32)val) >> 3);
-# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
- 3, 2, 2, 1, 3, 2, 0, 1,
- 3, 3, 1, 2, 2, 2, 2, 0,
- 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-# endif
- }
- } else { /* Big Endian CPU */
- if (MEM_64bits()) {
-# if defined(_MSC_VER) && defined(_WIN64)
- unsigned long r = 0;
- _BitScanReverse64( &r, val );
- return (unsigned)(r>>3);
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
- return (__builtin_clzll(val) >> 3);
-# else
- unsigned r;
- const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
- if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-# endif
- } else { /* 32 bits */
-# if defined(_MSC_VER)
- unsigned long r = 0;
- _BitScanReverse( &r, (unsigned long)val );
- return (unsigned)(r>>3);
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
- return (__builtin_clz((U32)val) >> 3);
-# else
- unsigned r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
-# endif
- } }
-}
-
-
-static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
-{
- const BYTE* const pStart = pIn;
- const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
-
- while (pIn < pInLoopLimit) {
- size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
- if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
- pIn += ZSTD_NbCommonBytes(diff);
- return (size_t)(pIn - pStart);
- }
- if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
- if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
- if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
- return (size_t)(pIn - pStart);
-}
-
-/** ZSTD_count_2segments() :
-* can count match length with `ip` & `match` in 2 different segments.
-* convention : on reaching mEnd, match count continue starting from iStart
-*/
-static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
-{
- const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
- size_t const matchLength = ZSTD_count(ip, match, vEnd);
- if (match + matchLength != mEnd) return matchLength;
- return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
-}
-
-
-/*-*************************************
-* Hashes
-***************************************/
-static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
-
-static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
-
-static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
-
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
-
-static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-
-static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
-{
- switch(mls)
- {
- default:
- case 4: return ZSTD_hash4Ptr(p, hBits);
- case 5: return ZSTD_hash5Ptr(p, hBits);
- case 6: return ZSTD_hash6Ptr(p, hBits);
- case 7: return ZSTD_hash7Ptr(p, hBits);
- case 8: return ZSTD_hash8Ptr(p, hBits);
- }
-}
-
-
-/*-*************************************
-* Fast Scan
-***************************************/
-static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
-{
- U32* const hashTable = zc->hashTable;
- U32 const hBits = zc->appliedParams.cParams.hashLog;
- const BYTE* const base = zc->base;
- const BYTE* ip = base + zc->nextToUpdate;
- const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
- const size_t fastHashFillStep = 3;
-
- while(ip <= iend) {
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
- ip += fastHashFillStep;
- }
-}
-
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
- const void* src, size_t srcSize,
- const U32 mls)
-{
- U32* const hashTable = cctx->hashTable;
- U32 const hBits = cctx->appliedParams.cParams.hashLog;
- seqStore_t* seqStorePtr = &(cctx->seqStore);
- const BYTE* const base = cctx->base;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const U32 lowestIndex = cctx->dictLimit;
- const BYTE* const lowest = base + lowestIndex;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
- U32 offsetSaved = 0;
-
- /* init */
- ip += (ip==lowest);
- { U32 const maxRep = (U32)(ip-lowest);
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h = ZSTD_hashPtr(ip, hBits, mls);
- U32 const current = (U32)(ip-base);
- U32 const matchIndex = hashTable[h];
- const BYTE* match = base + matchIndex;
- hashTable[h] = current; /* update hash table */
-
- if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
- mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else {
- U32 offset;
- if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
- ip += ((ip-anchor) >> g_searchStrength) + 1;
- continue;
- }
- mLength = ZSTD_count(ip+4, match+4, iend) + 4;
- offset = (U32)(ip-match);
- while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- }
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
- /* check immediate repcode */
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } }
-
- /* save reps for next block */
- seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
- seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-
-static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize)
-{
- const U32 mls = ctx->appliedParams.cParams.searchLength;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
- case 5 :
- ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
- case 6 :
- ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
- case 7 :
- ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-
-static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize,
- const U32 mls)
-{
- U32* hashTable = ctx->hashTable;
- const U32 hBits = ctx->appliedParams.cParams.hashLog;
- seqStore_t* seqStorePtr = &(ctx->seqStore);
- const BYTE* const base = ctx->base;
- const BYTE* const dictBase = ctx->dictBase;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE* const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
-
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t h = ZSTD_hashPtr(ip, hBits, mls);
- const U32 matchIndex = hashTable[h];
- const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE* match = matchBase + matchIndex;
- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
- const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* repMatch = repBase + repIndex;
- size_t mLength;
- hashTable[h] = current; /* update hash table */
-
- if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else {
- if ( (matchIndex < lowestIndex) ||
- (MEM_read32(match) != MEM_read32(ip)) ) {
- ip += ((ip-anchor) >> g_searchStrength) + 1;
- continue;
- }
- { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- U32 offset;
- mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset = current - matchIndex;
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- } }
-
- /* found a match : store it */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
- hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } } }
-
- /* save reps for next block */
- seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-
-static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize)
-{
- U32 const mls = ctx->appliedParams.cParams.searchLength;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
- case 5 :
- ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
- case 6 :
- ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
- case 7 :
- ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-
-/*-*************************************
-* Double Fast
-***************************************/
-static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls)
-{
- U32* const hashLarge = cctx->hashTable;
- U32 const hBitsL = cctx->appliedParams.cParams.hashLog;
- U32* const hashSmall = cctx->chainTable;
- U32 const hBitsS = cctx->appliedParams.cParams.chainLog;
- const BYTE* const base = cctx->base;
- const BYTE* ip = base + cctx->nextToUpdate;
- const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
- const size_t fastHashFillStep = 3;
-
- while(ip <= iend) {
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
- hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
- ip += fastHashFillStep;
- }
-}
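/* Illustrative note (not part of the imported patch): ZSTD_hashPtr() reduces the
 * first `mls` bytes at a position to an hBits-wide table index; the fill loop
 * above seeds both the small-key and the 8-byte-key table every 3 positions.
 * A minimal multiplicative hash of the same shape, using an arbitrary odd
 * constant chosen for illustration (not necessarily zstd's exact multiplier): */
#include <stdint.h>
#include <string.h>

static uint32_t toy_hash4(const void* p, uint32_t hBits)   /* assumes 0 < hBits < 32 */
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));                    /* unaligned-safe 4-byte read */
    return (v * 2654435761u) >> (32 - hBits);    /* keep the top hBits bits */
}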
-
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
- const void* src, size_t srcSize,
- const U32 mls)
-{
- U32* const hashLong = cctx->hashTable;
- const U32 hBitsL = cctx->appliedParams.cParams.hashLog;
- U32* const hashSmall = cctx->chainTable;
- const U32 hBitsS = cctx->appliedParams.cParams.chainLog;
- seqStore_t* seqStorePtr = &(cctx->seqStore);
- const BYTE* const base = cctx->base;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const U32 lowestIndex = cctx->dictLimit;
- const BYTE* const lowest = base + lowestIndex;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
- U32 offsetSaved = 0;
-
- /* init */
- ip += (ip==lowest);
- { U32 const maxRep = (U32)(ip-lowest);
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
- size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
- U32 const current = (U32)(ip-base);
- U32 const matchIndexL = hashLong[h2];
- U32 const matchIndexS = hashSmall[h];
- const BYTE* matchLong = base + matchIndexL;
- const BYTE* match = base + matchIndexS;
- hashLong[h2] = hashSmall[h] = current; /* update hash tables */
-
- assert(offset_1 <= current); /* supposed guaranteed by construction */
- if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
- /* favor repcode */
- mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else {
- U32 offset;
- if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
- mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
- offset = (U32)(ip-matchLong);
- while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
- size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- U32 const matchIndexL3 = hashLong[hl3];
- const BYTE* matchL3 = base + matchIndexL3;
- hashLong[hl3] = current + 1;
- if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
- mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
- ip++;
- offset = (U32)(ip-matchL3);
- while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
- } else {
- mLength = ZSTD_count(ip+4, match+4, iend) + 4;
- offset = (U32)(ip-match);
- while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- }
- } else {
- ip += ((ip-anchor) >> g_searchStrength) + 1;
- continue;
- }
-
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- }
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
- hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */
- hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
- hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
-
- /* check immediate repcode */
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } }
-
- /* save reps for next block */
- seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
- seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-
-static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- const U32 mls = ctx->appliedParams.cParams.searchLength;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
- case 5 :
- ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
- case 6 :
- ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
- case 7 :
- ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
- }
-}
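/* Illustrative note (not part of the imported patch): "double fast" keeps two
 * tables, hashLong keyed on 8 bytes and hashSmall keyed on `mls` bytes.  At
 * each position it prefers a repcode, then an 8-byte-verified hit from the long
 * table, then a long match one byte ahead, and only then the 4-byte hit.  The
 * sketch below shows just the long-vs-short preference; read32/read64/match_len
 * are simplified stand-ins for MEM_read32/MEM_read64/ZSTD_count, and table
 * updates, repcodes and the ip+1 probe are omitted. */
#include <stdint.h>
#include <string.h>
#include <stddef.h>

static uint32_t read32(const uint8_t* p) { uint32_t v; memcpy(&v, p, 4); return v; }
static uint64_t read64(const uint8_t* p) { uint64_t v; memcpy(&v, p, 8); return v; }

static size_t match_len(const uint8_t* cur, const uint8_t* match, const uint8_t* end)
{   /* count equal bytes; `cur` is the later position, bounded by `end` */
    size_t n = 0;
    while (cur + n < end && cur[n] == match[n]) n++;
    return n;
}

/* candLong/candShort are the two table candidates (NULL when empty); assumes at
 * least 8 readable bytes at ip.  Returns the chosen match length (0 = none). */
static size_t pick_candidate(const uint8_t* ip, const uint8_t* iend,
                             const uint8_t* candLong, const uint8_t* candShort,
                             const uint8_t** matchPtr)
{
    *matchPtr = NULL;
    if (candLong && read64(candLong) == read64(ip)) {      /* long match, verified on 8 bytes */
        *matchPtr = candLong;
        return 8 + match_len(ip + 8, candLong + 8, iend);
    }
    if (candShort && read32(candShort) == read32(ip)) {    /* short 4-byte fallback */
        *matchPtr = candShort;
        return 4 + match_len(ip + 4, candShort + 4, iend);
    }
    return 0;
}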
-
-
-static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize,
- const U32 mls)
-{
- U32* const hashLong = ctx->hashTable;
- U32 const hBitsL = ctx->appliedParams.cParams.hashLog;
- U32* const hashSmall = ctx->chainTable;
- U32 const hBitsS = ctx->appliedParams.cParams.chainLog;
- seqStore_t* seqStorePtr = &(ctx->seqStore);
- const BYTE* const base = ctx->base;
- const BYTE* const dictBase = ctx->dictBase;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE* const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
-
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
- const U32 matchIndex = hashSmall[hSmall];
- const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE* match = matchBase + matchIndex;
-
- const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
- const U32 matchLongIndex = hashLong[hLong];
- const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
- const BYTE* matchLong = matchLongBase + matchLongIndex;
-
- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
- const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* repMatch = repBase + repIndex;
- size_t mLength;
- hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
-
- if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else {
- if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
- const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
- U32 offset;
- mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
- offset = current - matchLongIndex;
- while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-
- } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
- size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- U32 const matchIndex3 = hashLong[h3];
- const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
- const BYTE* match3 = match3Base + matchIndex3;
- U32 offset;
- hashLong[h3] = current + 1;
- if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
- const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
- ip++;
- offset = current+1 - matchIndex3;
- while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
- } else {
- const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
- offset = current - matchIndex;
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- }
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-
- } else {
- ip += ((ip-anchor) >> g_searchStrength) + 1;
- continue;
- } }
-
- /* found a match : store it */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
- hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
- hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
- hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } } }
-
- /* save reps for next block */
- seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-
-static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize)
-{
- U32 const mls = ctx->appliedParams.cParams.searchLength;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
- case 5 :
- ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
- case 6 :
- ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
- case 7 :
- ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
- }
-}
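/* Illustrative note (not part of the imported patch): in extDict mode a single
 * 32-bit index space covers two buffers: indices below dictLimit resolve
 * against dictBase (the old segment), indices at or above it against base (the
 * current prefix).  The test `(U32)((dictLimit-1) - repIndex) >= 3` used above
 * relies on intentional unsigned wrap-around: it rejects only the three indices
 * just below dictLimit, where a 4-byte read would run past the old segment, and
 * accepts everything in the prefix, where the subtraction wraps to a huge
 * value.  A more explicit, equivalent formulation: */
#include <stdint.h>

static const uint8_t* index_to_ptr(uint32_t idx, uint32_t dictLimit,
                                   const uint8_t* dictBase, const uint8_t* base)
{
    return (idx < dictLimit) ? dictBase + idx : base + idx;
}

static int rep_index_usable(uint32_t repIndex, uint32_t dictLimit, uint32_t lowestIndex)
{
    int const inPrefix   = repIndex >= dictLimit;
    int const deepInDict = repIndex + 3 < dictLimit;   /* 4-byte read stays inside the old segment */
    return (inPrefix || deepInDict) && (repIndex > lowestIndex);
}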
-
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-/** ZSTD_insertBt1() : add one or multiple positions to tree.
-*   ip : assumed <= iend-8.
-* @return : nb of positions added */
-static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares,
- U32 extDict)
-{
- U32* const hashTable = zc->hashTable;
- U32 const hashLog = zc->appliedParams.cParams.hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32* const bt = zc->chainTable;
- U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller=0, commonLengthLarger=0;
- const BYTE* const base = zc->base;
- const BYTE* const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* match;
- const U32 current = (U32)(ip-base);
- const U32 btLow = btMask >= current ? 0 : current - btMask;
- U32* smallerPtr = bt + 2*(current&btMask);
- U32* largerPtr = smallerPtr + 1;
- U32 dummy32; /* to be nullified at the end */
- U32 const windowLow = zc->lowLimit;
- U32 matchEndIdx = current+8;
- size_t bestLength = 8;
-#ifdef ZSTD_C_PREDICT
- U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
- U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
- predictedSmall += (predictedSmall>0);
- predictedLarge += (predictedLarge>0);
-#endif /* ZSTD_C_PREDICT */
-
- hashTable[h] = current; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32* const nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
-
-#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
- const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
- if (matchIndex == predictedSmall) {
- /* no need to check length, result known */
- *smallerPtr = matchIndex;
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
- predictedSmall = predictPtr[1] + (predictPtr[1]>0);
- continue;
- }
- if (matchIndex == predictedLarge) {
- *largerPtr = matchIndex;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- predictedLarge = predictPtr[0] + (predictPtr[0]>0);
- continue;
- }
-#endif
- if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength])
- matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- bestLength = matchLength;
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- }
-
- if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
-                break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
-
- if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
- /* match is smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
- } else {
- /* match is larger than current */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
- if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
- if (matchEndIdx > current + 8) return matchEndIdx - current - 8;
- return 1;
-}
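/* Illustrative note (not part of the imported patch): positions sharing a hash
 * bucket are kept in a binary tree sorted by the suffix starting at each
 * position.  The tree lives in a flat rolling array: node i keeps its "smaller"
 * child at bt[2*(i&btMask)] and its "larger" child at bt[2*(i&btMask)+1], and
 * inserting the newest position splits the existing tree around it (the
 * commonLengthSmaller/Larger counters let the real code skip bytes already
 * known equal).  Minimal sketch, without the rolling window or the prefix-skip
 * optimization; index 0 doubles as the "no child" sentinel: */
#include <stdint.h>
#include <string.h>

#define SKETCH_MAX 4096                       /* assume all positions < SKETCH_MAX */
static uint32_t bt[2 * SKETCH_MAX];

static void bt_insert(const uint8_t* data, size_t dataSize,
                      uint32_t* root, uint32_t cur)   /* cur larger than every inserted index */
{
    uint32_t* smallerPtr = &bt[2*cur];
    uint32_t* largerPtr  = smallerPtr + 1;
    uint32_t idx = *root;
    *root = cur;                              /* the newest position becomes the new root */
    while (idx != 0) {
        uint32_t* const nextPtr = &bt[2*idx];
        size_t const n = dataSize - cur;      /* length of the shorter (newer) suffix */
        if (memcmp(data + idx, data + cur, n) < 0) {
            *smallerPtr = idx;                /* idx joins the "smaller" subtree */
            smallerPtr = nextPtr + 1;         /* keep splitting its larger child */
            idx = nextPtr[1];
        } else {
            *largerPtr = idx;                 /* idx joins the "larger" subtree */
            largerPtr = nextPtr;              /* keep splitting its smaller child */
            idx = nextPtr[0];
        }
    }
    *smallerPtr = *largerPtr = 0;             /* terminate both dangling links */
}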
-
-
-static size_t ZSTD_insertBtAndFindBestMatch (
- ZSTD_CCtx* zc,
- const BYTE* const ip, const BYTE* const iend,
- size_t* offsetPtr,
- U32 nbCompares, const U32 mls,
- U32 extDict)
-{
- U32* const hashTable = zc->hashTable;
- U32 const hashLog = zc->appliedParams.cParams.hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32* const bt = zc->chainTable;
- U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller=0, commonLengthLarger=0;
- const BYTE* const base = zc->base;
- const BYTE* const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const U32 current = (U32)(ip-base);
- const U32 btLow = btMask >= current ? 0 : current - btMask;
- const U32 windowLow = zc->lowLimit;
- U32* smallerPtr = bt + 2*(current&btMask);
- U32* largerPtr = bt + 2*(current&btMask) + 1;
- U32 matchEndIdx = current+8;
- U32 dummy32; /* to be nullified at the end */
- size_t bestLength = 0;
-
- hashTable[h] = current; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32* const nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE* match;
-
- if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength])
- matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
- if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
- break; /* drop, to guarantee consistency (miss a little bit of compression) */
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
- } else {
- /* match is larger than current */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
-
- zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
- return bestLength;
-}
-
-
-static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
-{
- const BYTE* const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while(idx < target)
- idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0);
-}
-
-/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch (
- ZSTD_CCtx* zc,
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 mls)
-{
- if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
-}
-
-
-static size_t ZSTD_BtFindBestMatch_selectMLS (
- ZSTD_CCtx* zc, /* Index table will be updated */
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 matchLengthSearch)
-{
- switch(matchLengthSearch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
- }
-}
-
-
-static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
-{
- const BYTE* const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1);
-}
-
-
-/** Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch_extDict (
- ZSTD_CCtx* zc,
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 mls)
-{
- if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
-}
-
-
-static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
- ZSTD_CCtx* zc, /* Index table will be updated */
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 matchLengthSearch)
-{
- switch(matchLengthSearch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
- }
-}
-
-
-
-/* *********************************
-* Hash Chain
-***********************************/
-#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]
-
-/* Update chains up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE_TEMPLATE
-U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
-{
- U32* const hashTable = zc->hashTable;
- const U32 hashLog = zc->appliedParams.cParams.hashLog;
- U32* const chainTable = zc->chainTable;
- const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1;
- const BYTE* const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while(idx < target) { /* catch up */
- size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
- NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
- hashTable[h] = idx;
- idx++;
- }
-
- zc->nextToUpdate = target;
- return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
-}
-
-
-/* inlining is important to hardwire a hot branch (template emulation) */
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_HcFindBestMatch_generic (
- ZSTD_CCtx* zc, /* Index table will be updated */
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 mls, const U32 extDict)
-{
- U32* const chainTable = zc->chainTable;
- const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog);
- const U32 chainMask = chainSize-1;
- const BYTE* const base = zc->base;
- const BYTE* const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const U32 lowLimit = zc->lowLimit;
- const U32 current = (U32)(ip-base);
- const U32 minChain = current > chainSize ? current - chainSize : 0;
- int nbAttempts=maxNbAttempts;
- size_t ml=4-1;
-
- /* HC4 match finder */
- U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
-
- for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
- const BYTE* match;
- size_t currentMl=0;
- if ((!extDict) || matchIndex >= dictLimit) {
- match = base + matchIndex;
- if (match[ml] == ip[ml]) /* potentially better */
- currentMl = ZSTD_count(ip, match, iLimit);
- } else {
- match = dictBase + matchIndex;
- if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
- }
-
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
- *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
- if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
- }
-
- if (matchIndex <= minChain) break;
- matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
- }
-
- return ml;
-}
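/* Illustrative note (not part of the imported patch): the hash-chain finder
 * threads every position into a singly linked list per hash bucket.
 * chainTable[pos & chainMask] stores the previous position with the same hash,
 * the hash table stores the most recent one, and the search walks the chain
 * newest to oldest for at most maxNbAttempts steps.  Self-contained sketch with
 * toy sizes, no extDict segment and no repcodes: */
#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define HC_HASH_LOG   12
#define HC_CHAIN_LOG  12
#define HC_CHAIN_MASK ((1u << HC_CHAIN_LOG) - 1)

static uint32_t hcHead[1u << HC_HASH_LOG];       /* most recent position per bucket */
static uint32_t hcChain[1u << HC_CHAIN_LOG];     /* previous position with same hash */

static uint32_t hc_hash4(const uint8_t* p)
{
    uint32_t v; memcpy(&v, p, 4);
    return (v * 2654435761u) >> (32 - HC_HASH_LOG);
}

static size_t hc_len(const uint8_t* cur, const uint8_t* match, const uint8_t* end)
{
    size_t n = 0;
    while (cur + n < end && cur[n] == match[n]) n++;
    return n;
}

/* Insert position `cur` (positions start at 1; 0 is the empty-bucket sentinel),
 * then walk its chain and report the longest match found. */
static size_t hc_find(const uint8_t* base, uint32_t cur, const uint8_t* iend,
                      uint32_t maxAttempts, uint32_t* bestIdx)
{
    uint32_t const h = hc_hash4(base + cur);
    uint32_t matchIndex = hcHead[h];
    size_t best = 0;
    hcChain[cur & HC_CHAIN_MASK] = matchIndex;   /* link cur at the front of its chain */
    hcHead[h] = cur;
    for ( ; (matchIndex != 0) && (maxAttempts > 0); maxAttempts--) {
        size_t const ml = hc_len(base + cur, base + matchIndex, iend);
        if (ml > best) { best = ml; *bestIdx = matchIndex; }
        if (cur - matchIndex > HC_CHAIN_MASK) break;   /* older links have been overwritten */
        matchIndex = hcChain[matchIndex & HC_CHAIN_MASK];
    }
    return best;
}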
-
-
-FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
- ZSTD_CCtx* zc,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 matchLengthSearch)
-{
- switch(matchLengthSearch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
- case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
- }
-}
-
-
-FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
- ZSTD_CCtx* zc,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 maxNbAttempts, const U32 matchLengthSearch)
-{
- switch(matchLengthSearch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
- case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
- }
-}
-
-
-/* *******************************
-* Common parser - lazy strategy
-*********************************/
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize,
- const U32 searchMethod, const U32 depth)
-{
- seqStore_t* seqStorePtr = &(ctx->seqStore);
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- const BYTE* const base = ctx->base + ctx->dictLimit;
-
- U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
- U32 const mls = ctx->appliedParams.cParams.searchLength;
-
- typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
- size_t* offsetPtr,
- U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
- U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
-
- /* init */
- ip += (ip==base);
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- { U32 const maxRep = (U32)(ip-base);
- if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
- }
-
- /* Match Loop */
- while (ip < ilimit) {
- size_t matchLength=0;
- size_t offset=0;
- const BYTE* start=ip+1;
-
- /* check repCode */
- if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
- /* repcode : we take it */
- matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- if (depth==0) goto _storeSequence;
- }
-
- /* first search (depth 0) */
- { size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
- if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset=offsetFound;
- }
-
- if (matchLength < 4) {
- ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth>=1)
- while (ip<ilimit) {
- ip ++;
- if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
- size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
- int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
- if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
- }
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
- if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue; /* search a better one */
- } }
-
- /* let's find an even better one */
- if ((depth==2) && (ip<ilimit)) {
- ip ++;
- if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
- size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
- int const gain2 = (int)(ml2 * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
- if ((ml2 >= 4) && (gain2 > gain1))
- matchLength = ml2, offset = 0, start = ip;
- }
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
- if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue;
- } } }
- break; /* nothing found : store previous solution */
- }
-
- /* NOTE:
- * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
- * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
- * overflows the pointer, which is undefined behavior.
- */
- /* catch up */
- if (offset) {
- while ( (start > anchor)
- && (start > base+offset-ZSTD_REP_MOVE)
- && (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1]) ) /* only search for offset within prefix */
- { start--; matchLength++; }
- offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
- }
- /* store sequence */
-_storeSequence:
- { size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while ( (ip <= ilimit)
- && ((offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } }
-
- /* Save reps for next block */
- seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
- seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
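/* Illustrative note (not part of the imported patch): the lazy parser accepts a
 * match at ip provisionally, then re-searches at ip+1 (and ip+2 when depth==2).
 * A later candidate wins only if its estimated gain (length scaled by 4, minus
 * an approximate bit cost of its offset) beats the match in hand plus a small
 * continuation bonus; repcode candidates use a 3x scale and pay no offset cost.
 * Sketch of the offset-bearing comparison used above: */
#include <stdint.h>
#include <stddef.h>

static uint32_t toy_highbit32(uint32_t v)   /* position of the highest set bit, v > 0 */
{
    uint32_t r = 0;
    while (v >>= 1) r++;
    return r;
}

/* Nonzero when the candidate found one position later (newLen/newOffset) should
 * replace the current match; `bonus` is 4 at the first retry and 7 at the
 * second, as in the parser above. */
static int prefer_new_match(size_t newLen, uint32_t newOffset,
                            size_t curLen, uint32_t curOffset, int bonus)
{
    int const gainNew = (int)(newLen * 4) - (int)toy_highbit32(newOffset + 1);
    int const gainCur = (int)(curLen * 4) - (int)toy_highbit32(curOffset + 1) + bonus;
    return (newLen >= 4) && (gainNew > gainCur);
}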
-
-
-static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
-}
-
-static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
-}
-
-static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
-}
-
-static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
-}
-
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize,
- const U32 searchMethod, const U32 depth)
-{
- seqStore_t* seqStorePtr = &(ctx->seqStore);
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- const BYTE* const base = ctx->base;
- const U32 dictLimit = ctx->dictLimit;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* const dictBase = ctx->dictBase;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const dictStart = dictBase + ctx->lowLimit;
-
- const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
- const U32 mls = ctx->appliedParams.cParams.searchLength;
-
- typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
- size_t* offsetPtr,
- U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
-
- U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
-
- /* init */
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ip += (ip == prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- size_t matchLength=0;
- size_t offset=0;
- const BYTE* start=ip+1;
- U32 current = (U32)(ip-base);
-
- /* check repCode */
- { const U32 repIndex = (U32)(current+1 - offset_1);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
-                /* repcode detected : we should take it */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- if (depth==0) goto _storeSequence;
- } }
-
- /* first search (depth 0) */
- { size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
- if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset=offsetFound;
- }
-
- if (matchLength < 4) {
- ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth>=1)
- while (ip<ilimit) {
- ip ++;
- current++;
- /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(current - offset_1);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (MEM_read32(ip) == MEM_read32(repMatch)) {
- /* repcode detected */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- int const gain2 = (int)(repLength * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
- if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
- } }
-
- /* search match, depth 1 */
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
- if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue; /* search a better one */
- } }
-
- /* let's find an even better one */
- if ((depth==2) && (ip<ilimit)) {
- ip ++;
- current++;
- /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(current - offset_1);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (MEM_read32(ip) == MEM_read32(repMatch)) {
- /* repcode detected */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- int const gain2 = (int)(repLength * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
- if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
- } }
-
- /* search match, depth 2 */
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
- if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue;
- } } }
- break; /* nothing found : store previous solution */
- }
-
- /* catch up */
- if (offset) {
- U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
- const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
- const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
- while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
- }
-
- /* store sequence */
-_storeSequence:
- { size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while (ip <= ilimit) {
- const U32 repIndex = (U32)((ip-base) - offset_2);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                /* repcode detected : we should take it */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- break;
- } }
-
- /* Save reps for next block */
- seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-
-void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
-}
-
-static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
-}
-
-static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
-}
-
-static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
-}
-
-
-/* The optimal parser */
-#include "zstd_opt.h"
-
-static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
-#else
- (void)ctx; (void)src; (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
-#else
- (void)ctx; (void)src; (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
-#else
- (void)ctx; (void)src; (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
-#else
- (void)ctx; (void)src; (void)srcSize;
- return;
-#endif
-}
-
-
/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
-typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
-static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
{
static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
@@ -2819,18 +1565,37 @@ static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int
return blockCompressor[extDict!=0][(U32)strat];
}
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+ const BYTE* anchor, size_t lastLLSize)
+{
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+}
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->lowLimit < zc->dictLimit);
const BYTE* const base = zc->base;
const BYTE* const istart = (const BYTE*)src;
const U32 current = (U32)(istart-base);
+ size_t lastLLSize;
+ const BYTE* anchor;
+ U32 const extDict = zc->lowLimit < zc->dictLimit;
+ const ZSTD_blockCompressor blockCompressor =
+ zc->appliedParams.ldmParams.enableLdm
+ ? (extDict ? ZSTD_compressBlock_ldm_extDict : ZSTD_compressBlock_ldm)
+ : ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
+
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */
ZSTD_resetSeqStore(&(zc->seqStore));
if (current > zc->nextToUpdate + 384)
zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* limited update after finding a very long match */
- blockCompressor(zc, src, srcSize);
+
+ lastLLSize = blockCompressor(zc, src, srcSize);
+
+ /* Last literals */
+ anchor = (const BYTE*)src + srcSize - lastLLSize;
+ ZSTD_storeLastLiterals(&zc->seqStore, anchor, lastLLSize);
+
return ZSTD_compressSequences(&zc->seqStore, zc->entropy, &zc->appliedParams.cParams, dst, dstCapacity, srcSize);
}
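/* Illustrative note (not part of the imported patch): with this change every
 * block compressor returns the number of trailing bytes it left uncovered, and
 * ZSTD_compressBlock_internal() copies those "last literals" once, instead of
 * each strategy duplicating the epilogue.  Toy sketch of the new contract
 * (the names below are placeholders, not zstd API): */
#include <string.h>
#include <stddef.h>

typedef size_t (*block_fn)(void* state, const void* src, size_t srcSize);

static size_t run_block(block_fn compress, void* state, char* litBuf,
                        const void* src, size_t srcSize)
{
    size_t const lastLLSize = compress(state, src, srcSize);        /* bytes not covered by a sequence */
    const char* const anchor = (const char*)src + srcSize - lastLLSize;
    memcpy(litBuf, anchor, lastLLSize);                              /* single shared epilogue */
    return lastLLSize;
}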
@@ -2852,7 +1617,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
- U32 const maxDist = 1 << cctx->appliedParams.cParams.windowLog;
+ U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
@@ -2865,13 +1630,33 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
if (remaining < blockSize) blockSize = remaining;
- /* preemptive overflow correction */
+ /* preemptive overflow correction:
+ * 1. correction is large enough:
+ * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog - blockSize
+ * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+ *
+ * current - newCurrent
+ * > (3<<29 + 1<<windowLog - blockSize) - (1<<windowLog + 1<<chainLog)
+ * > (3<<29 - blockSize) - (1<<chainLog)
+ * > (3<<29 - blockSize) - (1<<30) (NOTE: chainLog <= 30)
+ * > 1<<29 - 1<<17
+ *
+ * 2. (ip+blockSize - cctx->base) doesn't overflow:
+ * In 32 bit mode we limit windowLog to 30 so we don't get
+ * differences larger than 1<<31-1.
+ * 3. cctx->lowLimit < 1<<32:
+ * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+ */
if (cctx->lowLimit > (3U<<29)) {
- U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->appliedParams.cParams.hashLog, cctx->appliedParams.cParams.strategy)) - 1;
+ U32 const cycleMask = ((U32)1 << ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy)) - 1;
U32 const current = (U32)(ip - cctx->base);
- U32 const newCurrent = (current & cycleMask) + (1 << cctx->appliedParams.cParams.windowLog);
+ U32 const newCurrent = (current & cycleMask) + ((U32)1 << cctx->appliedParams.cParams.windowLog);
U32 const correction = current - newCurrent;
- ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ assert(current > newCurrent);
+ assert(correction > 1<<28); /* Loose bound, should be about 1<<29 */
ZSTD_reduceIndex(cctx, correction);
cctx->base += correction;
cctx->dictBase += correction;
@@ -2879,6 +1664,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
cctx->dictLimit -= correction;
if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
else cctx->nextToUpdate -= correction;
+ DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x\n", correction, cctx->lowLimit);
}
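/* Illustrative note (not part of the imported patch): once lowLimit passes
 * 3<<29 the 32-bit indices risk overflowing, so everything is rebased.
 * newCurrent keeps the position inside the cycle mask plus one window,
 * correction = current - newCurrent is subtracted from base, dictBase,
 * lowLimit, dictLimit and nextToUpdate, and ZSTD_reduceIndex() applies the
 * same subtraction to every table entry.  Sketch of the table-side step,
 * saturating at 0 so out-of-window entries simply drop out: */
#include <stdint.h>
#include <stddef.h>

static void reduce_table(uint32_t* table, size_t size, uint32_t correction)
{
    size_t i;
    for (i = 0; i < size; i++)
        table[i] = (table[i] < correction) ? 0 : table[i] - correction;
}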
if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
@@ -2915,25 +1701,29 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
- ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
+ ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
{ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params.fParams.checksumFlag>0;
- U32 const windowSize = 1U << params.cParams.windowLog;
+ U32 const windowSize = (U32)1 << params.cParams.windowLog;
U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
U32 const fcsCode = params.fParams.contentSizeFlag ?
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
- size_t pos;
+ size_t pos=0;
if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
- DEBUGLOG(5, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+ DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
!params.fParams.noDictIDFlag, dictID, dictIDSizeCode);
- MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
- op[4] = frameHeaderDecriptionByte; pos=5;
+ if (params.format == ZSTD_f_zstd1) {
+ DEBUGLOG(4, "writing zstd magic number");
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ pos = 4;
+ }
+ op[pos++] = frameHeaderDecriptionByte;
if (!singleSegment) op[pos++] = windowLogByte;
switch(dictIDSizeCode)
{
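/* Illustrative note (not part of the imported patch): the frame-header
 * descriptor byte built a few lines up packs four fields: bits 0-1 carry the
 * dictID size code, bit 2 the checksum flag, bit 5 the single-segment flag and
 * bits 6-7 the frame-content-size code (bits 3-4 stay reserved).  Pack/unpack
 * sketch mirroring that expression: */
#include <stdint.h>

static uint8_t pack_fhd(uint32_t dictIDSizeCode, uint32_t checksumFlag,
                        uint32_t singleSegment, uint32_t fcsCode)
{
    return (uint8_t)(dictIDSizeCode + (checksumFlag << 2)
                   + (singleSegment << 5) + (fcsCode << 6));
}

static void unpack_fhd(uint8_t fhd,
                       uint32_t* dictIDSizeCode, uint32_t* checksumFlag,
                       uint32_t* singleSegment, uint32_t* fcsCode)
{
    *dictIDSizeCode = fhd & 3;
    *checksumFlag   = (fhd >> 2) & 1;
    *singleSegment  = (fhd >> 5) & 1;
    *fcsCode        = (fhd >> 6) & 3;
}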
@@ -2969,7 +1759,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
if (frame && (cctx->stage==ZSTDcs_init)) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
- cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+ cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
if (ZSTD_isError(fhSize)) return fhSize;
dstCapacity -= fhSize;
dst = (char*)dst + fhSize;
@@ -3018,11 +1808,9 @@ size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
{
- U32 const cLevel = cctx->compressionLevel;
- ZSTD_compressionParameters cParams = (cLevel == ZSTD_CLEVEL_CUSTOM) ?
- cctx->appliedParams.cParams :
- ZSTD_getCParams(cLevel, 0, 0);
- return MIN (ZSTD_BLOCKSIZE_MAX, 1 << cParams.windowLog);
+ ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(cctx->appliedParams, 0, 0);
+ return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
}
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
@@ -3046,7 +1834,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t
zc->dictBase = zc->base;
zc->base += ip - zc->nextSrc;
zc->nextToUpdate = zc->dictLimit;
- zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
+ zc->loadedDictEnd = zc->appliedParams.forceWindow ? 0 : (U32)(iend - zc->base);
zc->nextSrc = iend;
if (srcSize <= HASH_READ_SIZE) return 0;
@@ -3056,7 +1844,6 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t
case ZSTD_fast:
ZSTD_fillHashTable (zc, iend, zc->appliedParams.cParams.searchLength);
break;
-
case ZSTD_dfast:
ZSTD_fillDoubleHashTable (zc, iend, zc->appliedParams.cParams.searchLength);
break;
@@ -3072,7 +1859,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t
case ZSTD_btopt:
case ZSTD_btultra:
if (srcSize >= HASH_READ_SIZE)
- ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->appliedParams.cParams.searchLog, zc->appliedParams.cParams.searchLength);
+ ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, (U32)1 << zc->appliedParams.cParams.searchLog, zc->appliedParams.cParams.searchLength);
break;
default:
@@ -3120,8 +1907,10 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
cctx->dictID = cctx->appliedParams.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr);
dictPtr += 4;
- { size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, 255, dictPtr, dictEnd-dictPtr);
+ { unsigned maxSymbolValue = 255;
+ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+ if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
dictPtr += hufHeaderSize;
}
@@ -3221,12 +2010,10 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictMode_e dictMode,
const ZSTD_CDict* cdict,
- ZSTD_parameters params, U64 pledgedSrcSize,
+ ZSTD_CCtx_params params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(4, "ZSTD_compressBegin_internal");
- DEBUGLOG(4, "dict ? %s", dict ? "dict" : (cdict ? "cdict" : "none"));
- DEBUGLOG(4, "dictMode : %u", (U32)dictMode);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
@@ -3242,6 +2029,19 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
return ZSTD_compress_insertDictionary(cctx, dict, dictSize, dictMode);
}
+size_t ZSTD_compressBegin_advanced_internal(
+ ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictMode_e dictMode,
+ ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize)
+{
+ /* compression parameters verification and optimization */
+ CHECK_F( ZSTD_checkCParams(params.cParams) );
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictMode, NULL,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
/*! ZSTD_compressBegin_advanced() :
* @return : 0, or an error code */
@@ -3249,21 +2049,22 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
- /* compression parameters verification and optimization */
- CHECK_F(ZSTD_checkCParams(params.cParams));
- return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
- params, pledgedSrcSize, ZSTDb_not_buffered);
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+ return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dm_auto,
+ cctxParams,
+ pledgedSrcSize);
}
-
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
- params, 0, ZSTDb_not_buffered);
+ cctxParams, 0, ZSTDb_not_buffered);
}
-
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
@@ -3324,9 +2125,9 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
if (ZSTD_isError(endResult)) return endResult;
if (cctx->appliedParams.fParams.contentSizeFlag) { /* control src size */
- DEBUGLOG(5, "end of frame : controlling src size");
+ DEBUGLOG(4, "end of frame : controlling src size");
if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
- DEBUGLOG(5, "error : pledgedSrcSize = %u, while realSrcSize = %u",
+ DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
(U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
return ERROR(srcSize_wrong);
} }
@@ -3340,9 +2141,13 @@ static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
const void* dict,size_t dictSize,
ZSTD_parameters params)
{
- CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
- params, srcSize, ZSTDb_not_buffered) );
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+ return ZSTD_compress_advanced_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ cctxParams);
}
size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
@@ -3355,6 +2160,19 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
}
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_CCtx_params params)
+{
+ CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
+ params, srcSize, ZSTDb_not_buffered) );
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
const void* dict, size_t dictSize, int compressionLevel)
{
@@ -3384,18 +2202,21 @@ size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcS
/*! ZSTD_estimateCDictSize_advanced() :
* Estimate amount of memory that will be needed to create a dictionary with following arguments */
-size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, unsigned byReference)
+size_t ZSTD_estimateCDictSize_advanced(
+ size_t dictSize, ZSTD_compressionParameters cParams,
+ ZSTD_dictLoadMethod_e dictLoadMethod)
{
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
- DEBUGLOG(5, "CCtx estimate : %u", (U32)ZSTD_estimateCCtxSize_advanced(cParams));
- return sizeof(ZSTD_CDict) + ZSTD_estimateCCtxSize_advanced(cParams)
- + (byReference ? 0 : dictSize);
+ DEBUGLOG(5, "CCtx estimate : %u",
+ (U32)ZSTD_estimateCCtxSize_usingCParams(cParams));
+ return sizeof(ZSTD_CDict) + ZSTD_estimateCCtxSize_usingCParams(cParams)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
- return ZSTD_estimateCDictSize_advanced(dictSize, cParams, 0);
+ return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
@@ -3406,22 +2227,15 @@ size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
}
-static ZSTD_parameters ZSTD_makeParams(ZSTD_compressionParameters cParams, ZSTD_frameParameters fParams)
-{
- ZSTD_parameters params;
- params.cParams = cParams;
- params.fParams = fParams;
- return params;
-}
-
static size_t ZSTD_initCDict_internal(
ZSTD_CDict* cdict,
const void* dictBuffer, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams)
{
DEBUGLOG(5, "ZSTD_initCDict_internal, mode %u", (U32)dictMode);
- if ((byReference) || (!dictBuffer) || (!dictSize)) {
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictBuffer = NULL;
cdict->dictContent = dictBuffer;
} else {
@@ -3433,13 +2247,12 @@ static size_t ZSTD_initCDict_internal(
}
cdict->dictContentSize = dictSize;
- { ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */,
- 0 /* checksumFlag */, 0 /* noDictIDFlag */ }; /* dummy */
- ZSTD_parameters const params = ZSTD_makeParams(cParams, fParams);
+ { ZSTD_CCtx_params cctxParams = cdict->refContext->requestedParams;
+ cctxParams.cParams = cParams;
CHECK_F( ZSTD_compressBegin_internal(cdict->refContext,
cdict->dictContent, dictSize, dictMode,
NULL,
- params, ZSTD_CONTENTSIZE_UNKNOWN,
+ cctxParams, ZSTD_CONTENTSIZE_UNKNOWN,
ZSTDb_not_buffered) );
}
@@ -3447,7 +2260,8 @@ static size_t ZSTD_initCDict_internal(
}
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
DEBUGLOG(5, "ZSTD_createCDict_advanced, mode %u", (U32)dictMode);
@@ -3462,10 +2276,9 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
return NULL;
}
cdict->refContext = cctx;
-
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dictBuffer, dictSize,
- byReference, dictMode,
+ dictLoadMethod, dictMode,
cParams) )) {
ZSTD_freeCDict(cdict);
return NULL;
@@ -3479,7 +2292,7 @@ ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionL
{
ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
return ZSTD_createCDict_advanced(dict, dictSize,
- 0 /* byReference */, ZSTD_dm_auto,
+ ZSTD_dlm_byCopy, ZSTD_dm_auto,
cParams, ZSTD_defaultCMem);
}
@@ -3487,7 +2300,7 @@ ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int
{
ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
return ZSTD_createCDict_advanced(dict, dictSize,
- 1 /* byReference */, ZSTD_dm_auto,
+ ZSTD_dlm_byRef, ZSTD_dm_auto,
cParams, ZSTD_defaultCMem);
}
@@ -3517,11 +2330,12 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
*/
ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams)
{
- size_t const cctxSize = ZSTD_estimateCCtxSize_advanced(cParams);
- size_t const neededSize = sizeof(ZSTD_CDict) + (byReference ? 0 : dictSize)
+ size_t const cctxSize = ZSTD_estimateCCtxSize_usingCParams(cParams);
+ size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
+ cctxSize;
ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
void* ptr;
@@ -3531,7 +2345,7 @@ ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize,
(U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
- if (!byReference) {
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
memcpy(cdict+1, dict, dictSize);
dict = cdict+1;
ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
@@ -3542,15 +2356,15 @@ ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize,
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
- 1 /* byReference */, dictMode,
+ ZSTD_dlm_byRef, dictMode,
cParams) ))
return NULL;
return cdict;
}
-ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) {
- return ZSTD_getParamsFromCCtx(cdict->refContext);
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) {
+ return cdict->refContext->appliedParams.cParams;
}
/* ZSTD_compressBegin_usingCDict_advanced() :
@@ -3560,7 +2374,8 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
if (cdict==NULL) return ERROR(dictionary_wrong);
- { ZSTD_parameters params = cdict->refContext->appliedParams;
+ { ZSTD_CCtx_params params = cctx->requestedParams;
+ params.cParams = ZSTD_getCParamsFromCDict(cdict);
params.fParams = fParams;
DEBUGLOG(5, "ZSTD_compressBegin_usingCDict_advanced");
return ZSTD_compressBegin_internal(cctx,
@@ -3644,7 +2459,7 @@ size_t ZSTD_CStreamOutSize(void)
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
const ZSTD_CDict* cdict,
- ZSTD_parameters params, unsigned long long pledgedSrcSize)
+ const ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_resetCStream_internal");
/* params are supposed to be fully validated at this point */
@@ -3668,13 +2483,11 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs,
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
{
- ZSTD_parameters params = zcs->requestedParams;
+ ZSTD_CCtx_params params = zcs->requestedParams;
params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
- DEBUGLOG(5, "ZSTD_resetCStream");
- if (zcs->compressionLevel != ZSTD_CLEVEL_CUSTOM) {
- params.cParams = ZSTD_getCParams(zcs->compressionLevel, pledgedSrcSize, 0 /* dictSize */);
- }
- return ZSTD_resetCStream_internal(zcs, NULL, 0, zcs->dictMode, zcs->cdict, params, pledgedSrcSize);
+ params.cParams = ZSTD_getCParamsFromCCtxParams(params, pledgedSrcSize, 0);
+ DEBUGLOG(4, "ZSTD_resetCStream");
+ return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize);
}
/*! ZSTD_initCStream_internal() :
@@ -3683,9 +2496,9 @@ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
* Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
- ZSTD_parameters params, unsigned long long pledgedSrcSize)
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
- DEBUGLOG(5, "ZSTD_initCStream_internal");
+ DEBUGLOG(4, "ZSTD_initCStream_internal");
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
@@ -3697,23 +2510,23 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
}
ZSTD_freeCDict(zcs->cdictLocal);
zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
- zcs->dictContentByRef, zcs->dictMode,
+ ZSTD_dlm_byCopy, ZSTD_dm_auto,
params.cParams, zcs->customMem);
zcs->cdict = zcs->cdictLocal;
if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
} else {
if (cdict) {
- ZSTD_parameters const cdictParams = ZSTD_getParamsFromCDict(cdict);
- params.cParams = cdictParams.cParams; /* cParams are enforced from cdict */
+ params.cParams = ZSTD_getCParamsFromCDict(cdict); /* cParams are enforced from cdict */
}
ZSTD_freeCDict(zcs->cdictLocal);
zcs->cdictLocal = NULL;
zcs->cdict = cdict;
}
+ params.compressionLevel = ZSTD_CLEVEL_CUSTOM;
zcs->requestedParams = params;
- zcs->compressionLevel = ZSTD_CLEVEL_CUSTOM;
- return ZSTD_resetCStream_internal(zcs, NULL, 0, zcs->dictMode, zcs->cdict, params, pledgedSrcSize);
+
+ return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize);
}
/* ZSTD_initCStream_usingCDict_advanced() :
@@ -3724,7 +2537,8 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
unsigned long long pledgedSrcSize)
{ /* cannot handle NULL cdict (does not know what to do) */
if (!cdict) return ERROR(dictionary_wrong);
- { ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict);
+ { ZSTD_CCtx_params params = zcs->requestedParams;
+ params.cParams = ZSTD_getCParamsFromCDict(cdict);
params.fParams = fParams;
return ZSTD_initCStream_internal(zcs,
NULL, 0, cdict,
@@ -3743,30 +2557,32 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
CHECK_F( ZSTD_checkCParams(params.cParams) );
- zcs->requestedParams = params;
- zcs->compressionLevel = ZSTD_CLEVEL_CUSTOM;
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, params, pledgedSrcSize);
+ return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, pledgedSrcSize);
}
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
- zcs->compressionLevel = compressionLevel;
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, params, 0);
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+ return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, 0);
}
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
{
- ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
- params.fParams.contentSizeFlag = (pledgedSrcSize>0);
- return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, params, pledgedSrcSize);
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+ cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+ cctxParams.fParams.contentSizeFlag = (pledgedSrcSize>0);
+ return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
}
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
- return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, params, 0);
+ return ZSTD_initCStream_srcSize(zcs, compressionLevel, 0);
}
/*====== Compression ======*/
@@ -3781,6 +2597,7 @@ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
/** ZSTD_compressStream_generic():
* internal function for all *compressStream*() variants and *compress_generic()
+ * non-static, because it can be called from zstdmt.c
* @return : hint size for next input */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
@@ -3934,21 +2751,13 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf
return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
}
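
/* Editorial sketch (hypothetical helper) of the caller side of ZSTD_compress_generic() :
 * feed the whole input with ZSTD_e_continue, then finish the frame with ZSTD_e_end until
 * it reports 0 remaining. Assumes a cctx from ZSTD_createCCtx() and
 * dstCapacity >= ZSTD_compressBound(srcSize), so the flush loop cannot stall on a full
 * output buffer. */
static size_t example_compressGenericOneShot(ZSTD_CCtx* cctx,
                                             void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    while (input.pos < input.size) {
        size_t const hint = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_continue);
        if (ZSTD_isError(hint)) return hint;
    }
    for (;;) {
        size_t const remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        if (remaining == 0) break;      /* frame fully flushed into dst */
    }
    return output.pos;                  /* compressed size */
}
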
-/*! ZSTDMT_initCStream_internal() :
- * Private use only. Init streaming operation.
- * expects params to be valid.
- * must receive dict, or cdict, or none, but not both.
- * @return : 0, or an error code */
-size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
- const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
- ZSTD_parameters params, unsigned long long pledgedSrcSize);
-
size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
{
+ DEBUGLOG(5, "ZSTD_compress_generic");
/* check conditions */
if (output->pos > output->size) return ERROR(GENERIC);
if (input->pos > input->size) return ERROR(GENERIC);
@@ -3956,31 +2765,42 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
- const void* const prefix = cctx->prefix;
- size_t const prefixSize = cctx->prefixSize;
- ZSTD_parameters params = cctx->requestedParams;
- if (cctx->compressionLevel != ZSTD_CLEVEL_CUSTOM)
- params.cParams = ZSTD_getCParams(cctx->compressionLevel,
- cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
- cctx->prefix = NULL; cctx->prefixSize = 0; /* single usage */
- assert(prefix==NULL || cctx->cdict==NULL); /* only one can be set */
+ ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ ZSTD_CCtx_params params = cctx->requestedParams;
+ params.cParams = ZSTD_getCParamsFromCCtxParams(
+ cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
#ifdef ZSTD_MULTITHREAD
- if (cctx->nbThreads > 1) {
- DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", cctx->nbThreads);
- CHECK_F( ZSTDMT_initCStream_internal(cctx->mtctx, prefix, prefixSize, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
+ if (params.nbThreads > 1) {
+ if (cctx->mtctx == NULL || cctx->appliedParams.nbThreads != params.nbThreads) {
+ ZSTDMT_freeCCtx(cctx->mtctx);
+ cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbThreads, cctx->customMem);
+ if (cctx->mtctx == NULL) return ERROR(memory_allocation);
+ }
+ DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", params.nbThreads);
+ CHECK_F( ZSTDMT_initCStream_internal(
+ cctx->mtctx,
+ prefixDict.dict, prefixDict.dictSize, ZSTD_dm_rawContent,
+ cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
cctx->streamStage = zcss_load;
+ cctx->appliedParams.nbThreads = params.nbThreads;
} else
#endif
{
- CHECK_F( ZSTD_resetCStream_internal(cctx, prefix, prefixSize, cctx->dictMode, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
+ CHECK_F( ZSTD_resetCStream_internal(
+ cctx, prefixDict.dict, prefixDict.dictSize,
+ prefixDict.dictMode, cctx->cdict, params,
+ cctx->pledgedSrcSizePlusOne-1) );
} }
/* compression stage */
#ifdef ZSTD_MULTITHREAD
- if (cctx->nbThreads > 1) {
+ if (cctx->appliedParams.nbThreads > 1) {
size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
- DEBUGLOG(5, "ZSTDMT_compressStream_generic : %u", (U32)flushMin);
+ DEBUGLOG(5, "ZSTDMT_compressStream_generic result : %u", (U32)flushMin);
if ( ZSTD_isError(flushMin)
|| (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
ZSTD_startNewCompression(cctx);
@@ -3988,7 +2808,6 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
return flushMin;
}
#endif
-
CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
DEBUGLOG(5, "completed ZSTD_compress_generic");
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
@@ -4189,6 +3008,7 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
{ ZSTD_compressionParameters const cp = ZSTD_defaultCParameters[tableID][compressionLevel];
return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
+
}
/*! ZSTD_getParams() :
diff --git a/lib/compress/zstd_compress.h b/lib/compress/zstd_compress.h
new file mode 100644
index 000000000000..94606edc935e
--- /dev/null
+++ b/lib/compress/zstd_compress.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "zstd_internal.h"
+#ifdef ZSTD_MULTITHREAD
+# include "zstdmt_compress.h"
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+* Constants
+***************************************/
+static const U32 g_searchStrength = 8;
+#define HASH_READ_SIZE 8
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+ const void* dict;
+ size_t dictSize;
+ ZSTD_dictMode_e dictMode;
+} ZSTD_prefixDict;
+
+struct ZSTD_CCtx_s {
+ const BYTE* nextSrc; /* next block here to continue on current prefix */
+ const BYTE* base; /* All regular indexes relative to this position */
+ const BYTE* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more data */
+ U32 nextToUpdate; /* index from which to continue dictionary update */
+ U32 nextToUpdate3; /* index from which to continue dictionary update */
+ U32 hashLog3; /* dispatch table : larger == faster, more memory */
+ U32 loadedDictEnd; /* index of end of dictionary */
+ ZSTD_compressionStage_e stage;
+ U32 dictID;
+ ZSTD_CCtx_params requestedParams;
+ ZSTD_CCtx_params appliedParams;
+ void* workSpace;
+ size_t workSpaceSize;
+ size_t blockSize;
+ U64 pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
+ U64 consumedSrcSize;
+ XXH64_state_t xxhState;
+ ZSTD_customMem customMem;
+ size_t staticSize;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ optState_t optState;
+ ldmState_t ldmState; /* long distance matching state */
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
+ ZSTD_entropyCTables_t* entropy;
+
+ /* streaming */
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage streamStage;
+ U32 frameEnded;
+
+ /* Dictionary */
+ ZSTD_CDict* cdictLocal;
+ const ZSTD_CDict* cdict;
+ ZSTD_prefixDict prefixDict; /* single-usage dictionary */
+
+ /* Multi-threading */
+#ifdef ZSTD_MULTITHREAD
+ ZSTDMT_CCtx* mtctx;
+#endif
+};
+
+
+static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+
+static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
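+
+/* Editorial illustration (hypothetical helpers, not from the zstd sources) : the two
+ * tables above map small literal lengths, and match lengths expressed as
+ * matchLength - MINMATCH, directly to their symbol codes ; longer values are handled
+ * elsewhere by a log2-based fallback. */
static BYTE example_LLcode_small(U32 litLength)
{
    assert(litLength <= 63);          /* direct range of LL_Code */
    return LL_Code[litLength];        /* e.g. LL_Code[18] == 17, LL_Code[40] == 23 */
}
static BYTE example_MLcode_small(U32 matchLength)
{
    U32 const mlBase = matchLength - MINMATCH;
    assert(mlBase <= 127);            /* direct range of ML_Code */
    return ML_Code[mlBase];           /* e.g. ML_Code[32] == 32, ML_Code[64] == 40 */
}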
+
+/*! ZSTD_storeSeq() :
+ Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+ `offsetCode` : distance to match, or 0 == repCode.
+ `matchCode` : matchLength - MINMATCH
+*/
+MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
+{
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
+ static const BYTE* g_start = NULL;
+ U32 const pos = (U32)((const BYTE*)literals - g_start);
+ if (g_start==NULL) g_start = (const BYTE*)literals;
+ if ((pos > 0) && (pos < 1000000000))
+ DEBUGLOG(6, "Cpos %6u :%5u literals & match %3u bytes at distance %6u",
+ pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+#endif
+ /* copy Literals */
+ assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
+ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) {
+ seqStorePtr->longLengthID = 1;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offset = offsetCode + 1;
+
+ /* match Length */
+ if (matchCode>0xFFFF) {
+ seqStorePtr->longLengthID = 2;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].matchLength = (U16)matchCode;
+
+ seqStorePtr->sequences++;
+}
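
/* Editorial sketch (hypothetical helper) : the two call shapes used by the block
 * compressors in zstd_fast.c / zstd_double_fast.c. A regular match passes its offset
 * biased by ZSTD_REP_MOVE ; a repeat-offset match passes offsetCode 0 instead. */
MEM_STATIC void example_storeMatch(seqStore_t* seqStorePtr,
                                   const BYTE* anchor, const BYTE* ip,
                                   U32 offset, size_t mLength, int isRepCode)
{
    if (isRepCode)
        ZSTD_storeSeq(seqStorePtr, (size_t)(ip - anchor), anchor, 0, mLength - MINMATCH);
    else
        ZSTD_storeSeq(seqStorePtr, (size_t)(ip - anchor), anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
}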
+
+
+/*-*************************************
+* Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (register size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanForward64( &r, (U64)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
+ unsigned long r=0;
+ _BitScanForward( &r, (U32)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+                const unsigned n32 = sizeof(size_t)*4;   /* calculated this way because the compiler complains in 32-bit mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
+ unsigned long r = 0;
+ _BitScanReverse( &r, (unsigned long)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
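
/* Editorial sketch (hypothetical helper) : ZSTD_NbCommonBytes() is fed the XOR of two
 * machine words ; the position of the first set bit, divided by 8, is the number of
 * leading bytes the two words have in common (on little-endian ; the big-endian path
 * counts from the other end accordingly). */
MEM_STATIC unsigned example_commonBytesOfWords(const void* p1, const void* p2)
{
    size_t const diff = MEM_readST(p1) ^ MEM_readST(p2);
    if (diff == 0) return (unsigned)sizeof(size_t);   /* the whole word matches */
    return ZSTD_NbCommonBytes(diff);
}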
+
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ }
+ if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+}
+
+/** ZSTD_count_2segments() :
+* can count match length with `ip` & `match` in 2 different segments.
+* convention : on reaching mEnd, the match count continues starting from iStart
+*/
+MEM_STATIC size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
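
/* Editorial worked example (hypothetical helper ; buffer contents are made up) : the
 * "old" segment is logically followed by the current prefix starting at iStart, so a
 * match that reaches mEnd keeps counting from there. Here the virtual match source is
 * "hello_wor" followed by "ld_hello_world!!", the input at ip is "hello_world!!",
 * and the function returns 11. */
MEM_STATIC size_t example_count2segments(void)
{
    static const BYTE oldSeg[] = "hello_wor";        /* extDict segment, ends at mEnd */
    static const BYTE curSeg[] = "ld_hello_world!!"; /* current prefix, starts at iStart */
    const BYTE* const match  = oldSeg;               /* candidate inside the old segment */
    const BYTE* const mEnd   = oldSeg + 9;
    const BYTE* const iStart = curSeg;
    const BYTE* const ip     = curSeg + 3;           /* "hello_world!!" */
    const BYTE* const iEnd   = curSeg + sizeof(curSeg) - 1;
    return ZSTD_count_2segments(ip, match, iEnd, mEnd, iStart);   /* == 11 */
}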
+
+
+/*-*************************************
+* Hashes
+***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+}
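
/* Editorial note (hypothetical helper) : every hash above multiplies by a large odd
 * constant so the selected input bytes diffuse into the high bits, then keeps only the
 * top `hBits` bits, which is always a valid index into a table of (1 << hBits) entries. */
MEM_STATIC size_t example_hashToIndex(const void* p, U32 hBits, U32 mls)
{
    size_t const h = ZSTD_hashPtr(p, hBits, mls);
    assert(h < ((size_t)1 << hBits));   /* guaranteed by the >> (32-h) / >> (64-h) shifts */
    return h;
}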
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_COMPRESS_H */
diff --git a/lib/compress/zstd_double_fast.c b/lib/compress/zstd_double_fast.c
new file mode 100644
index 000000000000..876a36042ce8
--- /dev/null
+++ b/lib/compress/zstd_double_fast.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls)
+{
+ U32* const hashLarge = cctx->hashTable;
+ U32 const hBitsL = cctx->appliedParams.cParams.hashLog;
+ U32* const hashSmall = cctx->chainTable;
+ U32 const hBitsS = cctx->appliedParams.cParams.chainLog;
+ const BYTE* const base = cctx->base;
+ const BYTE* ip = base + cctx->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const size_t fastHashFillStep = 3;
+
+ while(ip <= iend) {
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
+ hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
+ ip += fastHashFillStep;
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
+ const void* src, size_t srcSize,
+ const U32 mls)
+{
+ U32* const hashLong = cctx->hashTable;
+ const U32 hBitsL = cctx->appliedParams.cParams.hashLog;
+ U32* const hashSmall = cctx->chainTable;
+ const U32 hBitsS = cctx->appliedParams.cParams.chainLog;
+ seqStore_t* seqStorePtr = &(cctx->seqStore);
+ const BYTE* const base = cctx->base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = cctx->dictLimit;
+ const BYTE* const lowest = base + lowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ ip += (ip==lowest);
+ { U32 const maxRep = (U32)(ip-lowest);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ U32 const current = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 const matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
+ hashLong[h2] = hashSmall[h] = current; /* update hash tables */
+
+ assert(offset_1 <= current); /* supposed guaranteed by construction */
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ /* favor repcode */
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ U32 offset;
+ if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
+ size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndexL3 = hashLong[hl3];
+ const BYTE* matchL3 = base + matchIndexL3;
+ hashLong[hl3] = current + 1;
+ if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip-match);
+ while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ } else {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ }
+
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+
+ /* save reps for next block */
+ seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+ seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ const U32 mls = ctx->appliedParams.cParams.searchLength;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 mls)
+{
+ U32* const hashLong = ctx->hashTable;
+ U32 const hBitsL = ctx->appliedParams.cParams.hashLog;
+ U32* const hashSmall = ctx->chainTable;
+ U32 const hBitsS = ctx->appliedParams.cParams.chainLog;
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const base = ctx->base;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = ctx->lowLimit;
+ const BYTE* const dictStart = dictBase + lowestIndex;
+ const U32 dictLimit = ctx->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
+ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+ const U32 current = (U32)(ip-base);
+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
+ const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* repMatch = repBase + repIndex;
+ size_t mLength;
+ hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
+
+ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+ offset = current - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
+ hashLong[h3] = current + 1;
+ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+ ip++;
+ offset = current+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+ offset = current - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ } }
+
+ /* found a match : store it */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+{
+ U32 const mls = ctx->appliedParams.cParams.searchLength;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7);
+ }
+}
diff --git a/lib/compress/zstd_double_fast.h b/lib/compress/zstd_double_fast.h
new file mode 100644
index 000000000000..3dba6c7108e5
--- /dev/null
+++ b/lib/compress/zstd_double_fast.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+#include "zstd_compress.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls);
+size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c
new file mode 100644
index 000000000000..2e057017b92e
--- /dev/null
+++ b/lib/compress/zstd_fast.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
+{
+ U32* const hashTable = zc->hashTable;
+ U32 const hBits = zc->appliedParams.cParams.hashLog;
+ const BYTE* const base = zc->base;
+ const BYTE* ip = base + zc->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const size_t fastHashFillStep = 3;
+
+ while(ip <= iend) {
+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
+ ip += fastHashFillStep;
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
+ const void* src, size_t srcSize,
+ const U32 mls)
+{
+ U32* const hashTable = cctx->hashTable;
+ U32 const hBits = cctx->appliedParams.cParams.hashLog;
+ seqStore_t* seqStorePtr = &(cctx->seqStore);
+ const BYTE* const base = cctx->base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = cctx->dictLimit;
+ const BYTE* const lowest = base + lowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ ip += (ip==lowest);
+ { U32 const maxRep = (U32)(ip-lowest);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hBits, mls);
+ U32 const current = (U32)(ip-base);
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
+ hashTable[h] = current; /* update hash table */
+
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ U32 offset;
+ if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ }
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip-match);
+ while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+
+ /* save reps for next block */
+ seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+ seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+{
+ const U32 mls = ctx->appliedParams.cParams.searchLength;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 mls)
+{
+ U32* hashTable = ctx->hashTable;
+ const U32 hBits = ctx->appliedParams.cParams.hashLog;
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const base = ctx->base;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = ctx->lowLimit;
+ const BYTE* const dictStart = dictBase + lowestIndex;
+ const U32 dictLimit = ctx->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t h = ZSTD_hashPtr(ip, hBits, mls);
+ const U32 matchIndex = hashTable[h];
+ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+ const U32 current = (U32)(ip-base);
+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
+ const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* repMatch = repBase + repIndex;
+ size_t mLength;
+ hashTable[h] = current; /* update hash table */
+
+ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ if ( (matchIndex < lowestIndex) ||
+ (MEM_read32(match) != MEM_read32(ip)) ) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ }
+ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset = current - matchIndex;
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ } }
+
+ /* found a match : store it */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+{
+ U32 const mls = ctx->appliedParams.cParams.searchLength;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7);
+ }
+}
diff --git a/lib/compress/zstd_fast.h b/lib/compress/zstd_fast.h
new file mode 100644
index 000000000000..4205141a9aa0
--- /dev/null
+++ b/lib/compress/zstd_fast.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+#include "zstd_compress.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+void ZSTD_fillHashTable(ZSTD_CCtx* zc, const void* end, const U32 mls);
+size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_FAST_H */
diff --git a/lib/compress/zstd_lazy.c b/lib/compress/zstd_lazy.c
new file mode 100644
index 000000000000..2a7f6a0fe2be
--- /dev/null
+++ b/lib/compress/zstd_lazy.c
@@ -0,0 +1,749 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+/** ZSTD_insertBt1() : add one or multiple positions to tree.
+* ip : assumed <= iend-8 .
+* @return : nb of positions added */
+static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares,
+ U32 extDict)
+{
+ U32* const hashTable = zc->hashTable;
+ U32 const hashLog = zc->appliedParams.cParams.hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = zc->chainTable;
+ U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = zc->base;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ const U32 current = (U32)(ip-base);
+ const U32 btLow = btMask >= current ? 0 : current - btMask;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowLow = zc->lowLimit;
+ U32 matchEndIdx = current+8;
+ size_t bestLength = 8;
+#ifdef ZSTD_C_PREDICT
+ U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+ assert(ip <= iend-8); /* required for h calculation */
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+
+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+#endif
+ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ if (match[matchLength] == ip[matchLength])
+ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
+            break;      /* drop, to guarantee consistency ; misses a bit of compression, but other solutions can corrupt the tree */
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match+1 is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+ if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+ if (matchEndIdx > current + 8) return matchEndIdx - (current + 8);
+ return 1;
+}
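
/* Editorial sketch (hypothetical helpers) : the chainTable doubles as a binary tree
 * stored in a rolling buffer. Each position owns a pair of slots ; slot 0 is the root
 * of the subtree of lexicographically smaller suffixes, slot 1 the root of the larger
 * ones, which is exactly what the smallerPtr / largerPtr walk above maintains. */
static U32 example_btSmaller(const U32* bt, U32 btMask, U32 pos)
{
    return bt[2*(pos & btMask)];        /* suffixes comparing smaller than pos */
}
static U32 example_btLarger(const U32* bt, U32 btMask, U32 pos)
{
    return bt[2*(pos & btMask) + 1];    /* suffixes comparing larger than pos */
}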
+
+
+static size_t ZSTD_insertBtAndFindBestMatch (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 nbCompares, const U32 mls,
+ U32 extDict)
+{
+ U32* const hashTable = zc->hashTable;
+ U32 const hashLog = zc->appliedParams.cParams.hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = zc->chainTable;
+ U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = zc->base;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const U32 current = (U32)(ip-base);
+ const U32 btLow = btMask >= current ? 0 : current - btMask;
+ const U32 windowLow = zc->lowLimit;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = bt + 2*(current&btMask) + 1;
+ U32 matchEndIdx = current+8;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ assert(ip <= iend-8); /* required for h calculation */
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ if (match[matchLength] == ip[matchLength])
+ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
+                break;   /* drop, to guarantee consistency (misses a little bit of compression) */
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
+ return bestLength;
+}
+
+
+void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+{
+ const BYTE* const base = zc->base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = zc->nextToUpdate;
+
+ while(idx < target)
+ idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0);
+}
+
+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 mls)
+{
+ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
+}
+
+
+static size_t ZSTD_BtFindBestMatch_selectMLS (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+ switch(matchLengthSearch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+ case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+ }
+}
+
+
+void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+{
+ const BYTE* const base = zc->base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = zc->nextToUpdate;
+
+ while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1);
+}
+
+
+/** Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch_extDict (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 mls)
+{
+ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
+}
+
+
+static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+ switch(matchLengthSearch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+ case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+ }
+}
+
+
+
+/* *********************************
+* Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]
+
+/* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
+{
+ U32* const hashTable = zc->hashTable;
+ const U32 hashLog = zc->appliedParams.cParams.hashLog;
+ U32* const chainTable = zc->chainTable;
+ const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1;
+ const BYTE* const base = zc->base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = zc->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ zc->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
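+
+/* Illustration only (a hypothetical walk, not used anywhere in zstd) :
+ * hashTable[h] holds the most recent position whose mls-byte prefix hashes to h,
+ * and NEXT_IN_CHAIN(pos, chainMask) holds the previous position with the same
+ * hash, forming a singly linked chain threaded through chainTable. Assuming
+ * chainTable and chainMask are in scope, scanning the candidates for ip is :
+ *
+ *     U32 pos = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
+ *     while (pos > zc->lowLimit) {
+ *         const BYTE* const candidate = zc->base + pos;   // possible match start
+ *         pos = NEXT_IN_CHAIN(pos, chainMask);            // older position, same hash
+ *     }
+ *
+ * ZSTD_HcFindBestMatch_generic() below performs this walk, additionally bounded
+ * by maxNbAttempts and by minChain. */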
+
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_HcFindBestMatch_generic (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 mls, const U32 extDict)
+{
+ U32* const chainTable = zc->chainTable;
+ const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = zc->base;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 lowLimit = zc->lowLimit;
+ const U32 current = (U32)(ip-base);
+ const U32 minChain = current > chainSize ? current - chainSize : 0;
+ int nbAttempts=maxNbAttempts;
+ size_t ml=4-1;
+
+ /* HC4 match finder */
+ U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
+
+ for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ const BYTE* match;
+ size_t currentMl=0;
+ if ((!extDict) || matchIndex >= dictLimit) {
+ match = base + matchIndex;
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ return ml;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
+ ZSTD_CCtx* zc,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+ switch(matchLengthSearch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
+ case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
+ ZSTD_CCtx* zc,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+ switch(matchLengthSearch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
+ case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
+ }
+}
+
+
+/* *******************************
+* Common parser - lazy strategy
+*********************************/
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 searchMethod, const U32 depth)
+{
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ctx->base + ctx->dictLimit;
+
+ U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
+ U32 const mls = ctx->appliedParams.cParams.searchLength;
+
+ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+ size_t* offsetPtr,
+ U32 maxNbAttempts, U32 matchLengthSearch);
+ searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+ U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
+
+ /* init */
+ ip += (ip==base);
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
+ { U32 const maxRep = (U32)(ip-base);
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+
+ /* check repCode */
+ if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
+ /* repcode : we take it */
+ matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(ml2 * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((ml2 >= 4) && (gain2 > gain1))
+ matchLength = ml2, offset = 0, start = ip;
+ }
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+    /* NOTE:
+     * Indexing as start[-offset+ZSTD_REP_MOVE-1] would be undefined behavior :
+     * (-offset+ZSTD_REP_MOVE-1) is unsigned, so adding it to start wraps the
+     * pointer outside the buffer, which is undefined behavior.
+     * Hence the (start-offset+ZSTD_REP_MOVE)[-1] form used below.
+     */
+ /* catch up */
+ if (offset) {
+ while ( (start > anchor)
+ && (start > base+offset-ZSTD_REP_MOVE)
+ && (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1]) ) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ((offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } }
+
+ /* Save reps for next block */
+ seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
+ seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
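+
+/* A rough reading of the gain heuristic above (numbers are illustrative only) :
+ * each additional matched byte is worth about 4 "points", each bit needed to
+ * code the offset costs about 1 point, and a small constant bonus (+4, then +7)
+ * protects the match already found. For example, keeping a current match of
+ * length 8 at offset 1000 scores
+ *
+ *     gain1 = 8*4 - ZSTD_highbit32(1000+1) + 4 = 32 - 9 + 4 = 27
+ *
+ * while a new candidate of length 9 at offset 200000 only scores
+ *
+ *     gain2 = 9*4 - ZSTD_highbit32(200000+1) = 36 - 17 = 19
+ *
+ * so the parser keeps the shorter match at the much closer offset. The repcode
+ * variants use a factor of 3 because a repeated offset is cheap to encode. */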
+
+
+size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
+}
+
+size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
+}
+
+size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
+}
+
+size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 searchMethod, const U32 depth)
+{
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ctx->base;
+ const U32 dictLimit = ctx->dictLimit;
+ const U32 lowestIndex = ctx->lowLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const dictStart = dictBase + ctx->lowLimit;
+
+ const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
+ const U32 mls = ctx->appliedParams.cParams.searchLength;
+
+ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+ size_t* offsetPtr,
+ U32 maxNbAttempts, U32 matchLengthSearch);
+ searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+ U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
+
+ /* init */
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
+ ip += (ip == prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+ U32 current = (U32)(ip-base);
+
+ /* check repCode */
+ { const U32 repIndex = (U32)(current+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+ /* repcode detected we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ current++;
+ /* check repCode */
+ if (offset) {
+ const U32 repIndex = (U32)(current - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 1 */
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ current++;
+ /* check repCode */
+ if (offset) {
+ const U32 repIndex = (U32)(current - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 2 */
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (offset) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ const U32 repIndex = (U32)((ip-base) - offset_2);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
+}
diff --git a/lib/compress/zstd_lazy.h b/lib/compress/zstd_lazy.h
new file mode 100644
index 000000000000..a9c4daed2582
--- /dev/null
+++ b/lib/compress/zstd_lazy.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+#include "zstd_compress.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls);
+void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls);
+void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls);
+
+size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LAZY_H */
diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c
new file mode 100644
index 000000000000..be50872cf767
--- /dev/null
+++ b/lib/compress/zstd_ldm.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include "zstd_ldm.h"
+
+#include "zstd_fast.h" /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+#define LDM_HASH_CHAR_OFFSET 10
+
+size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm)
+{
+ ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+ params->enableLdm = enableLdm>0;
+ params->hashLog = 0;
+ params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+ params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ params->hashEveryLog = ZSTD_LDM_HASHEVERYLOG_NOTSET;
+ return 0;
+}
+
+void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog)
+{
+ if (params->hashLog == 0) {
+ params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ }
+ if (params->hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET) {
+ params->hashEveryLog =
+ windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
+ }
+ params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
+
+size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog) {
+ size_t const ldmHSize = ((size_t)1) << hashLog;
+ size_t const ldmBucketSizeLog = MIN(bucketSizeLog, hashLog);
+ size_t const ldmBucketSize =
+ ((size_t)1) << (hashLog - ldmBucketSizeLog);
+ return ldmBucketSize + (ldmHSize * (sizeof(ldmEntry_t)));
+}
+
+/** ZSTD_ldm_getSmallHash() :
+ * numBits should be <= 32
+ * If numBits==0, returns 0.
+ * @return : the most significant numBits of value. */
+static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
+{
+ assert(numBits <= 32);
+ return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
+}
+
+/** ZSTD_ldm_getChecksum() :
+ * numBitsToDiscard should be <= 32
+ * @return : the next most significant 32 bits after numBitsToDiscard */
+static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
+{
+ assert(numBitsToDiscard <= 32);
+ return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
+}
+
+/** ZSTD_ldm_getTag() :
+ * Given the hash, returns the most significant numTagBits bits
+ * after (32 + hbits) bits.
+ *
+ * If there are not enough bits remaining, return the last
+ * numTagBits bits. */
+static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
+{
+ assert(numTagBits < 32 && hbits <= 32);
+ if (32 - hbits < numTagBits) {
+ return hash & (((U32)1 << numTagBits) - 1);
+ } else {
+ return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
+ }
+}
+
+/** ZSTD_ldm_getBucket() :
+ * Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+ ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+ return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/** ZSTD_ldm_insertEntry() :
+ * Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+ size_t const hash, const ldmEntry_t entry,
+ ldmParams_t const ldmParams)
+{
+ BYTE* const bucketOffsets = ldmState->bucketOffsets;
+ *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
+ bucketOffsets[hash]++;
+ bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
+}
+
+/** ZSTD_ldm_makeEntryAndInsertByTag() :
+ *
+ * Gets the small hash, checksum, and tag from the rollingHash.
+ *
+ * If the tag matches (1 << ldmParams.hashEveryLog)-1, then
+ * creates an ldmEntry from the offset, and inserts it into the hash table.
+ *
+ * hBits is the length of the small hash, which is the most significant hBits
+ * of rollingHash. The checksum is the next 32 most significant bits, followed
+ * by ldmParams.hashEveryLog bits that make up the tag. */
+static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
+ U64 const rollingHash,
+ U32 const hBits,
+ U32 const offset,
+ ldmParams_t const ldmParams)
+{
+ U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
+ U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+ if (tag == tagMask) {
+ U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
+ U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+ ldmEntry_t entry;
+ entry.offset = offset;
+ entry.checksum = checksum;
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
+ }
+}
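+
+/* Layout of the 64-bit rolling hash consumed above (most significant bits first) :
+ *
+ *     [ hBits small hash | 32-bit checksum | hashEveryLog tag | remaining bits ]
+ *
+ * Only positions whose tag equals (1 << hashEveryLog) - 1 are inserted, i.e. on
+ * average one position in 2^hashEveryLog. As a purely illustrative example, with
+ * hashLog = 20, bucketSizeLog = 3 and hashEveryLog = 7, the small hash is
+ * hBits = 20 - 3 = 17 bits wide and roughly 1 input position in 128 creates an
+ * ldmEntry_t. */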
+
+/** ZSTD_ldm_getRollingHash() :
+ * Get a 64-bit hash using the first len bytes from buf.
+ *
+ * Given bytes s = s_1, s_2, ..., s_k, the hash is defined to be
+ * H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
+ *
+ * where the constant a is defined to be prime8bytes.
+ *
+ * The implementation adds an offset to each byte, so
+ * H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
+static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
+{
+ U64 ret = 0;
+ U32 i;
+ for (i = 0; i < len; i++) {
+ ret *= prime8bytes;
+ ret += buf[i] + LDM_HASH_CHAR_OFFSET;
+ }
+ return ret;
+}
+
+/** ZSTD_ldm_ipow() :
+ * Return base^exp. */
+static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
+{
+ U64 ret = 1;
+ while (exp) {
+ if (exp & 1) { ret *= base; }
+ exp >>= 1;
+ base *= base;
+ }
+ return ret;
+}
+
+U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
+ assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
+ return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
+}
+
+/** ZSTD_ldm_updateHash() :
+ * Updates hash by removing toRemove and adding toAdd. */
+static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
+{
+ hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
+ hash *= prime8bytes;
+ hash += toAdd + LDM_HASH_CHAR_OFFSET;
+ return hash;
+}
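+
+/* Rolling property (minimal self-check sketch; `buf` and `params` are hypothetical) :
+ * with a = prime8bytes and k = params.minMatchLength, ZSTD_ldm_updateHash()
+ * turns H(buf[0..k-1]) into H(buf[1..k]) in O(1), provided hashPower = a^(k-1) :
+ *
+ *     U32 const k = params.minMatchLength;
+ *     U64 const power = ZSTD_ldm_getHashPower(k);          // prime8bytes^(k-1)
+ *     U64 h = ZSTD_ldm_getRollingHash(buf, k);             // H(buf[0..k-1])
+ *     h = ZSTD_ldm_updateHash(h, buf[0], buf[k], power);   // now H(buf[1..k])
+ *     assert(h == ZSTD_ldm_getRollingHash(buf + 1, k));
+ */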
+
+/** ZSTD_ldm_countBackwardsMatch() :
+ * Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pBase)
+{
+ size_t matchLength = 0;
+ while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
+ pIn--;
+ pMatch--;
+ matchLength++;
+ }
+ return matchLength;
+}
+
+/** ZSTD_ldm_fillFastTables() :
+ *
+ * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ * This is similar to ZSTD_loadDictionaryContent.
+ *
+ * The tables for the other strategies are filled within their
+ * block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_CCtx* zc, const void* end)
+{
+ const BYTE* const iend = (const BYTE*)end;
+ const U32 mls = zc->appliedParams.cParams.searchLength;
+
+ switch(zc->appliedParams.cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(zc, iend, mls);
+ zc->nextToUpdate = (U32)(iend - zc->base);
+ break;
+
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(zc, iend, mls);
+ zc->nextToUpdate = (U32)(iend - zc->base);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ break;
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ return 0;
+}
+
+/** ZSTD_ldm_fillLdmHashTable() :
+ *
+ * Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
+ * lastHash is the rolling hash that corresponds to lastHashed.
+ *
+ * Returns the rolling hash corresponding to position iend-1. */
+static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
+ U64 lastHash, const BYTE* lastHashed,
+ const BYTE* iend, const BYTE* base,
+ U32 hBits, ldmParams_t const ldmParams)
+{
+ U64 rollingHash = lastHash;
+ const BYTE* cur = lastHashed + 1;
+
+ while (cur < iend) {
+ rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
+ cur[ldmParams.minMatchLength-1],
+ state->hashPower);
+ ZSTD_ldm_makeEntryAndInsertByTag(state,
+ rollingHash, hBits,
+ (U32)(cur - base), ldmParams);
+ ++cur;
+ }
+ return rollingHash;
+}
+
+
+/** ZSTD_ldm_limitTableUpdate() :
+ *
+ * Sets cctx->nextToUpdate to a position closer to anchor if it has fallen far
+ * behind (after a long match, only update the tables by a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_CCtx* cctx, const BYTE* anchor)
+{
+ U32 const current = (U32)(anchor - cctx->base);
+ if (current > cctx->nextToUpdate + 1024) {
+ cctx->nextToUpdate =
+ current - MIN(512, current - cctx->nextToUpdate - 1024);
+ }
+}
+
+typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+/* defined in zstd_compress.c */
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
+ const void* src, size_t srcSize)
+{
+ ldmState_t* const ldmState = &(cctx->ldmState);
+ const ldmParams_t ldmParams = cctx->appliedParams.ldmParams;
+ const U64 hashPower = ldmState->hashPower;
+ const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
+ const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
+ const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+ seqStore_t* const seqStorePtr = &(cctx->seqStore);
+ const BYTE* const base = cctx->base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = cctx->dictLimit;
+ const BYTE* const lowest = base + lowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
+
+ const ZSTD_blockCompressor blockCompressor =
+ ZSTD_selectBlockCompressor(cctx->appliedParams.cParams.strategy, 0);
+ U32* const repToConfirm = seqStorePtr->repToConfirm;
+ U32 savedRep[ZSTD_REP_NUM];
+ U64 rollingHash = 0;
+ const BYTE* lastHashed = NULL;
+ size_t i, lastLiterals;
+
+ /* Save seqStorePtr->rep and copy repToConfirm */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ U32 const current = (U32)(ip - base);
+ size_t forwardMatchLength = 0, backwardMatchLength = 0;
+ ldmEntry_t* bestEntry = NULL;
+ if (ip != istart) {
+ rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
+ lastHashed[ldmParams.minMatchLength],
+ hashPower);
+ } else {
+ rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
+ }
+ lastHashed = ip;
+
+ /* Do not insert and do not look for a match */
+ if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
+ ldmTagMask) {
+ ip++;
+ continue;
+ }
+
+ /* Get the best entry and compute the match lengths */
+ {
+ ldmEntry_t* const bucket =
+ ZSTD_ldm_getBucket(ldmState,
+ ZSTD_ldm_getSmallHash(rollingHash, hBits),
+ ldmParams);
+ ldmEntry_t* cur;
+ size_t bestMatchLength = 0;
+ U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+
+ for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
+ const BYTE* const pMatch = cur->offset + base;
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+
+ curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
+ if (curForwardMatchLength < ldmParams.minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
+ ip, anchor, pMatch, lowest);
+ curTotalMatchLength = curForwardMatchLength +
+ curBackwardMatchLength;
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+ }
+
+ /* No match found -- continue searching */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
+ hBits, current,
+ ldmParams);
+ ip++;
+ continue;
+ }
+
+ /* Match found */
+ mLength = forwardMatchLength + backwardMatchLength;
+ ip -= backwardMatchLength;
+
+ /* Call the block compressor on the remaining literals */
+ {
+ U32 const matchIndex = bestEntry->offset;
+ const BYTE* const match = base + matchIndex - backwardMatchLength;
+ U32 const offset = (U32)(ip - match);
+
+ /* Overwrite rep codes */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ seqStorePtr->rep[i] = repToConfirm[i];
+
+ /* Fill tables for block compressor */
+ ZSTD_ldm_limitTableUpdate(cctx, anchor);
+ ZSTD_ldm_fillFastTables(cctx, anchor);
+
+ /* Call block compressor and get remaining literals */
+ lastLiterals = blockCompressor(cctx, anchor, ip - anchor);
+ cctx->nextToUpdate = (U32)(ip - base);
+
+ /* Update repToConfirm with the new offset */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ repToConfirm[i] = repToConfirm[i-1];
+ repToConfirm[0] = offset;
+
+ /* Store the sequence with the leftover literals */
+ ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
+ offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+ }
+
+ /* Insert the current entry into the hash table */
+ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
+ (U32)(lastHashed - base),
+ ldmParams);
+
+ assert(ip + backwardMatchLength == lastHashed);
+
+ /* Fill the hash table from lastHashed+1 to ip+mLength*/
+ /* Heuristic: don't need to fill the entire table at end of block */
+ if (ip + mLength < ilimit) {
+ rollingHash = ZSTD_ldm_fillLdmHashTable(
+ ldmState, rollingHash, lastHashed,
+ ip + mLength, base, hBits, ldmParams);
+ lastHashed = ip + mLength - 1;
+ }
+ ip += mLength;
+ anchor = ip;
+ /* Check immediate repcode */
+ while ( (ip < ilimit)
+ && ( (repToConfirm[1] > 0) && (repToConfirm[1] <= (U32)(ip-lowest))
+ && (MEM_read32(ip) == MEM_read32(ip - repToConfirm[1])) )) {
+
+ size_t const rLength = ZSTD_count(ip+4, ip+4-repToConfirm[1],
+ iend) + 4;
+ /* Swap repToConfirm[1] <=> repToConfirm[0] */
+ {
+ U32 const tmpOff = repToConfirm[1];
+ repToConfirm[1] = repToConfirm[0];
+ repToConfirm[0] = tmpOff;
+ }
+
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+
+ /* Fill the hash table from lastHashed+1 to ip+rLength*/
+ if (ip + rLength < ilimit) {
+ rollingHash = ZSTD_ldm_fillLdmHashTable(
+ ldmState, rollingHash, lastHashed,
+ ip + rLength, base, hBits, ldmParams);
+ lastHashed = ip + rLength - 1;
+ }
+ ip += rLength;
+ anchor = ip;
+ }
+ }
+
+ /* Overwrite rep */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ seqStorePtr->rep[i] = repToConfirm[i];
+
+ ZSTD_ldm_limitTableUpdate(cctx, anchor);
+ ZSTD_ldm_fillFastTables(cctx, anchor);
+
+ lastLiterals = blockCompressor(cctx, anchor, iend - anchor);
+ cctx->nextToUpdate = (U32)(iend - base);
+
+ /* Restore seqStorePtr->rep */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ seqStorePtr->rep[i] = savedRep[i];
+
+ /* Return the last literals size */
+ return lastLiterals;
+}
+
+size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_ldm_generic(ctx, src, srcSize);
+}
+
+static size_t ZSTD_compressBlock_ldm_extDict_generic(
+ ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+{
+ ldmState_t* const ldmState = &(ctx->ldmState);
+ const ldmParams_t ldmParams = ctx->appliedParams.ldmParams;
+ const U64 hashPower = ldmState->hashPower;
+ const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
+ const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
+ const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+ seqStore_t* const seqStorePtr = &(ctx->seqStore);
+ const BYTE* const base = ctx->base;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = ctx->lowLimit;
+ const BYTE* const dictStart = dictBase + lowestIndex;
+ const U32 dictLimit = ctx->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
+
+ const ZSTD_blockCompressor blockCompressor =
+ ZSTD_selectBlockCompressor(ctx->appliedParams.cParams.strategy, 1);
+ U32* const repToConfirm = seqStorePtr->repToConfirm;
+ U32 savedRep[ZSTD_REP_NUM];
+ U64 rollingHash = 0;
+ const BYTE* lastHashed = NULL;
+ size_t i, lastLiterals;
+
+ /* Save seqStorePtr->rep and copy repToConfirm */
+ for (i = 0; i < ZSTD_REP_NUM; i++) {
+ savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
+ }
+
+ /* Search Loop */
+    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ const U32 current = (U32)(ip-base);
+ size_t forwardMatchLength = 0, backwardMatchLength = 0;
+ ldmEntry_t* bestEntry = NULL;
+ if (ip != istart) {
+ rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
+ lastHashed[ldmParams.minMatchLength],
+ hashPower);
+ } else {
+ rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
+ }
+ lastHashed = ip;
+
+ if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
+ ldmTagMask) {
+ /* Don't insert and don't look for a match */
+ ip++;
+ continue;
+ }
+
+ /* Get the best entry and compute the match lengths */
+ {
+ ldmEntry_t* const bucket =
+ ZSTD_ldm_getBucket(ldmState,
+ ZSTD_ldm_getSmallHash(rollingHash, hBits),
+ ldmParams);
+ ldmEntry_t* cur;
+ size_t bestMatchLength = 0;
+ U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+
+ for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
+ const BYTE* const curMatchBase =
+ cur->offset < dictLimit ? dictBase : base;
+ const BYTE* const pMatch = curMatchBase + cur->offset;
+ const BYTE* const matchEnd =
+ cur->offset < dictLimit ? dictEnd : iend;
+ const BYTE* const lowMatchPtr =
+ cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+
+ curForwardMatchLength = ZSTD_count_2segments(
+ ip, pMatch, iend,
+ matchEnd, lowPrefixPtr);
+ if (curForwardMatchLength < ldmParams.minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
+ ip, anchor, pMatch, lowMatchPtr);
+ curTotalMatchLength = curForwardMatchLength +
+ curBackwardMatchLength;
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+ }
+
+ /* No match found -- continue searching */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
+ (U32)(lastHashed - base),
+ ldmParams);
+ ip++;
+ continue;
+ }
+
+ /* Match found */
+ mLength = forwardMatchLength + backwardMatchLength;
+ ip -= backwardMatchLength;
+
+ /* Call the block compressor on the remaining literals */
+ {
+ /* ip = current - backwardMatchLength
+ * The match is at (bestEntry->offset - backwardMatchLength) */
+ U32 const matchIndex = bestEntry->offset;
+ U32 const offset = current - matchIndex;
+
+ /* Overwrite rep codes */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ seqStorePtr->rep[i] = repToConfirm[i];
+
+ /* Fill the hash table for the block compressor */
+ ZSTD_ldm_limitTableUpdate(ctx, anchor);
+ ZSTD_ldm_fillFastTables(ctx, anchor);
+
+ /* Call block compressor and get remaining literals */
+ lastLiterals = blockCompressor(ctx, anchor, ip - anchor);
+ ctx->nextToUpdate = (U32)(ip - base);
+
+ /* Update repToConfirm with the new offset */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ repToConfirm[i] = repToConfirm[i-1];
+ repToConfirm[0] = offset;
+
+ /* Store the sequence with the leftover literals */
+ ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
+ offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+ }
+
+ /* Insert the current entry into the hash table */
+ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
+ (U32)(lastHashed - base),
+ ldmParams);
+
+ /* Fill the hash table from lastHashed+1 to ip+mLength */
+ assert(ip + backwardMatchLength == lastHashed);
+ if (ip + mLength < ilimit) {
+ rollingHash = ZSTD_ldm_fillLdmHashTable(
+ ldmState, rollingHash, lastHashed,
+ ip + mLength, base, hBits,
+ ldmParams);
+ lastHashed = ip + mLength - 1;
+ }
+ ip += mLength;
+ anchor = ip;
+
+ /* check immediate repcode */
+ while (ip < ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - repToConfirm[1];
+ const BYTE* repMatch2 = repIndex2 < dictLimit ?
+ dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) &
+ (repIndex2 > lowestIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < dictLimit ?
+ dictEnd : iend;
+ size_t const repLength2 =
+ ZSTD_count_2segments(ip+4, repMatch2+4, iend,
+ repEnd2, lowPrefixPtr) + 4;
+
+ U32 tmpOffset = repToConfirm[1];
+ repToConfirm[1] = repToConfirm[0];
+ repToConfirm[0] = tmpOffset;
+
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
+
+ /* Fill the hash table from lastHashed+1 to ip+repLength2*/
+ if (ip + repLength2 < ilimit) {
+ rollingHash = ZSTD_ldm_fillLdmHashTable(
+ ldmState, rollingHash, lastHashed,
+ ip + repLength2, base, hBits,
+ ldmParams);
+ lastHashed = ip + repLength2 - 1;
+ }
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ /* Overwrite rep */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ seqStorePtr->rep[i] = repToConfirm[i];
+
+ ZSTD_ldm_limitTableUpdate(ctx, anchor);
+ ZSTD_ldm_fillFastTables(ctx, anchor);
+
+ /* Call the block compressor one last time on the last literals */
+ lastLiterals = blockCompressor(ctx, anchor, iend - anchor);
+ ctx->nextToUpdate = (U32)(iend - base);
+
+ /* Restore seqStorePtr->rep */
+ for (i = 0; i < ZSTD_REP_NUM; i++)
+ seqStorePtr->rep[i] = savedRep[i];
+
+ /* Return the last literals size */
+ return lastLiterals;
+}
+
+size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_ldm_extDict_generic(ctx, src, srcSize);
+}
diff --git a/lib/compress/zstd_ldm.h b/lib/compress/zstd_ldm.h
new file mode 100644
index 000000000000..d6d3d42c337f
--- /dev/null
+++ b/lib/compress/zstd_ldm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+#include "zstd_compress.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+* Long distance matching
+***************************************/
+
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX
+#define ZSTD_LDM_HASHEVERYLOG_NOTSET 9999
+
+/** ZSTD_compressBlock_ldm_generic() :
+ *
+ * This is a block compressor intended for long distance matching.
+ *
+ * The function searches for matches of length at least
+ * ldmParams.minMatchLength using a hash table in cctx->ldmState.
+ * Matches can be at a distance of up to cParams.windowLog.
+ *
+ * Upon finding a match, the unmatched literals are compressed using a
+ * ZSTD_blockCompressor (depending on the strategy in the compression
+ * parameters), which stores the matched sequences. The "long distance"
+ * match is then stored with the remaining literals from the
+ * ZSTD_blockCompressor. */
+size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* cctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize);
+
+/** ZSTD_ldm_initializeParameters() :
+ * Initialize the long distance matching parameters to their default values. */
+size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm);
+
+/** ZSTD_ldm_getTableSize() :
+ * Estimate the space needed for long distance matching tables. */
+size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog);
+
+/** ZSTD_ldm_getHashPower() :
+ * Return prime8bytes^(minMatchLength-1) */
+U64 ZSTD_ldm_getHashPower(U32 minMatchLength);
+
+/** ZSTD_ldm_adjustParameters() :
+ * If the params->hashEveryLog is not set, set it to its default value based on
+ * windowLog and params->hashLog.
+ *
+ * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ * params->hashLog if it is not). */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog);
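+
+/* Example (values derived from the defaults in zstd_ldm.c, shown for illustration) :
+ *
+ *     ldmParams_t params;
+ *     ZSTD_ldm_initializeParameters(&params, 1);   // enable LDM with defaults
+ *     ZSTD_ldm_adjustParameters(&params, 27);      // windowLog = 27
+ *     // params.hashLog == 27 - LDM_HASH_RLOG == 20
+ *     // params.hashEveryLog == 27 - 20 == 7
+ *     // params.bucketSizeLog stays at its default of 3
+ */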
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LDM_H */
diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c
new file mode 100644
index 000000000000..c47ce23ad535
--- /dev/null
+++ b/lib/compress/zstd_opt.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_opt.h"
+#include "zstd_lazy.h"
+
+
+#define ZSTD_LITFREQ_ADD 2
+#define ZSTD_FREQ_DIV 4
+#define ZSTD_MAX_PRICE (1<<30)
+
+/*-*************************************
+* Price functions for optimal parser
+***************************************/
+static void ZSTD_setLog2Prices(optState_t* optPtr)
+{
+ optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
+ optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
+ optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
+ optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
+ optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum));
+}
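+
+/* Interpretation (illustrative numbers only) : a symbol drawn from a table with
+ * total count `sum` and individual count `freq` is priced at roughly
+ * log2(sum) - log2(freq) = -log2(freq/sum) bits, i.e. its Shannon cost, using
+ * ZSTD_highbit32(x+1) as an integer approximation of log2(x). For instance,
+ * with litSum = 4096 and litFreq['e'] = 256, the literal 'e' is priced at about
+ * 12 - 8 = 4 bits. */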
+
+
+static void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize)
+{
+ unsigned u;
+
+ optPtr->cachedLiterals = NULL;
+ optPtr->cachedPrice = optPtr->cachedLitLength = 0;
+ optPtr->staticPrices = 0;
+
+ if (optPtr->litLengthSum == 0) {
+ if (srcSize <= 1024) optPtr->staticPrices = 1;
+
+ assert(optPtr->litFreq!=NULL);
+ for (u=0; u<=MaxLit; u++)
+ optPtr->litFreq[u] = 0;
+ for (u=0; u<srcSize; u++)
+ optPtr->litFreq[src[u]]++;
+
+ optPtr->litSum = 0;
+ optPtr->litLengthSum = MaxLL+1;
+ optPtr->matchLengthSum = MaxML+1;
+ optPtr->offCodeSum = (MaxOff+1);
+ optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
+
+ for (u=0; u<=MaxLit; u++) {
+ optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV);
+ optPtr->litSum += optPtr->litFreq[u];
+ }
+ for (u=0; u<=MaxLL; u++)
+ optPtr->litLengthFreq[u] = 1;
+ for (u=0; u<=MaxML; u++)
+ optPtr->matchLengthFreq[u] = 1;
+ for (u=0; u<=MaxOff; u++)
+ optPtr->offCodeFreq[u] = 1;
+ } else {
+ optPtr->matchLengthSum = 0;
+ optPtr->litLengthSum = 0;
+ optPtr->offCodeSum = 0;
+ optPtr->matchSum = 0;
+ optPtr->litSum = 0;
+
+ for (u=0; u<=MaxLit; u++) {
+ optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
+ optPtr->litSum += optPtr->litFreq[u];
+ }
+ for (u=0; u<=MaxLL; u++) {
+ optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
+ optPtr->litLengthSum += optPtr->litLengthFreq[u];
+ }
+ for (u=0; u<=MaxML; u++) {
+ optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
+ optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
+ optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3);
+ }
+ optPtr->matchSum *= ZSTD_LITFREQ_ADD;
+ for (u=0; u<=MaxOff; u++) {
+ optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
+ optPtr->offCodeSum += optPtr->offCodeFreq[u];
+ }
+ }
+
+ ZSTD_setLog2Prices(optPtr);
+}
+
+
+static U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals)
+{
+ U32 price, u;
+
+ if (optPtr->staticPrices)
+ return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
+
+ if (litLength == 0)
+ return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1);
+
+ /* literals */
+ if (optPtr->cachedLiterals == literals) {
+ U32 const additional = litLength - optPtr->cachedLitLength;
+ const BYTE* literals2 = optPtr->cachedLiterals + optPtr->cachedLitLength;
+ price = optPtr->cachedPrice + additional * optPtr->log2litSum;
+ for (u=0; u < additional; u++)
+ price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1);
+ optPtr->cachedPrice = price;
+ optPtr->cachedLitLength = litLength;
+ } else {
+ price = litLength * optPtr->log2litSum;
+ for (u=0; u < litLength; u++)
+ price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
+
+ if (litLength >= 12) {
+ optPtr->cachedLiterals = literals;
+ optPtr->cachedPrice = price;
+ optPtr->cachedLitLength = litLength;
+ }
+ }
+
+ /* literal Length */
+ { const BYTE LL_deltaCode = 19;
+ const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+ price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+ }
+
+ return price;
+}
+
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
+{
+ /* offset */
+ U32 price;
+ BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
+
+ if (optPtr->staticPrices)
+ return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
+
+ price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
+ if (!ultra && offCode >= 20) price += (offCode-19)*2;
+
+ /* match Length */
+ { const BYTE ML_deltaCode = 36;
+ const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+ price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
+ }
+
+ return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor;
+}
+
+
+static void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
+{
+ U32 u;
+
+ /* literals */
+ optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+ for (u=0; u < litLength; u++)
+ optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+
+ /* literal Length */
+ { const BYTE LL_deltaCode = 19;
+ const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+ optPtr->litLengthFreq[llCode]++;
+ optPtr->litLengthSum++;
+ }
+
+ /* match offset */
+ { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
+ optPtr->offCodeSum++;
+ optPtr->offCodeFreq[offCode]++;
+ }
+
+ /* match Length */
+ { const BYTE ML_deltaCode = 36;
+ const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+ optPtr->matchLengthFreq[mlCode]++;
+ optPtr->matchLengthSum++;
+ }
+
+ ZSTD_setLog2Prices(optPtr);
+}
+
+
+#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \
+ { \
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \
+ opt[pos].mlen = mlen_; \
+ opt[pos].off = offset_; \
+ opt[pos].litlen = litlen_; \
+ opt[pos].price = price_; \
+ }
+
+
+/* function safe only for comparisons */
+static U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+ switch (length)
+ {
+ default :
+ case 4 : return MEM_read32(memPtr);
+ case 3 : if (MEM_isLittleEndian())
+ return MEM_read32(memPtr)<<8;
+ else
+ return MEM_read32(memPtr)>>8;
+ }
+}
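+
+/* Why the shift makes the 3-byte case safe for comparisons (illustration only) :
+ * MEM_read32() fetches 4 bytes, and the shift by 8 discards the byte lying
+ * outside the 3-byte window (the top byte on little-endian, the bottom byte on
+ * big-endian). Two positions therefore compare equal iff their first 3 bytes
+ * match :
+ *
+ *     // hypothetical buffers
+ *     BYTE a[4] = { 1, 2, 3, 42 };
+ *     BYTE b[4] = { 1, 2, 3, 99 };
+ *     assert(ZSTD_readMINMATCH(a, 3) == ZSTD_readMINMATCH(b, 3));
+ *
+ * The returned value is not the 3 bytes themselves, hence "safe only for
+ * comparisons". */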
+
+
+/* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+static
+U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
+{
+ U32* const hashTable3 = zc->hashTable3;
+ U32 const hashLog3 = zc->hashLog3;
+ const BYTE* const base = zc->base;
+ U32 idx = zc->nextToUpdate3;
+ const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
+ const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+
+ while(idx < target) {
+ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+ idx++;
+ }
+
+ return hashTable3[hash3];
+}
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+static U32 ZSTD_insertBtAndGetAllMatches (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ U32 nbCompares, const U32 mls,
+ U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+ const BYTE* const base = zc->base;
+ const U32 current = (U32)(ip-base);
+ const U32 hashLog = zc->appliedParams.cParams.hashLog;
+ const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const hashTable = zc->hashTable;
+ U32 matchIndex = hashTable[h];
+ U32* const bt = zc->chainTable;
+ const U32 btLog = zc->appliedParams.cParams.chainLog - 1;
+ const U32 btMask= (1U << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const U32 btLow = btMask >= current ? 0 : current - btMask;
+ const U32 windowLow = zc->lowLimit;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = bt + 2*(current&btMask) + 1;
+ U32 matchEndIdx = current+8;
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+
+ const U32 minMatch = (mls == 3) ? 3 : 4;
+ size_t bestLength = minMatchLen-1;
+
+ if (minMatch == 3) { /* HC3 match finder */
+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
+ if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) {
+ const BYTE* match;
+ size_t currentMl=0;
+ if ((!extDict) || matchIndex3 >= dictLimit) {
+ match = base + matchIndex3;
+ if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ match = dictBase + matchIndex3;
+ if (ZSTD_readMINMATCH(match, MINMATCH) == ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
+ }
+
+ /* save best solution */
+ if (currentMl > bestLength) {
+ bestLength = currentMl;
+ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3;
+ matches[mnum].len = (U32)currentMl;
+ mnum++;
+ if (currentMl > ZSTD_OPT_NUM) goto update;
+ if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/
+ }
+ }
+ }
+
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ if (match[matchLength] == ip[matchLength]) {
+ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1;
+ }
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if (matchLength > ZSTD_OPT_NUM) break;
+ if (ip+matchLength == iLimit) /* reached end of input : cannot tell whether the match is smaller or larger */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
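+ /* positions up to matchEndIdx have already been read while extending matches ;
+ * resuming tree updates 8 bytes before that point presumably avoids re-inserting
+ * positions covered by a long match while keeping a safety margin */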
+update:
+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
+ return mnum;
+}
+
+
+/** Tree updater, providing all matches */
+static U32 ZSTD_BtGetAllMatches (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+ return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
+}
+
+
+static U32 ZSTD_BtGetAllMatches_selectMLS (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* ip, const BYTE* const iHighLimit,
+ const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+ switch(matchLengthSearch)
+ {
+ case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
+ default :
+ case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+ case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+ case 7 :
+ case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+ }
+}
+
+/** Tree updater, providing all matches (extDict variant) */
+static U32 ZSTD_BtGetAllMatches_extDict (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+ return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
+}
+
+
+static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* ip, const BYTE* const iHighLimit,
+ const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+ switch(matchLengthSearch)
+ {
+ case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
+ default :
+ case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+ case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+ case 7 :
+ case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+ }
+}
+
+
+/*-*******************************
+* Optimal parser
+*********************************/
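+/* ZSTD_compressBlock_opt_generic() :
+ * price-based ("optimal") parser. For each starting position it gathers repcode
+ * and binary-tree matches, then runs a small dynamic program : opt[pos] keeps the
+ * cheapest known price to reach ip+pos, together with the match length, offset and
+ * literal length achieving it. When the horizon (last_pos) is reached, the chain of
+ * decisions is walked backward and emitted as sequences. `ultra` only changes the
+ * pricing of large offsets (btultra strategy). */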
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize, const int ultra)
+{
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ optState_t* optStatePtr = &(ctx->optState);
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ctx->base;
+ const BYTE* const prefixStart = base + ctx->dictLimit;
+
+ const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
+ const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
+ const U32 mls = ctx->appliedParams.cParams.searchLength;
+ const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
+
+ ZSTD_optimal_t* opt = optStatePtr->priceTable;
+ ZSTD_match_t* matches = optStatePtr->matchTable;
+ const BYTE* inr;
+ U32 offset, rep[ZSTD_REP_NUM];
+
+ /* init */
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+ ip += (ip==prefixStart);
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, match_num, last_pos, litlen, price;
+ U32 u, mlen, best_mlen, best_off, litLength;
+ memset(opt, 0, sizeof(ZSTD_optimal_t));
+ last_pos = 0;
+ litlen = (U32)(ip - anchor);
+
+ /* check repCode */
+ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+ for (i=(ip == anchor); i<last_i; i++) {
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+ if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
+ mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
+ if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+ best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
+ goto _storeSequence;
+ }
+ best_off = i - (ip == anchor);
+ do {
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+ if (mlen > last_pos || price < opt[mlen].price)
+ SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
+ mlen--;
+ } while (mlen >= minMatch);
+ } } }
+
+ match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
+
+ if (!last_pos && !match_num) { ip++; continue; }
+
+ if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+ best_mlen = matches[match_num-1].len;
+ best_off = matches[match_num-1].off;
+ cur = 0;
+ last_pos = 1;
+ goto _storeSequence;
+ }
+
+ /* set prices using matches at position = 0 */
+ best_mlen = (last_pos) ? last_pos : minMatch;
+ for (u = 0; u < match_num; u++) {
+ mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+ best_mlen = matches[u].len;
+ while (mlen <= best_mlen) {
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+ if (mlen > last_pos || price < opt[mlen].price)
+ SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
+ mlen++;
+ } }
+
+ if (last_pos < minMatch) { ip++; continue; }
+
+ /* initialize opt[0] */
+ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ opt[0].mlen = 1;
+ opt[0].litlen = litlen;
+
+ /* check further positions */
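+ /* for each reachable position cur : first price the option of arriving with one
+ * more literal, then try to extend from cur with repcodes and fresh matches */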
+ for (cur = 1; cur <= last_pos; cur++) {
+ inr = ip + cur;
+
+ if (opt[cur-1].mlen == 1) {
+ litlen = opt[cur-1].litlen + 1;
+ if (cur > litlen) {
+ price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
+ } else
+ price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
+ } else {
+ litlen = 1;
+ price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
+ }
+
+ if (cur > last_pos || price <= opt[cur].price)
+ SET_PRICE(cur, 1, 0, litlen, price);
+
+ if (cur == last_pos) break;
+
+ if (inr > ilimit) /* last match must start at a minimum distance of 8 from iend */
+ continue;
+
+ mlen = opt[cur].mlen;
+ if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
+ opt[cur].rep[2] = opt[cur-mlen].rep[1];
+ opt[cur].rep[1] = opt[cur-mlen].rep[0];
+ opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
+ } else {
+ opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
+ opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
+ /* If opt[cur].off == ZSTD_REP_MOVE_OPT, then mlen != 1.
+ * offset ZSTD_REP_MOVE_OPT is used for the special case
+ * litLength == 0, where offset 0 means something special.
+ * mlen == 1 means the previous byte was stored as a literal,
+ * so they are mutually exclusive.
+ */
+ assert(!(opt[cur].off == ZSTD_REP_MOVE_OPT && mlen == 1));
+ opt[cur].rep[0] = (opt[cur].off == ZSTD_REP_MOVE_OPT) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
+ }
+
+ best_mlen = minMatch;
+ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+ for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+ if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
+ && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
+ mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;
+
+ if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
+ best_mlen = mlen; best_off = i; last_pos = cur + 1;
+ goto _storeSequence;
+ }
+
+ best_off = i - (opt[cur].mlen != 1);
+ if (mlen > best_mlen) best_mlen = mlen;
+
+ do {
+ if (opt[cur].mlen == 1) {
+ litlen = opt[cur].litlen;
+ if (cur > litlen) {
+ price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
+ } else
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+ } else {
+ litlen = 0;
+ price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
+ }
+
+ if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
+ SET_PRICE(cur + mlen, mlen, i, litlen, price);
+ mlen--;
+ } while (mlen >= minMatch);
+ } } }
+
+ match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
+
+ if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+ best_mlen = matches[match_num-1].len;
+ best_off = matches[match_num-1].off;
+ last_pos = cur + 1;
+ goto _storeSequence;
+ }
+
+ /* set prices using matches at position = cur */
+ for (u = 0; u < match_num; u++) {
+ mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+ best_mlen = matches[u].len;
+
+ while (mlen <= best_mlen) {
+ if (opt[cur].mlen == 1) {
+ litlen = opt[cur].litlen;
+ if (cur > litlen)
+ price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
+ else
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+ } else {
+ litlen = 0;
+ price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
+ }
+
+ if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
+ SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
+
+ mlen++;
+ } } }
+
+ best_mlen = opt[last_pos].mlen;
+ best_off = opt[last_pos].off;
+ cur = last_pos - best_mlen;
+
+ /* store sequence */
+_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
+ opt[0].mlen = 1;
+
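+ /* opt[] currently links each position to its predecessor ; walk backward from cur,
+ * rewriting entries so the forward pass below reads the selected sequences in order */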
+ while (1) {
+ mlen = opt[cur].mlen;
+ offset = opt[cur].off;
+ opt[cur].mlen = best_mlen;
+ opt[cur].off = best_off;
+ best_mlen = mlen;
+ best_off = offset;
+ if (mlen > cur) break;
+ cur -= mlen;
+ }
+
+ for (u = 0; u <= last_pos;) {
+ u += opt[u].mlen;
+ }
+
+ for (cur=0; cur < last_pos; ) {
+ mlen = opt[cur].mlen;
+ if (mlen == 1) { ip++; cur++; continue; }
+ offset = opt[cur].off;
+ cur += mlen;
+ litLength = (U32)(ip - anchor);
+
+ if (offset > ZSTD_REP_MOVE_OPT) {
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = offset - ZSTD_REP_MOVE_OPT;
+ offset--;
+ } else {
+ if (offset != 0) {
+ best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
+ if (offset != 1) rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = best_off;
+ }
+ if (litLength==0) offset--;
+ }
+
+ ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
+ anchor = ip = ip + mlen;
+ } } /* for (cur=0; cur < last_pos; ) */
+
+ /* Save reps for next block */
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
+}
+
+size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
+}
+
+
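+/* ZSTD_compressBlock_opt_extDict_generic() :
+ * same parser as ZSTD_compressBlock_opt_generic(), for windows that partially live
+ * in an external dictionary segment : repcodes and matches are validated against
+ * lowLimit/dictLimit, and lengths are measured with ZSTD_count_2segments() when a
+ * match may cross the dictionary boundary. */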
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize, const int ultra)
+{
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ optState_t* optStatePtr = &(ctx->optState);
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ctx->base;
+ const U32 lowestIndex = ctx->lowLimit;
+ const U32 dictLimit = ctx->dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+
+ const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
+ const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
+ const U32 mls = ctx->appliedParams.cParams.searchLength;
+ const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
+
+ ZSTD_optimal_t* opt = optStatePtr->priceTable;
+ ZSTD_match_t* matches = optStatePtr->matchTable;
+ const BYTE* inr;
+
+ /* init */
+ U32 offset, rep[ZSTD_REP_NUM];
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
+
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+ ip += (ip==prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, match_num, last_pos, litlen, price;
+ U32 u, mlen, best_mlen, best_off, litLength;
+ U32 current = (U32)(ip-base);
+ memset(opt, 0, sizeof(ZSTD_optimal_t));
+ last_pos = 0;
+ opt[0].litlen = (U32)(ip - anchor);
+
+ /* check repCode */
+ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+ for (i = (ip==anchor); i<last_i; i++) {
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+ const U32 repIndex = (U32)(current - repCur);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( (repCur > 0 && repCur <= (S32)current)
+ && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ /* repcode detected : we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
+
+ if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+ best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
+ goto _storeSequence;
+ }
+
+ best_off = i - (ip==anchor);
+ litlen = opt[0].litlen;
+ do {
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+ if (mlen > last_pos || price < opt[mlen].price)
+ SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
+ mlen--;
+ } while (mlen >= minMatch);
+ } } }
+
+ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
+
+ if (!last_pos && !match_num) { ip++; continue; }
+
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ opt[0].mlen = 1;
+
+ if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+ best_mlen = matches[match_num-1].len;
+ best_off = matches[match_num-1].off;
+ cur = 0;
+ last_pos = 1;
+ goto _storeSequence;
+ }
+
+ best_mlen = (last_pos) ? last_pos : minMatch;
+
+ /* set prices using matches at position = 0 */
+ for (u = 0; u < match_num; u++) {
+ mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+ best_mlen = matches[u].len;
+ litlen = opt[0].litlen;
+ while (mlen <= best_mlen) {
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+ if (mlen > last_pos || price < opt[mlen].price)
+ SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
+ mlen++;
+ } }
+
+ if (last_pos < minMatch) {
+ ip++; continue;
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ inr = ip + cur;
+
+ if (opt[cur-1].mlen == 1) {
+ litlen = opt[cur-1].litlen + 1;
+ if (cur > litlen) {
+ price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
+ } else
+ price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
+ } else {
+ litlen = 1;
+ price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
+ }
+
+ if (cur > last_pos || price <= opt[cur].price)
+ SET_PRICE(cur, 1, 0, litlen, price);
+
+ if (cur == last_pos) break;
+
+ if (inr > ilimit) /* last match must start at a minimum distance of 8 from iend */
+ continue;
+
+ mlen = opt[cur].mlen;
+ if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
+ opt[cur].rep[2] = opt[cur-mlen].rep[1];
+ opt[cur].rep[1] = opt[cur-mlen].rep[0];
+ opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
+ } else {
+ opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
+ opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
+ assert(!(opt[cur].off == ZSTD_REP_MOVE_OPT && mlen == 1));
+ opt[cur].rep[0] = (opt[cur].off == ZSTD_REP_MOVE_OPT) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
+ }
+
+ best_mlen = minMatch;
+ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+ for (i = (mlen != 1); i<last_i; i++) {
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+ const U32 repIndex = (U32)(current+cur - repCur);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( (repCur > 0 && repCur <= (S32)(current+cur))
+ && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
+ && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
+
+ if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
+ best_mlen = mlen; best_off = i; last_pos = cur + 1;
+ goto _storeSequence;
+ }
+
+ best_off = i - (opt[cur].mlen != 1);
+ if (mlen > best_mlen) best_mlen = mlen;
+
+ do {
+ if (opt[cur].mlen == 1) {
+ litlen = opt[cur].litlen;
+ if (cur > litlen) {
+ price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
+ } else
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+ } else {
+ litlen = 0;
+ price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
+ }
+
+ if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
+ SET_PRICE(cur + mlen, mlen, i, litlen, price);
+ mlen--;
+ } while (mlen >= minMatch);
+ } } }
+
+ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
+
+ if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+ best_mlen = matches[match_num-1].len;
+ best_off = matches[match_num-1].off;
+ last_pos = cur + 1;
+ goto _storeSequence;
+ }
+
+ /* set prices using matches at position = cur */
+ for (u = 0; u < match_num; u++) {
+ mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+ best_mlen = matches[u].len;
+
+ while (mlen <= best_mlen) {
+ if (opt[cur].mlen == 1) {
+ litlen = opt[cur].litlen;
+ if (cur > litlen)
+ price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
+ else
+ price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+ } else {
+ litlen = 0;
+ price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
+ }
+
+ if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
+ SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
+
+ mlen++;
+ } } } /* for (cur = 1; cur <= last_pos; cur++) */
+
+ best_mlen = opt[last_pos].mlen;
+ best_off = opt[last_pos].off;
+ cur = last_pos - best_mlen;
+
+ /* store sequence */
+_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
+ opt[0].mlen = 1;
+
+ while (1) {
+ mlen = opt[cur].mlen;
+ offset = opt[cur].off;
+ opt[cur].mlen = best_mlen;
+ opt[cur].off = best_off;
+ best_mlen = mlen;
+ best_off = offset;
+ if (mlen > cur) break;
+ cur -= mlen;
+ }
+
+ for (u = 0; u <= last_pos; ) {
+ u += opt[u].mlen;
+ }
+
+ for (cur=0; cur < last_pos; ) {
+ mlen = opt[cur].mlen;
+ if (mlen == 1) { ip++; cur++; continue; }
+ offset = opt[cur].off;
+ cur += mlen;
+ litLength = (U32)(ip - anchor);
+
+ if (offset > ZSTD_REP_MOVE_OPT) {
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = offset - ZSTD_REP_MOVE_OPT;
+ offset--;
+ } else {
+ if (offset != 0) {
+ best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
+ if (offset != 1) rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = best_off;
+ }
+
+ if (litLength==0) offset--;
+ }
+
+ ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
+ anchor = ip = ip + mlen;
+ } } /* for (cur=0; cur < last_pos; ) */
+
+ /* Save reps for next block */
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
+}
diff --git a/lib/compress/zstd_opt.h b/lib/compress/zstd_opt.h
index ae24732c7d84..816a1fabbf16 100644
--- a/lib/compress/zstd_opt.h
+++ b/lib/compress/zstd_opt.h
@@ -5,934 +5,26 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
-/* Note : this file is intended to be included within zstd_compress.c */
+#include "zstd_compress.h"
+#if defined (__cplusplus)
+extern "C" {
+#endif
-#ifndef ZSTD_OPT_H_91842398743
-#define ZSTD_OPT_H_91842398743
+size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
-#define ZSTD_LITFREQ_ADD 2
-#define ZSTD_FREQ_DIV 4
-#define ZSTD_MAX_PRICE (1<<30)
-
-/*-*************************************
-* Price functions for optimal parser
-***************************************/
-static void ZSTD_setLog2Prices(optState_t* optPtr)
-{
- optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
- optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
- optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
- optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
- optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum));
-}
-
-
-static void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize)
-{
- unsigned u;
-
- optPtr->cachedLiterals = NULL;
- optPtr->cachedPrice = optPtr->cachedLitLength = 0;
- optPtr->staticPrices = 0;
-
- if (optPtr->litLengthSum == 0) {
- if (srcSize <= 1024) optPtr->staticPrices = 1;
-
- assert(optPtr->litFreq!=NULL);
- for (u=0; u<=MaxLit; u++)
- optPtr->litFreq[u] = 0;
- for (u=0; u<srcSize; u++)
- optPtr->litFreq[src[u]]++;
-
- optPtr->litSum = 0;
- optPtr->litLengthSum = MaxLL+1;
- optPtr->matchLengthSum = MaxML+1;
- optPtr->offCodeSum = (MaxOff+1);
- optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
-
- for (u=0; u<=MaxLit; u++) {
- optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV);
- optPtr->litSum += optPtr->litFreq[u];
- }
- for (u=0; u<=MaxLL; u++)
- optPtr->litLengthFreq[u] = 1;
- for (u=0; u<=MaxML; u++)
- optPtr->matchLengthFreq[u] = 1;
- for (u=0; u<=MaxOff; u++)
- optPtr->offCodeFreq[u] = 1;
- } else {
- optPtr->matchLengthSum = 0;
- optPtr->litLengthSum = 0;
- optPtr->offCodeSum = 0;
- optPtr->matchSum = 0;
- optPtr->litSum = 0;
-
- for (u=0; u<=MaxLit; u++) {
- optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
- optPtr->litSum += optPtr->litFreq[u];
- }
- for (u=0; u<=MaxLL; u++) {
- optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
- optPtr->litLengthSum += optPtr->litLengthFreq[u];
- }
- for (u=0; u<=MaxML; u++) {
- optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
- optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
- optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3);
- }
- optPtr->matchSum *= ZSTD_LITFREQ_ADD;
- for (u=0; u<=MaxOff; u++) {
- optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
- optPtr->offCodeSum += optPtr->offCodeFreq[u];
- }
- }
-
- ZSTD_setLog2Prices(optPtr);
-}
-
-
-static U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals)
-{
- U32 price, u;
-
- if (optPtr->staticPrices)
- return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
-
- if (litLength == 0)
- return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1);
-
- /* literals */
- if (optPtr->cachedLiterals == literals) {
- U32 const additional = litLength - optPtr->cachedLitLength;
- const BYTE* literals2 = optPtr->cachedLiterals + optPtr->cachedLitLength;
- price = optPtr->cachedPrice + additional * optPtr->log2litSum;
- for (u=0; u < additional; u++)
- price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1);
- optPtr->cachedPrice = price;
- optPtr->cachedLitLength = litLength;
- } else {
- price = litLength * optPtr->log2litSum;
- for (u=0; u < litLength; u++)
- price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
-
- if (litLength >= 12) {
- optPtr->cachedLiterals = literals;
- optPtr->cachedPrice = price;
- optPtr->cachedLitLength = litLength;
- }
- }
-
- /* literal Length */
- { const BYTE LL_deltaCode = 19;
- const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
- price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
- }
-
- return price;
-}
-
-
-FORCE_INLINE_TEMPLATE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
-{
- /* offset */
- U32 price;
- BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
-
- if (optPtr->staticPrices)
- return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
-
- price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
- if (!ultra && offCode >= 20) price += (offCode-19)*2;
-
- /* match Length */
- { const BYTE ML_deltaCode = 36;
- const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
- price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
- }
-
- return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor;
-}
-
-
-static void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
-{
- U32 u;
-
- /* literals */
- optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
- for (u=0; u < litLength; u++)
- optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
-
- /* literal Length */
- { const BYTE LL_deltaCode = 19;
- const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
- optPtr->litLengthFreq[llCode]++;
- optPtr->litLengthSum++;
- }
-
- /* match offset */
- { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
- optPtr->offCodeSum++;
- optPtr->offCodeFreq[offCode]++;
- }
-
- /* match Length */
- { const BYTE ML_deltaCode = 36;
- const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
- optPtr->matchLengthFreq[mlCode]++;
- optPtr->matchLengthSum++;
- }
-
- ZSTD_setLog2Prices(optPtr);
-}
-
-
-#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \
- { \
- while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \
- opt[pos].mlen = mlen_; \
- opt[pos].off = offset_; \
- opt[pos].litlen = litlen_; \
- opt[pos].price = price_; \
- }
-
-
-/* function safe only for comparisons */
-static U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
-{
- switch (length)
- {
- default :
- case 4 : return MEM_read32(memPtr);
- case 3 : if (MEM_isLittleEndian())
- return MEM_read32(memPtr)<<8;
- else
- return MEM_read32(memPtr)>>8;
- }
-}
-
-
-/* Update hashTable3 up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-static
-U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
-{
- U32* const hashTable3 = zc->hashTable3;
- U32 const hashLog3 = zc->hashLog3;
- const BYTE* const base = zc->base;
- U32 idx = zc->nextToUpdate3;
- const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
- const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
-
- while(idx < target) {
- hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
- idx++;
- }
-
- return hashTable3[hash3];
-}
-
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-static U32 ZSTD_insertBtAndGetAllMatches (
- ZSTD_CCtx* zc,
- const BYTE* const ip, const BYTE* const iLimit,
- U32 nbCompares, const U32 mls,
- U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen)
-{
- const BYTE* const base = zc->base;
- const U32 current = (U32)(ip-base);
- const U32 hashLog = zc->appliedParams.cParams.hashLog;
- const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
- U32* const hashTable = zc->hashTable;
- U32 matchIndex = hashTable[h];
- U32* const bt = zc->chainTable;
- const U32 btLog = zc->appliedParams.cParams.chainLog - 1;
- const U32 btMask= (1U << btLog) - 1;
- size_t commonLengthSmaller=0, commonLengthLarger=0;
- const BYTE* const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const U32 btLow = btMask >= current ? 0 : current - btMask;
- const U32 windowLow = zc->lowLimit;
- U32* smallerPtr = bt + 2*(current&btMask);
- U32* largerPtr = bt + 2*(current&btMask) + 1;
- U32 matchEndIdx = current+8;
- U32 dummy32; /* to be nullified at the end */
- U32 mnum = 0;
-
- const U32 minMatch = (mls == 3) ? 3 : 4;
- size_t bestLength = minMatchLen-1;
-
- if (minMatch == 3) { /* HC3 match finder */
- U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
- if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) {
- const BYTE* match;
- size_t currentMl=0;
- if ((!extDict) || matchIndex3 >= dictLimit) {
- match = base + matchIndex3;
- if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
- } else {
- match = dictBase + matchIndex3;
- if (ZSTD_readMINMATCH(match, MINMATCH) == ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
- }
-
- /* save best solution */
- if (currentMl > bestLength) {
- bestLength = currentMl;
- matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3;
- matches[mnum].len = (U32)currentMl;
- mnum++;
- if (currentMl > ZSTD_OPT_NUM) goto update;
- if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/
- }
- }
- }
-
- hashTable[h] = current; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32* nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE* match;
-
- if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength]) {
- matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1;
- }
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength;
- bestLength = matchLength;
- matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex;
- matches[mnum].len = (U32)matchLength;
- mnum++;
- if (matchLength > ZSTD_OPT_NUM) break;
- if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */
- break; /* drop, to guarantee consistency (miss a little bit of compression) */
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
- } else {
- /* match is larger than current */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
-
-update:
- zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
- return mnum;
-}
-
-
-/** Tree updater, providing best match */
-static U32 ZSTD_BtGetAllMatches (
- ZSTD_CCtx* zc,
- const BYTE* const ip, const BYTE* const iLimit,
- const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
-{
- if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
-}
-
-
-static U32 ZSTD_BtGetAllMatches_selectMLS (
- ZSTD_CCtx* zc, /* Index table will be updated */
- const BYTE* ip, const BYTE* const iHighLimit,
- const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
-{
- switch(matchLengthSearch)
- {
- case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
- default :
- case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
- case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
- case 7 :
- case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
- }
-}
-
-/** Tree updater, providing best match */
-static U32 ZSTD_BtGetAllMatches_extDict (
- ZSTD_CCtx* zc,
- const BYTE* const ip, const BYTE* const iLimit,
- const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
-{
- if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
-}
-
-
-static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
- ZSTD_CCtx* zc, /* Index table will be updated */
- const BYTE* ip, const BYTE* const iHighLimit,
- const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
-{
- switch(matchLengthSearch)
- {
- case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
- default :
- case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
- case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
- case 7 :
- case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
- }
-}
-
-
-/*-*******************************
-* Optimal parser
-*********************************/
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize, const int ultra)
-{
- seqStore_t* seqStorePtr = &(ctx->seqStore);
- optState_t* optStatePtr = &(ctx->optState);
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- const BYTE* const base = ctx->base;
- const BYTE* const prefixStart = base + ctx->dictLimit;
-
- const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
- const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
- const U32 mls = ctx->appliedParams.cParams.searchLength;
- const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
-
- ZSTD_optimal_t* opt = optStatePtr->priceTable;
- ZSTD_match_t* matches = optStatePtr->matchTable;
- const BYTE* inr;
- U32 offset, rep[ZSTD_REP_NUM];
-
- /* init */
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
- ip += (ip==prefixStart);
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, match_num, last_pos, litlen, price;
- U32 u, mlen, best_mlen, best_off, litLength;
- memset(opt, 0, sizeof(ZSTD_optimal_t));
- last_pos = 0;
- litlen = (U32)(ip - anchor);
-
- /* check repCode */
- { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
- for (i=(ip == anchor); i<last_i; i++) {
- const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
- if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
- && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
- mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
- if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
- goto _storeSequence;
- }
- best_off = i - (ip == anchor);
- do {
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
- mlen--;
- } while (mlen >= minMatch);
- } } }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
-
- if (!last_pos && !match_num) { ip++; continue; }
-
- if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num-1].len;
- best_off = matches[match_num-1].off;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = 0 */
- best_mlen = (last_pos) ? last_pos : minMatch;
- for (u = 0; u < match_num; u++) {
- mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
- best_mlen = matches[u].len;
- while (mlen <= best_mlen) {
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
- mlen++;
- } }
-
- if (last_pos < minMatch) { ip++; continue; }
-
- /* initialize opt[0] */
- { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
- opt[0].mlen = 1;
- opt[0].litlen = litlen;
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- inr = ip + cur;
-
- if (opt[cur-1].mlen == 1) {
- litlen = opt[cur-1].litlen + 1;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
- } else
- price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
- } else {
- litlen = 1;
- price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
- }
-
- if (cur > last_pos || price <= opt[cur].price)
- SET_PRICE(cur, 1, 0, litlen, price);
-
- if (cur == last_pos) break;
-
- if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
- continue;
-
- mlen = opt[cur].mlen;
- if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
- opt[cur].rep[2] = opt[cur-mlen].rep[1];
- opt[cur].rep[1] = opt[cur-mlen].rep[0];
- opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
- } else {
- opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
- opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
- opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
- }
-
- best_mlen = minMatch;
- { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
- for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */
- const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
- if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
- && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
- mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;
-
- if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen; best_off = i; last_pos = cur + 1;
- goto _storeSequence;
- }
-
- best_off = i - (opt[cur].mlen != 1);
- if (mlen > best_mlen) best_mlen = mlen;
-
- do {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
- } else
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
- SET_PRICE(cur + mlen, mlen, i, litlen, price);
- mlen--;
- } while (mlen >= minMatch);
- } } }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
-
- if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num-1].len;
- best_off = matches[match_num-1].off;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = cur */
- for (u = 0; u < match_num; u++) {
- mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
- best_mlen = matches[u].len;
-
- while (mlen <= best_mlen) {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen)
- price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
- else
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
- SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
-
- mlen++;
- } } }
-
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
-
- /* store sequence */
-_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
- opt[0].mlen = 1;
-
- while (1) {
- mlen = opt[cur].mlen;
- offset = opt[cur].off;
- opt[cur].mlen = best_mlen;
- opt[cur].off = best_off;
- best_mlen = mlen;
- best_off = offset;
- if (mlen > cur) break;
- cur -= mlen;
- }
-
- for (u = 0; u <= last_pos;) {
- u += opt[u].mlen;
- }
-
- for (cur=0; cur < last_pos; ) {
- mlen = opt[cur].mlen;
- if (mlen == 1) { ip++; cur++; continue; }
- offset = opt[cur].off;
- cur += mlen;
- litLength = (U32)(ip - anchor);
-
- if (offset > ZSTD_REP_MOVE_OPT) {
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE_OPT;
- offset--;
- } else {
- if (offset != 0) {
- best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
- if (offset != 1) rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = best_off;
- }
- if (litLength==0) offset--;
- }
-
- ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
- anchor = ip = ip + mlen;
- } } /* for (cur=0; cur < last_pos; ) */
-
- /* Save reps for next block */
- { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
-
- /* Last Literals */
- { size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
- const void* src, size_t srcSize, const int ultra)
-{
- seqStore_t* seqStorePtr = &(ctx->seqStore);
- optState_t* optStatePtr = &(ctx->optState);
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- const BYTE* const base = ctx->base;
- const U32 lowestIndex = ctx->lowLimit;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* const dictBase = ctx->dictBase;
- const BYTE* const dictEnd = dictBase + dictLimit;
-
- const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
- const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
- const U32 mls = ctx->appliedParams.cParams.searchLength;
- const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
-
- ZSTD_optimal_t* opt = optStatePtr->priceTable;
- ZSTD_match_t* matches = optStatePtr->matchTable;
- const BYTE* inr;
-
- /* init */
- U32 offset, rep[ZSTD_REP_NUM];
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
-
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
- ip += (ip==prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, match_num, last_pos, litlen, price;
- U32 u, mlen, best_mlen, best_off, litLength;
- U32 current = (U32)(ip-base);
- memset(opt, 0, sizeof(ZSTD_optimal_t));
- last_pos = 0;
- opt[0].litlen = (U32)(ip - anchor);
-
- /* check repCode */
- { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
- for (i = (ip==anchor); i<last_i; i++) {
- const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
- const U32 repIndex = (U32)(current - repCur);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- if ( (repCur > 0 && repCur <= (S32)current)
- && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
- && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
- /* repcode detected we should take it */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
-
- if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
- goto _storeSequence;
- }
-
- best_off = i - (ip==anchor);
- litlen = opt[0].litlen;
- do {
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
- mlen--;
- } while (mlen >= minMatch);
- } } }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
-
- if (!last_pos && !match_num) { ip++; continue; }
-
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
- opt[0].mlen = 1;
-
- if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num-1].len;
- best_off = matches[match_num-1].off;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- best_mlen = (last_pos) ? last_pos : minMatch;
-
- /* set prices using matches at position = 0 */
- for (u = 0; u < match_num; u++) {
- mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
- best_mlen = matches[u].len;
- litlen = opt[0].litlen;
- while (mlen <= best_mlen) {
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
- mlen++;
- } }
-
- if (last_pos < minMatch) {
- ip++; continue;
- }
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- inr = ip + cur;
-
- if (opt[cur-1].mlen == 1) {
- litlen = opt[cur-1].litlen + 1;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
- } else
- price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
- } else {
- litlen = 1;
- price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
- }
-
- if (cur > last_pos || price <= opt[cur].price)
- SET_PRICE(cur, 1, 0, litlen, price);
-
- if (cur == last_pos) break;
-
- if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
- continue;
-
- mlen = opt[cur].mlen;
- if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
- opt[cur].rep[2] = opt[cur-mlen].rep[1];
- opt[cur].rep[1] = opt[cur-mlen].rep[0];
- opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
- } else {
- opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
- opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
- opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
- }
-
- best_mlen = minMatch;
- { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
- for (i = (mlen != 1); i<last_i; i++) {
- const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
- const U32 repIndex = (U32)(current+cur - repCur);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- if ( (repCur > 0 && repCur <= (S32)(current+cur))
- && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
- && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
- /* repcode detected */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
-
- if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen; best_off = i; last_pos = cur + 1;
- goto _storeSequence;
- }
-
- best_off = i - (opt[cur].mlen != 1);
- if (mlen > best_mlen) best_mlen = mlen;
-
- do {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
- } else
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
- SET_PRICE(cur + mlen, mlen, i, litlen, price);
- mlen--;
- } while (mlen >= minMatch);
- } } }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
-
- if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num-1].len;
- best_off = matches[match_num-1].off;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = cur */
- for (u = 0; u < match_num; u++) {
- mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
- best_mlen = matches[u].len;
-
- while (mlen <= best_mlen) {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen)
- price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
- else
- price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
- SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
-
- mlen++;
- } } } /* for (cur = 1; cur <= last_pos; cur++) */
-
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
-
- /* store sequence */
-_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
- opt[0].mlen = 1;
-
- while (1) {
- mlen = opt[cur].mlen;
- offset = opt[cur].off;
- opt[cur].mlen = best_mlen;
- opt[cur].off = best_off;
- best_mlen = mlen;
- best_off = offset;
- if (mlen > cur) break;
- cur -= mlen;
- }
-
- for (u = 0; u <= last_pos; ) {
- u += opt[u].mlen;
- }
-
- for (cur=0; cur < last_pos; ) {
- mlen = opt[cur].mlen;
- if (mlen == 1) { ip++; cur++; continue; }
- offset = opt[cur].off;
- cur += mlen;
- litLength = (U32)(ip - anchor);
-
- if (offset > ZSTD_REP_MOVE_OPT) {
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE_OPT;
- offset--;
- } else {
- if (offset != 0) {
- best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
- if (offset != 1) rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = best_off;
- }
-
- if (litLength==0) offset--;
- }
-
- ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
- anchor = ip = ip + mlen;
- } } /* for (cur=0; cur < last_pos; ) */
-
- /* Save reps for next block */
- { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
-
- /* Last Literals */
- { size_t lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
+#if defined (__cplusplus)
}
+#endif
-#endif /* ZSTD_OPT_H_91842398743 */
+#endif /* ZSTD_OPT_H */
diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c
index 8564bc439214..7831cd3bd81a 100644
--- a/lib/compress/zstdmt_compress.c
+++ b/lib/compress/zstdmt_compress.c
@@ -5,11 +5,12 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* ====== Tuning parameters ====== */
-#define ZSTDMT_NBTHREADS_MAX 256
+#define ZSTDMT_NBTHREADS_MAX 200
#define ZSTDMT_OVERLAPLOG_DEFAULT 6
@@ -52,22 +53,24 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
}
#define MUTEX_WAIT_TIME_DLEVEL 6
-#define PTHREAD_MUTEX_LOCK(mutex) { \
- if (ZSTD_DEBUG>=MUTEX_WAIT_TIME_DLEVEL) { \
+#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \
+ if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) { \
unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
- pthread_mutex_lock(mutex); \
+ ZSTD_pthread_mutex_lock(mutex); \
{ unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
unsigned long long const elapsedTime = (afterTime-beforeTime); \
if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \
DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
elapsedTime, #mutex); \
} } \
- } else pthread_mutex_lock(mutex); \
+ } else { \
+ ZSTD_pthread_mutex_lock(mutex); \
+ } \
}
#else
-# define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m)
+# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
# define DEBUG_PRINTHEX(l,p,n) {}
#endif
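
The ZSTD_PTHREAD_MUTEX_LOCK wrapper above only measures lock acquisition time when ZSTD_DEBUG reaches MUTEX_WAIT_TIME_DLEVEL, and logs waits above a threshold. A minimal standalone sketch of the same idea, written against plain pthreads with illustrative names (not part of zstd):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/time.h>

    /* current wall-clock time in microseconds, same role as
     * GetCurrentClockTimeMicroseconds() above */
    static unsigned long long now_usec(void)
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (unsigned long long)tv.tv_sec * 1000000ULL + (unsigned long long)tv.tv_usec;
    }

    /* lock `m`, and report if acquiring it took longer than 1 millisecond */
    static void timed_lock(pthread_mutex_t* m, const char* name)
    {
        unsigned long long const before = now_usec();
        pthread_mutex_lock(m);
        {   unsigned long long const elapsed = now_usec() - before;
            if (elapsed > 1000)
                fprintf(stderr, "waited %llu us for mutex %s\n", elapsed, name);
        }
    }
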
@@ -84,7 +87,7 @@ typedef struct buffer_s {
static const buffer_t g_nullBuffer = { NULL, 0 };
typedef struct ZSTDMT_bufferPool_s {
- pthread_mutex_t poolMutex;
+ ZSTD_pthread_mutex_t poolMutex;
size_t bufferSize;
unsigned totalBuffers;
unsigned nbBuffers;
@@ -98,7 +101,7 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_custo
ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
if (bufPool==NULL) return NULL;
- if (pthread_mutex_init(&bufPool->poolMutex, NULL)) {
+ if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
ZSTD_free(bufPool, cMem);
return NULL;
}
@@ -112,10 +115,13 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_custo
static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
unsigned u;
+ DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
if (!bufPool) return; /* compatibility with free on NULL */
- for (u=0; u<bufPool->totalBuffers; u++)
+ for (u=0; u<bufPool->totalBuffers; u++) {
+ DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
- pthread_mutex_destroy(&bufPool->poolMutex);
+ }
+ ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
ZSTD_free(bufPool, bufPool->cMem);
}
@@ -126,10 +132,10 @@ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
+ (bufPool->totalBuffers - 1) * sizeof(buffer_t);
unsigned u;
size_t totalBufferSize = 0;
- pthread_mutex_lock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
for (u=0; u<bufPool->totalBuffers; u++)
totalBufferSize += bufPool->bTable[u].size;
- pthread_mutex_unlock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
return poolSize + totalBufferSize;
}
@@ -145,20 +151,21 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
size_t const bSize = bufPool->bufferSize;
DEBUGLOG(5, "ZSTDMT_getBuffer");
- pthread_mutex_lock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
if (bufPool->nbBuffers) { /* try to use an existing buffer */
buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
size_t const availBufferSize = buf.size;
+ bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) {
/* large enough, but not too much */
- pthread_mutex_unlock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
return buf;
}
/* size conditions not respected : scratch this buffer, create new one */
DEBUGLOG(5, "existing buffer does not meet size conditions => freeing");
ZSTD_free(buf.start, bufPool->cMem);
}
- pthread_mutex_unlock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
/* create new buffer */
DEBUGLOG(5, "create a new buffer");
{ buffer_t buffer;
@@ -174,24 +181,38 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
if (buf.start == NULL) return; /* compatible with release on NULL */
DEBUGLOG(5, "ZSTDMT_releaseBuffer");
- pthread_mutex_lock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
if (bufPool->nbBuffers < bufPool->totalBuffers) {
bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
- pthread_mutex_unlock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
return;
}
- pthread_mutex_unlock(&bufPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
/* Reached bufferPool capacity (should not happen) */
DEBUGLOG(5, "buffer pool capacity reached => freeing ");
ZSTD_free(buf.start, bufPool->cMem);
}
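
ZSTDMT_getBuffer() / ZSTDMT_releaseBuffer() above implement a small mutex-protected free list: released buffers are cached for reuse, and a cached buffer is handed out again only if its size is at least the requested size and no more than ten times it; otherwise it is freed and a fresh one allocated. A generic sketch of that reuse policy (illustrative only, locking omitted):

    #include <stdlib.h>

    typedef struct { void* start; size_t size; } buf_t;

    /* hand out a cached buffer only when its size fits the 1x..10x window used
     * above; otherwise discard it and allocate a fresh one */
    static buf_t pool_get(buf_t* cache, int* nbCached, size_t want)
    {
        if (*nbCached > 0) {
            buf_t b = cache[--(*nbCached)];
            if ((b.size >= want) && (b.size <= 10*want)) return b;
            free(b.start);   /* wrong size class : scratch it */
        }
        {   buf_t b;
            b.start = malloc(want);
            b.size  = b.start ? want : 0;
            return b;
        }
    }
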
+/* Sets parameters relevant to the compression job, initializing others to
+ * default values. Notably, nbThreads should probably be zero. */
+static ZSTD_CCtx_params ZSTDMT_makeJobCCtxParams(ZSTD_CCtx_params const params)
+{
+ ZSTD_CCtx_params jobParams;
+ memset(&jobParams, 0, sizeof(jobParams));
+
+ jobParams.cParams = params.cParams;
+ jobParams.fParams = params.fParams;
+ jobParams.compressionLevel = params.compressionLevel;
+
+ jobParams.ldmParams = params.ldmParams;
+ return jobParams;
+}
/* ===== CCtx Pool ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */
typedef struct {
- pthread_mutex_t poolMutex;
+ ZSTD_pthread_mutex_t poolMutex;
unsigned totalCCtx;
unsigned availCCtx;
ZSTD_customMem cMem;
@@ -204,7 +225,7 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
unsigned u;
for (u=0; u<pool->totalCCtx; u++)
ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */
- pthread_mutex_destroy(&pool->poolMutex);
+ ZSTD_pthread_mutex_destroy(&pool->poolMutex);
ZSTD_free(pool, pool->cMem);
}
@@ -216,7 +237,7 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*), cMem);
if (!cctxPool) return NULL;
- if (pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
+ if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
ZSTD_free(cctxPool, cMem);
return NULL;
}
@@ -232,7 +253,7 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
- pthread_mutex_lock(&cctxPool->poolMutex);
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
{ unsigned const nbThreads = cctxPool->totalCCtx;
size_t const poolSize = sizeof(*cctxPool)
+ (nbThreads-1)*sizeof(ZSTD_CCtx*);
@@ -241,7 +262,7 @@ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
for (u=0; u<nbThreads; u++) {
totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
}
- pthread_mutex_unlock(&cctxPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
return poolSize + totalCCtxSize;
}
}
@@ -249,14 +270,14 @@ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
DEBUGLOG(5, "ZSTDMT_getCCtx");
- pthread_mutex_lock(&cctxPool->poolMutex);
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
if (cctxPool->availCCtx) {
cctxPool->availCCtx--;
{ ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
- pthread_mutex_unlock(&cctxPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
return cctx;
} }
- pthread_mutex_unlock(&cctxPool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
DEBUGLOG(5, "create one more CCtx");
return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
}
@@ -264,7 +285,7 @@ static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
if (cctx==NULL) return; /* compatibility with release on NULL */
- pthread_mutex_lock(&pool->poolMutex);
+ ZSTD_pthread_mutex_lock(&pool->poolMutex);
if (pool->availCCtx < pool->totalCCtx)
pool->cctx[pool->availCCtx++] = cctx;
else {
@@ -272,7 +293,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
DEBUGLOG(5, "CCtx pool overflow : free cctx");
ZSTD_freeCCtx(cctx);
}
- pthread_mutex_unlock(&pool->poolMutex);
+ ZSTD_pthread_mutex_unlock(&pool->poolMutex);
}
@@ -290,9 +311,9 @@ typedef struct {
unsigned lastChunk;
unsigned jobCompleted;
unsigned jobScanned;
- pthread_mutex_t* jobCompleted_mutex;
- pthread_cond_t* jobCompleted_cond;
- ZSTD_parameters params;
+ ZSTD_pthread_mutex_t* jobCompleted_mutex;
+ ZSTD_pthread_cond_t* jobCompleted_cond;
+ ZSTD_CCtx_params params;
const ZSTD_CDict* cdict;
ZSTDMT_CCtxPool* cctxPool;
ZSTDMT_bufferPool* bufPool;
@@ -329,10 +350,15 @@ void ZSTDMT_compressChunk(void* jobDescription)
if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
} else { /* srcStart points at reloaded section */
if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0; /* ensure no srcSize control */
- { size_t const dictModeError = ZSTD_setCCtxParameter(cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */
- size_t const initError = ZSTD_compressBegin_advanced(cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
- if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; }
- ZSTD_setCCtxParameter(cctx, ZSTD_p_forceWindow, 1);
+ { ZSTD_CCtx_params jobParams = job->params;
+ size_t const forceWindowError =
+ ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstChunk);
+ /* Force loading dictionary in "content-only" mode (no header analysis) */
+ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, job->srcStart, job->dictSize, ZSTD_dm_rawContent, jobParams, job->fullFrameSize);
+ if (ZSTD_isError(initError) || ZSTD_isError(forceWindowError)) {
+ job->cSize = initError;
+ goto _endJob;
+ }
} }
if (!job->firstChunk) { /* flush and overwrite frame header when it's not first segment */
size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, 0);
@@ -353,11 +379,11 @@ _endJob:
ZSTDMT_releaseCCtx(job->cctxPool, cctx);
ZSTDMT_releaseBuffer(job->bufPool, job->src);
job->src = g_nullBuffer; job->srcStart = NULL;
- PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
+ ZSTD_PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
job->jobCompleted = 1;
job->jobScanned = 0;
- pthread_cond_signal(job->jobCompleted_cond);
- pthread_mutex_unlock(job->jobCompleted_mutex);
+ ZSTD_pthread_cond_signal(job->jobCompleted_cond);
+ ZSTD_pthread_mutex_unlock(job->jobCompleted_mutex);
}
@@ -375,24 +401,21 @@ struct ZSTDMT_CCtx_s {
ZSTDMT_jobDescription* jobs;
ZSTDMT_bufferPool* bufPool;
ZSTDMT_CCtxPool* cctxPool;
- pthread_mutex_t jobCompleted_mutex;
- pthread_cond_t jobCompleted_cond;
+ ZSTD_pthread_mutex_t jobCompleted_mutex;
+ ZSTD_pthread_cond_t jobCompleted_cond;
size_t targetSectionSize;
size_t inBuffSize;
size_t dictSize;
size_t targetDictSize;
inBuff_t inBuff;
- ZSTD_parameters params;
+ ZSTD_CCtx_params params;
XXH64_state_t xxhState;
- unsigned nbThreads;
unsigned jobIDMask;
unsigned doneJobID;
unsigned nextJobID;
unsigned frameEnded;
unsigned allJobsCompleted;
- unsigned overlapLog;
unsigned long long frameContentSize;
- size_t sectionSize;
ZSTD_customMem cMem;
ZSTD_CDict* cdictLocal;
const ZSTD_CDict* cdict;
@@ -407,6 +430,15 @@ static ZSTDMT_jobDescription* ZSTDMT_allocJobsTable(U32* nbJobsPtr, ZSTD_customM
nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
}
+/* Internal only */
+size_t ZSTDMT_initializeCCtxParameters(ZSTD_CCtx_params* params, unsigned nbThreads)
+{
+ params->nbThreads = nbThreads;
+ params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT;
+ params->jobSize = 0;
+ return 0;
+}
+
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
{
ZSTDMT_CCtx* mtctx;
@@ -421,12 +453,10 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
if (!mtctx) return NULL;
+ ZSTDMT_initializeCCtxParameters(&mtctx->params, nbThreads);
mtctx->cMem = cMem;
- mtctx->nbThreads = nbThreads;
mtctx->allJobsCompleted = 1;
- mtctx->sectionSize = 0;
- mtctx->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
- mtctx->factory = POOL_create(nbThreads, 0);
+ mtctx->factory = POOL_create_advanced(nbThreads, 0, cMem);
mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, cMem);
mtctx->jobIDMask = nbJobs - 1;
mtctx->bufPool = ZSTDMT_createBufferPool(nbThreads, cMem);
@@ -435,11 +465,11 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
- if (pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL)) {
+ if (ZSTD_pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL)) {
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
- if (pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) {
+ if (ZSTD_pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) {
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
@@ -459,28 +489,46 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
unsigned jobID;
DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+ DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+ DEBUGLOG(4, "job%02u: release src address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].src.start);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].src);
mtctx->jobs[jobID].src = g_nullBuffer;
}
memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
+ DEBUGLOG(4, "input: release address %08X", (U32)(size_t)mtctx->inBuff.buffer.start);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);
mtctx->inBuff.buffer = g_nullBuffer;
mtctx->allJobsCompleted = 1;
}
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs)
+{
+ DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
+ while (zcs->doneJobID < zcs->nextJobID) {
+ unsigned const jobID = zcs->doneJobID & zcs->jobIDMask;
+ ZSTD_PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
+ while (zcs->jobs[jobID].jobCompleted==0) {
+ DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */
+ ZSTD_pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);
+ }
+ ZSTD_pthread_mutex_unlock(&zcs->jobCompleted_mutex);
+ zcs->doneJobID++;
+ }
+}
+
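
The wait loop above (moved up so ZSTDMT_freeCCtx() can rely on it) is the standard condition-variable pattern: re-check the predicate after every wakeup while holding the mutex. A minimal generic sketch in plain pthreads, with illustrative names:

    #include <pthread.h>

    /* canonical predicate loop : `done` is protected by `mtx`, and the wait
     * re-checks it after every wakeup (spurious wakeups are allowed) */
    static void wait_until_done(pthread_mutex_t* mtx, pthread_cond_t* cond, const int* done)
    {
        pthread_mutex_lock(mtx);
        while (!*done)
            pthread_cond_wait(cond, mtx);
        pthread_mutex_unlock(mtx);
    }
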
size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
if (mtctx==NULL) return 0; /* compatible with free on NULL */
- POOL_free(mtctx->factory);
- if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */
- ZSTDMT_freeBufferPool(mtctx->bufPool); /* release job resources into pools first */
+ POOL_free(mtctx->factory); /* stop and free worker threads */
+ ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
ZSTD_free(mtctx->jobs, mtctx->cMem);
+ ZSTDMT_freeBufferPool(mtctx->bufPool);
ZSTDMT_freeCCtxPool(mtctx->cctxPool);
ZSTD_freeCDict(mtctx->cdictLocal);
- pthread_mutex_destroy(&mtctx->jobCompleted_mutex);
- pthread_cond_destroy(&mtctx->jobCompleted_cond);
+ ZSTD_pthread_mutex_destroy(&mtctx->jobCompleted_mutex);
+ ZSTD_pthread_cond_destroy(&mtctx->jobCompleted_cond);
ZSTD_free(mtctx, mtctx->cMem);
return 0;
}
@@ -496,22 +544,35 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
+ ZSTD_sizeof_CDict(mtctx->cdictLocal);
}
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value)
-{
+/* Internal only */
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(
+ ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value) {
switch(parameter)
{
case ZSTDMT_p_sectionSize :
- mtctx->sectionSize = value;
+ params->jobSize = value;
return 0;
case ZSTDMT_p_overlapSectionLog :
- DEBUGLOG(5, "ZSTDMT_p_overlapSectionLog : %u", value);
- mtctx->overlapLog = (value >= 9) ? 9 : value;
+ DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
+ params->overlapSizeLog = (value >= 9) ? 9 : value;
return 0;
default :
return ERROR(parameter_unsupported);
}
}
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value)
+{
+ switch(parameter)
+ {
+ case ZSTDMT_p_sectionSize :
+ return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+ case ZSTDMT_p_overlapSectionLog :
+ return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+ default :
+ return ERROR(parameter_unsupported);
+ }
+}
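
For context, a hedged usage sketch of the public wrapper above: parameters are set one at a time on an existing ZSTDMT_CCtx before compressing. It assumes the multithreaded build and the other ZSTDMT_* entry points declared in zstdmt_compress.h; error handling is elided.

    #include "zstdmt_compress.h"   /* ZSTDMT_* declarations */

    size_t mt_compress(void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4 /* threads */);
        size_t cSize;
        ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_sectionSize, 1 << 20);   /* 1 MB jobs */
        ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 6);   /* default overlap */
        cSize = ZSTDMT_compressCCtx(mtctx, dst, dstCap, src, srcSize, 3 /* level */);
        ZSTDMT_freeCCtx(mtctx);
        return cSize;   /* test with ZSTD_isError() */
    }
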
/* ------------------------------------------ */
/* ===== Multi-threaded compression ===== */
@@ -528,17 +589,17 @@ static unsigned computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbT
return (multiplier>1) ? nbChunksLarge : nbChunksSmall;
}
-
-size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict,
- ZSTD_parameters const params,
- unsigned overlapLog)
+static size_t ZSTDMT_compress_advanced_internal(
+ ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params const params)
{
- unsigned const overlapRLog = (overlapLog>9) ? 0 : 9-overlapLog;
+ ZSTD_CCtx_params const jobParams = ZSTDMT_makeJobCCtxParams(params);
+ unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog;
size_t const overlapSize = (overlapRLog>=9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog);
- unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, mtctx->nbThreads);
+ unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, params.nbThreads);
size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0x7FFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */
const char* const srcStart = (const char*)src;
@@ -546,12 +607,14 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize)); /* presumes avgChunkSize >= 256 KB, which should be the case */
size_t frameStartPos = 0, dstBufferPos = 0;
XXH64_state_t xxh64;
+ assert(jobParams.nbThreads == 0);
+ assert(mtctx->cctxPool->totalCCtx == params.nbThreads);
DEBUGLOG(4, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize);
if (nbChunks==1) { /* fallback to single-thread mode */
ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
- if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, params.fParams);
- return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
+ if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
}
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is required for compressWithinDst */
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize) );
@@ -580,7 +643,7 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
mtctx->jobs[u].srcSize = chunkSize;
mtctx->jobs[u].cdict = mtctx->nextJobID==0 ? cdict : NULL;
mtctx->jobs[u].fullFrameSize = srcSize;
- mtctx->jobs[u].params = params;
+ mtctx->jobs[u].params = jobParams;
/* do not calculate checksum within sections, but write it in header for first section */
if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0;
mtctx->jobs[u].dstBuff = dstBuffer;
@@ -610,12 +673,12 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
unsigned chunkID;
for (chunkID=0; chunkID<nbChunks; chunkID++) {
DEBUGLOG(5, "waiting for chunk %u ", chunkID);
- PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
while (mtctx->jobs[chunkID].jobCompleted==0) {
DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", chunkID);
- pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex);
+ ZSTD_pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex);
}
- pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
+ ZSTD_pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
DEBUGLOG(5, "ready to write chunk %u ", chunkID);
mtctx->jobs[chunkID].srcStart = NULL;
@@ -628,9 +691,8 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
if (chunkID >= compressWithinDst) { /* chunk compressed into its own buffer, which must be released */
DEBUGLOG(5, "releasing buffer %u>=%u", chunkID, compressWithinDst);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[chunkID].dstBuff);
- }
- mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
- }
+ } }
+ mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
dstPos += cSize ;
}
} /* for (chunkID=0; chunkID<nbChunks; chunkID++) */
@@ -651,6 +713,23 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
}
}
+size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_parameters const params,
+ unsigned overlapLog)
+{
+ ZSTD_CCtx_params cctxParams = mtctx->params;
+ cctxParams.cParams = params.cParams;
+ cctxParams.fParams = params.fParams;
+ cctxParams.overlapSizeLog = overlapLog;
+ return ZSTDMT_compress_advanced_internal(mtctx,
+ dst, dstCapacity,
+ src, srcSize,
+ cdict, cctxParams);
+}
+
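
The thin public wrapper above only translates ZSTD_parameters plus overlapLog into a ZSTD_CCtx_params before delegating. A hedged call-site sketch, assuming an mtctx created with ZSTDMT_createCCtx() and the experimental ZSTD_getParams() helper (ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters, ZSTD_getParams */
    #include <zstd.h>
    #include "zstdmt_compress.h"

    size_t mt_compress_adv(ZSTDMT_CCtx* mtctx,
                           void* dst, size_t dstCap,
                           const void* src, size_t srcSize)
    {
        ZSTD_parameters const params = ZSTD_getParams(5 /* level */, srcSize, 0 /* no dict */);
        return ZSTDMT_compress_advanced(mtctx, dst, dstCap, src, srcSize,
                                        NULL /* no cdict */, params,
                                        6 /* overlapLog, the default */);
    }
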
size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
@@ -668,38 +747,25 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
/* ======= Streaming API ======= */
/* ====================================== */
-static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs)
-{
- DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
- while (zcs->doneJobID < zcs->nextJobID) {
- unsigned const jobID = zcs->doneJobID & zcs->jobIDMask;
- PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
- while (zcs->jobs[jobID].jobCompleted==0) {
- DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */
- pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);
- }
- pthread_mutex_unlock(&zcs->jobCompleted_mutex);
- zcs->doneJobID++;
- }
-}
-
-
-/** ZSTDMT_initCStream_internal() :
- * internal usage only */
-size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
- const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
- ZSTD_parameters params, unsigned long long pledgedSrcSize)
+size_t ZSTDMT_initCStream_internal(
+ ZSTDMT_CCtx* zcs,
+ const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
+ const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTDMT_initCStream_internal");
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ assert(zcs->cctxPool->totalCCtx == params.nbThreads);
- if (zcs->nbThreads==1) {
+ if (params.nbThreads==1) {
+ ZSTD_CCtx_params const singleThreadParams = ZSTDMT_makeJobCCtxParams(params);
DEBUGLOG(4, "single thread mode");
+ assert(singleThreadParams.nbThreads == 0);
return ZSTD_initCStream_internal(zcs->cctxPool->cctx[0],
- dict, dictSize, cdict,
- params, pledgedSrcSize);
+ dict, dictSize, cdict,
+ singleThreadParams, pledgedSrcSize);
}
if (zcs->allJobsCompleted == 0) { /* previous compression not correctly finished */
@@ -714,7 +780,7 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
DEBUGLOG(4,"cdictLocal: %08X", (U32)(size_t)zcs->cdictLocal);
ZSTD_freeCDict(zcs->cdictLocal);
zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
- 0 /* byRef */, ZSTD_dm_auto, /* note : a loadPrefix becomes an internal CDict */
+ ZSTD_dlm_byCopy, dictMode, /* note : a loadPrefix becomes an internal CDict */
params.cParams, zcs->cMem);
zcs->cdict = zcs->cdictLocal;
if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
@@ -725,10 +791,10 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
zcs->cdict = cdict;
}
- zcs->targetDictSize = (zcs->overlapLog==0) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - (9 - zcs->overlapLog));
- DEBUGLOG(4, "overlapLog : %u ", zcs->overlapLog);
+ zcs->targetDictSize = (params.overlapSizeLog==0) ? 0 : (size_t)1 << (params.cParams.windowLog - (9 - params.overlapSizeLog));
+ DEBUGLOG(4, "overlapLog : %u ", params.overlapSizeLog);
DEBUGLOG(4, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
- zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2);
+ zcs->targetSectionSize = params.jobSize ? params.jobSize : (size_t)1 << (params.cParams.windowLog + 2);
zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize);
zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize);
DEBUGLOG(4, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
@@ -749,8 +815,12 @@ size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
ZSTD_parameters params,
unsigned long long pledgedSrcSize)
{
+ ZSTD_CCtx_params cctxParams = mtctx->params;
DEBUGLOG(5, "ZSTDMT_initCStream_advanced");
- return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, NULL, params, pledgedSrcSize);
+ cctxParams.cParams = params.cParams;
+ cctxParams.fParams = params.fParams;
+ return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dm_auto, NULL,
+ cctxParams, pledgedSrcSize);
}
size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
@@ -758,11 +828,12 @@ size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize)
{
- ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict);
+ ZSTD_CCtx_params cctxParams = mtctx->params;
+ cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
+ cctxParams.fParams = fParams;
if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */
- params.fParams = fParams;
- return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, cdict,
- params, pledgedSrcSize);
+ return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dm_auto, cdict,
+ cctxParams, pledgedSrcSize);
}
@@ -770,14 +841,18 @@ size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
* pledgedSrcSize is optional and can be zero == unknown */
size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize)
{
- if (zcs->nbThreads==1)
+ if (zcs->params.nbThreads==1)
return ZSTD_resetCStream(zcs->cctxPool->cctx[0], pledgedSrcSize);
- return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize);
+ return ZSTDMT_initCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, 0, zcs->params,
+ pledgedSrcSize);
}
size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
- return ZSTDMT_initCStream_internal(zcs, NULL, 0, NULL, params, 0);
+ ZSTD_CCtx_params cctxParams = zcs->params;
+ cctxParams.cParams = params.cParams;
+ cctxParams.fParams = params.fParams;
+ return ZSTDMT_initCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, NULL, cctxParams, 0);
}
@@ -856,13 +931,13 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
{
unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask;
if (zcs->doneJobID == zcs->nextJobID) return 0; /* all flushed ! */
- PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
+ ZSTD_PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
while (zcs->jobs[wJobID].jobCompleted==0) {
DEBUGLOG(5, "waiting for jobCompleted signal from job %u", zcs->doneJobID);
- if (!blockToFlush) { pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; } /* nothing ready to be flushed => skip */
- pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); /* block when nothing available to flush */
+ if (!blockToFlush) { ZSTD_pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; } /* nothing ready to be flushed => skip */
+ ZSTD_pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); /* block when nothing available to flush */
}
- pthread_mutex_unlock(&zcs->jobCompleted_mutex);
+ ZSTD_pthread_mutex_unlock(&zcs->jobCompleted_mutex);
/* compression job completed : output can be flushed */
{ ZSTDMT_jobDescription job = zcs->jobs[wJobID];
if (!job.jobScanned) {
@@ -906,7 +981,7 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
/** ZSTDMT_compressStream_generic() :
- * internal use only
+ * internal use only - exposed to be invoked from zstd_compress.c
* assumption : output and input are valid (pos <= size)
* @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
@@ -915,25 +990,26 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_EndDirective endOp)
{
size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize;
+ unsigned forwardInputProgress = 0;
assert(output->pos <= output->size);
assert(input->pos <= input->size);
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
- /* current frame being ended. Only flush/end are allowed. Or start new frame with init */
+ /* current frame being ended. Only flush/end are allowed */
return ERROR(stage_wrong);
}
- if (mtctx->nbThreads==1) { /* delegate to single-thread (synchronous) */
+ if (mtctx->params.nbThreads==1) { /* delegate to single-thread (synchronous) */
return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
}
- /* single-pass shortcut (note : this is synchronous-mode) */
- if ( (mtctx->nextJobID==0) /* just started */
- && (mtctx->inBuff.filled==0) /* nothing buffered */
- && (endOp==ZSTD_e_end) /* end order */
+ /* single-pass shortcut (note : synchronous-mode) */
+ if ( (mtctx->nextJobID == 0) /* just started */
+ && (mtctx->inBuff.filled == 0) /* nothing buffered */
+ && (endOp == ZSTD_e_end) /* end order */
&& (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough room */
- size_t const cSize = ZSTDMT_compress_advanced(mtctx,
+ size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
(char*)output->dst + output->pos, output->size - output->pos,
(const char*)input->src + input->pos, input->size - input->pos,
- mtctx->cdict, mtctx->params, mtctx->overlapLog);
+ mtctx->cdict, mtctx->params);
if (ZSTD_isError(cSize)) return cSize;
input->pos = input->size;
output->pos += cSize;
@@ -946,15 +1022,16 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
/* fill input buffer */
if (input->size > input->pos) { /* support NULL input */
if (mtctx->inBuff.buffer.start == NULL) {
- mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);
- if (mtctx->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
+ mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool); /* note : may fail, in which case, no forward input progress */
mtctx->inBuff.filled = 0;
}
- { size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
+ if (mtctx->inBuff.buffer.start) {
+ size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
DEBUGLOG(5, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
input->pos += toLoad;
mtctx->inBuff.filled += toLoad;
+ forwardInputProgress = toLoad>0;
} }
if ( (mtctx->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */
@@ -963,7 +1040,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
}
/* check for potential compressed data ready to be flushed */
- CHECK_F( ZSTDMT_flushNextJob(mtctx, output, (mtctx->inBuff.filled == mtctx->inBuffSize) /* blockToFlush */) ); /* block if it wasn't possible to create new job due to saturation */
+ CHECK_F( ZSTDMT_flushNextJob(mtctx, output, !forwardInputProgress /* blockToFlush */) ); /* block if there was no forward input progress */
if (input->pos < input->size) /* input not consumed : do not flush yet */
endOp = ZSTD_e_continue;
@@ -1008,7 +1085,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* outp
size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
{
DEBUGLOG(5, "ZSTDMT_flushStream");
- if (zcs->nbThreads==1)
+ if (zcs->params.nbThreads==1)
return ZSTD_flushStream(zcs->cctxPool->cctx[0], output);
return ZSTDMT_flushStream_internal(zcs, output, 0 /* endFrame */);
}
@@ -1016,7 +1093,7 @@ size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
{
DEBUGLOG(4, "ZSTDMT_endStream");
- if (zcs->nbThreads==1)
+ if (zcs->params.nbThreads==1)
return ZSTD_endStream(zcs->cctxPool->cctx[0], output);
return ZSTDMT_flushStream_internal(zcs, output, 1 /* endFrame */);
}
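
Taken together, the streaming entry points in this file keep their public shape even though nbThreads now lives in params. A hedged end-to-end sketch of the streaming loop; return values should be checked with ZSTD_isError() in real code:

    #include <zstd.h>              /* ZSTD_inBuffer, ZSTD_outBuffer */
    #include "zstdmt_compress.h"

    size_t mt_stream_compress(void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        ZSTDMT_CCtx* const zcs = ZSTDMT_createCCtx(2 /* threads */);
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCap, 0 };
        ZSTDMT_initCStream(zcs, 3 /* compression level */);
        while (in.pos < in.size)
            ZSTDMT_compressStream(zcs, &out, &in);        /* feeds input, may start jobs */
        while (ZSTDMT_endStream(zcs, &out) != 0) { ; }    /* 0 == frame fully flushed */
        ZSTDMT_freeCCtx(zcs);
        return out.pos;   /* number of compressed bytes written */
    }
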
diff --git a/lib/compress/zstdmt_compress.h b/lib/compress/zstdmt_compress.h
index 0f0fc2b03fc6..8c59c684f1a3 100644
--- a/lib/compress/zstdmt_compress.h
+++ b/lib/compress/zstdmt_compress.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDMT_COMPRESS_H
@@ -80,19 +81,19 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
ZSTD_frameParameters fparams,
unsigned long long pledgedSrcSize); /* note : zero means empty */
-/* ZSDTMT_parameter :
+/* ZSTDMT_parameter :
* List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
ZSTDMT_p_sectionSize, /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */
ZSTDMT_p_overlapSectionLog /* Log of overlapped section; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */
-} ZSDTMT_parameter;
+} ZSTDMT_parameter;
/* ZSTDMT_setMTCtxParameter() :
* allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
* The function must be called typically after ZSTD_createCCtx().
* Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
* @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value);
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
/*! ZSTDMT_compressStream_generic() :
@@ -107,6 +108,22 @@ ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_EndDirective endOp);
+/* === Private definitions; never ever use directly === */
+
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
+
+size_t ZSTDMT_initializeCCtxParameters(ZSTD_CCtx_params* params, unsigned nbThreads);
+
+/*! ZSTDMT_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+ const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
+
#if defined (__cplusplus)
}
diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c
index d2bc545e52c5..96fc60908960 100644
--- a/lib/decompress/zstd_decompress.c
+++ b/lib/decompress/zstd_decompress.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -34,7 +35,7 @@
* Frames requiring more memory will be rejected.
*/
#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
-# define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
+# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
#endif
@@ -101,13 +102,15 @@ struct ZSTD_DCtx_s
const void* dictEnd; /* end of previous segment */
size_t expected;
ZSTD_frameHeader fParams;
- blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+ U64 decodedSize;
+ blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
ZSTD_dStage stage;
U32 litEntropy;
U32 fseEntropy;
XXH64_state_t xxhState;
size_t headerSize;
U32 dictID;
+ ZSTD_format_e format;
const BYTE* litPtr;
ZSTD_customMem customMem;
size_t litSize;
@@ -126,7 +129,6 @@ struct ZSTD_DCtx_s
size_t outBuffSize;
size_t outStart;
size_t outEnd;
- size_t blockSize;
size_t lhSize;
void* legacyContext;
U32 previousLegacyVersion;
@@ -148,39 +150,44 @@ size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
-size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
- dctx->expected = ZSTD_frameHeaderSize_prefix;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- dctx->previousDstEnd = NULL;
- dctx->base = NULL;
- dctx->vBase = NULL;
- dctx->dictEnd = NULL;
- dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
- dctx->litEntropy = dctx->fseEntropy = 0;
- dctx->dictID = 0;
- MEM_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
- memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
- dctx->LLTptr = dctx->entropy.LLTable;
- dctx->MLTptr = dctx->entropy.MLTable;
- dctx->OFTptr = dctx->entropy.OFTable;
- dctx->HUFptr = dctx->entropy.hufTable;
- return 0;
+ size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
+ ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize :
+ ZSTD_frameHeaderSize_prefix;
+ ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
}
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
- ZSTD_decompressBegin(dctx); /* cannot fail */
- dctx->staticSize = 0;
+ dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
+ dctx->staticSize = 0;
dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
- dctx->ddict = NULL;
- dctx->ddictLocal = NULL;
- dctx->inBuff = NULL;
- dctx->inBuffSize = 0;
- dctx->outBuffSize= 0;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
dctx->streamStage = zdss_init;
}
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
+ return dctx;
+}
+
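
ZSTD_initStaticDCtx() above (moved ahead of its first use) lets a caller decompress without any dynamic allocation: the DCtx lives inside a user-provided, 8-byte-aligned workspace. A hedged usage sketch; the workspace size below is only a guess, real code should size it with ZSTD_estimateDCtxSize():

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_initStaticDCtx, ZSTD_estimateDCtxSize */
    #include <zstd.h>

    size_t static_decompress(void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        /* element type guarantees 8-byte alignment; 200 KB is assumed large enough here */
        static unsigned long long wksp[(200 * 1024) / sizeof(unsigned long long)];
        ZSTD_DCtx* const dctx = ZSTD_initStaticDCtx(wksp, sizeof(wksp));
        if (dctx == NULL) return 0;   /* workspace too small or misaligned */
        return ZSTD_decompressDCtx(dctx, dst, dstCap, src, srcSize);
    }
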
ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
@@ -195,19 +202,6 @@ ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
}
}
-ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
-{
- ZSTD_DCtx* dctx = (ZSTD_DCtx*) workspace;
-
- if ((size_t)workspace & 7) return NULL; /* 8-aligned */
- if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
-
- ZSTD_initDCtx_internal(dctx);
- dctx->staticSize = workspaceSize;
- dctx->inBuff = (char*)(dctx+1);
- return dctx;
-}
-
ZSTD_DCtx* ZSTD_createDCtx(void)
{
return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
@@ -250,7 +244,7 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
* Note 3 : Skippable Frame Identifiers are considered valid. */
unsigned ZSTD_isFrame(const void* buffer, size_t size)
{
- if (size < 4) return 0;
+ if (size < ZSTD_frameIdSize) return 0;
{ U32 const magic = MEM_readLE32(buffer);
if (magic == ZSTD_MAGICNUMBER) return 1;
if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
@@ -261,55 +255,70 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
return 0;
}
-
-/** ZSTD_frameHeaderSize() :
-* srcSize must be >= ZSTD_frameHeaderSize_prefix.
-* @return : size of the Frame Header */
-size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+/** ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
- if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong);
- { BYTE const fhd = ((const BYTE*)src)[4];
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+ if (srcSize < minInputSize) return ERROR(srcSize_wrong);
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;
U32 const singleSegment = (fhd >> 5) & 1;
U32 const fcsId = fhd >> 6;
- return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
- + (singleSegment && !fcsId);
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
}
}
+/** ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
-/** ZSTD_getFrameHeader() :
-* decode Frame Header, or require larger `srcSize`.
-* @return : 0, `zfhPtr` is correctly filled,
-* >0, `srcSize` is too small, result is expected `srcSize`,
-* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+
+/** ZSTD_getFrameHeader_internal() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is the wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
const BYTE* ip = (const BYTE*)src;
- if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix;
+ size_t const minInputSize = ZSTD_startingInputLength(format);
- if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
+ if (srcSize < minInputSize) return minInputSize;
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
/* skippable frame */
if (srcSize < ZSTD_skippableHeaderSize)
return ZSTD_skippableHeaderSize; /* magic number + frame length */
memset(zfhPtr, 0, sizeof(*zfhPtr));
- zfhPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize);
zfhPtr->frameType = ZSTD_skippableFrame;
- zfhPtr->windowSize = 0;
return 0;
}
return ERROR(prefix_unknown);
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
- { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
if (srcSize < fhsize) return fhsize;
zfhPtr->headerSize = (U32)fhsize;
}
- { BYTE const fhdByte = ip[4];
- size_t pos = 5;
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
U32 const dictIDSizeCode = fhdByte&3;
U32 const checksumFlag = (fhdByte>>2)&1;
U32 const singleSegment = (fhdByte>>5)&1;
@@ -349,12 +358,25 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
zfhPtr->frameType = ZSTD_frame;
zfhPtr->frameContentSize = frameContentSize;
zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
zfhPtr->dictID = dictID;
zfhPtr->checksumFlag = checksumFlag;
}
return 0;
}
+/** ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is the wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+ return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
+
+
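
The wrapper above preserves ZSTD_getFrameHeader()'s three-way contract: 0 on success, a positive value meaning the caller must supply at least that many bytes, or an error code. A small sketch of how a caller is expected to drive it (experimental API, ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_frameHeader, ZSTD_getFrameHeader */
    #include <zstd.h>

    /* returns 0 once `zfh` is filled, the number of bytes still needed when the
     * input is too short, or an error code testable with ZSTD_isError() */
    static size_t peek_frame_header(ZSTD_frameHeader* zfh, const void* buf, size_t have)
    {
        size_t const ret = ZSTD_getFrameHeader(zfh, buf, have);
        if (ZSTD_isError(ret)) return ret;   /* not a zstd (or skippable) frame */
        if (ret > 0) return ret;             /* retry with at least `ret` bytes */
        /* success : zfh->frameContentSize, zfh->windowSize, zfh->dictID ... are valid */
        return 0;
    }
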
/** ZSTD_getFrameContentSize() :
* compatible with legacy mode
* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
@@ -388,14 +410,14 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
unsigned long long totalDstSize = 0;
while (srcSize >= ZSTD_frameHeaderSize_prefix) {
- const U32 magicNumber = MEM_readLE32(src);
+ U32 const magicNumber = MEM_readLE32(src);
if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t skippableSize;
if (srcSize < ZSTD_skippableHeaderSize)
return ERROR(srcSize_wrong);
- skippableSize = MEM_readLE32((const BYTE *)src + 4) +
- ZSTD_skippableHeaderSize;
+ skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize)
+ + ZSTD_skippableHeaderSize;
if (srcSize < skippableSize) {
return ZSTD_CONTENTSIZE_ERROR;
}
@@ -420,11 +442,9 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
src = (const BYTE *)src + frameSrcSize;
srcSize -= frameSrcSize;
}
- }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
- if (srcSize) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
return totalDstSize;
}
@@ -440,7 +460,8 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- return ret >= ZSTD_CONTENTSIZE_ERROR ? 0 : ret;
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
}
@@ -449,9 +470,9 @@ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
{
- size_t const result = ZSTD_getFrameHeader(&(dctx->fParams), src, headerSize);
- if (ZSTD_isError(result)) return result; /* invalid header */
- if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
+ size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format);
+ if (ZSTD_isError(result)) return result; /* invalid header */
+ if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
return ERROR(dictionary_wrong);
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
@@ -497,7 +518,8 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
}
/*! ZSTD_decodeLiteralsBlock() :
- @return : nb of bytes read from src (< srcSize ) */
+ * @return : nb of bytes read from src (< srcSize )
+ * note : symbol not declared but exposed for fullbench */
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
@@ -698,9 +720,9 @@ static const FSE_decode_t4 OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
}; /* OF_defaultDTable */
/*! ZSTD_buildSeqTable() :
- @return : nb bytes read from src,
- or an error code if it fails, testable with ZSTD_isError()
-*/
+ * @return : nb bytes read from src,
+ * or an error code if it fails, testable with ZSTD_isError()
+ */
static size_t ZSTD_buildSeqTable(FSE_DTable* DTableSpace, const FSE_DTable** DTablePtr,
symbolEncodingType_e type, U32 max, U32 maxLog,
const void* src, size_t srcSize,
@@ -858,13 +880,25 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
}
-static seq_t ZSTD_decodeSequence(seqState_t* seqState)
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
+ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
+ ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
+ : 0)
+
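
Under the header values assumed here (ZSTD_WINDOWLOG_MAX_32 = 30, STREAM_ACCUMULATOR_MIN_32 = 25), the macro works out to 30 - 25 = 5, which is exactly what the ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5) below relies on: on 32-bit builds a long offset is read in two pieces, ofBits - extraBits bits before the bitstream reload and at most 5 extra bits after it.
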
+static seq_t ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
{
seq_t seq;
U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
- U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
+ U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= MaxOff, by table construction */
U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
@@ -891,15 +925,25 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState)
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
- 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
- offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets) {
+ U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
+ offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ } else {
+ offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
}
if (ofCode <= 1) {
@@ -923,13 +967,17 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState)
seq.matchLength = ML_base[mlCode]
+ ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
- if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
seq.litLength = LL_base[llCode]
+ ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if ( MEM_32bits()
- || (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) )
- BIT_reloadDStream(&seqState->DStream);
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
@@ -1028,7 +1076,8 @@ size_t ZSTD_execSequence(BYTE* op,
static size_t ZSTD_decompressSequences(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
- const void* seqStart, size_t seqSize)
+ const void* seqStart, size_t seqSize,
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
@@ -1063,7 +1112,7 @@ static size_t ZSTD_decompressSequences(
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
nbSeq--;
- { seq_t const sequence = ZSTD_decodeSequence(&seqState);
+ { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
@@ -1088,13 +1137,14 @@ static size_t ZSTD_decompressSequences(
}
-FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets)
+HINT_INLINE
+seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
{
seq_t seq;
U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
- U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
+ U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= MaxOff, by table construction */
U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
@@ -1121,15 +1171,18 @@ FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
- 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
- if (longOffsets) {
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets) {
+ U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
@@ -1159,11 +1212,16 @@ FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState
}
seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
- if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if (MEM_32bits() ||
- (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
{ size_t const pos = seqState->pos + seq.litLength;
seq.match = seqState->base + pos - seq.offset; /* single memory segment */
@@ -1180,19 +1238,12 @@ FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState
return seq;
}
-static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const windowSize) {
- if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
- return ZSTD_decodeSequenceLong_generic(seqState, 1);
- } else {
- return ZSTD_decodeSequenceLong_generic(seqState, 0);
- }
-}
HINT_INLINE
size_t ZSTD_execSequenceLong(BYTE* op,
- BYTE* const oend, seq_t sequence,
- const BYTE** litPtr, const BYTE* const litLimit,
- const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
@@ -1202,11 +1253,9 @@ size_t ZSTD_execSequenceLong(BYTE* op,
const BYTE* match = sequence.match;
/* check */
-#if 1
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-#endif
/* copy Literals */
ZSTD_copy8(op, *litPtr);
@@ -1216,7 +1265,6 @@ size_t ZSTD_execSequenceLong(BYTE* op,
*litPtr = iLitEnd; /* update for next sequence */
/* copy Match */
-#if 1
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
@@ -1236,8 +1284,8 @@ size_t ZSTD_execSequenceLong(BYTE* op,
return sequenceLength;
}
} }
- /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-#endif
+ assert(op <= oend_w);
+ assert(sequence.matchLength >= MINMATCH);
/* match within prefix */
if (sequence.offset < 8) {
@@ -1273,7 +1321,8 @@ size_t ZSTD_execSequenceLong(BYTE* op,
static size_t ZSTD_decompressSequencesLong(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
- const void* seqStart, size_t seqSize)
+ const void* seqStart, size_t seqSize,
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
@@ -1285,7 +1334,6 @@ static size_t ZSTD_decompressSequencesLong(
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
- unsigned const windowSize32 = (unsigned)dctx->fParams.windowSize;
int nbSeq;
/* Build Decoding Tables */
@@ -1315,13 +1363,13 @@ static size_t ZSTD_decompressSequencesLong(
/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) {
- sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize32);
+ sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
}
if (seqNb<seqAdvance) return ERROR(corruption_detected);
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) {
- seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize32);
+ seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
PREFETCH(sequence.match);
@@ -1355,10 +1403,20 @@ static size_t ZSTD_decompressSequencesLong(
static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+ const void* src, size_t srcSize, const int frame)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
- DEBUGLOG(5, "ZSTD_decompressBlock_internal");
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+ * We don't expect that to be the case in 64-bit mode.
+ * If we are in block mode we don't know the window size, so we have to be
+ * conservative.
+ */
+ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
+ /* windowSize could be any value at this point, since it is only validated
+ * in the streaming API.
+ */
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
@@ -1369,13 +1427,9 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ip += litCSize;
srcSize -= litCSize;
}
- if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */
- /* likely because of register pressure */
- /* if that's the correct cause, then 32-bits ARM should be affected differently */
- /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */
- if (dctx->fParams.windowSize > (1<<23))
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+ if (frame && dctx->fParams.windowSize > (1<<23))
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, isLongOffset);
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, isLongOffset);
}
@@ -1395,7 +1449,7 @@ size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
{
size_t dSize;
ZSTD_checkContinuity(dctx, dst);
- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
}
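/* Usage sketch for the block-level path above (frame==0, so the decoder stays
 * conservative about long offsets on 32-bit targets) : a hypothetical helper
 * doing a minimal round trip of one compressed block, assuming cBlock/cSize
 * come from the matching block-level compressor and that dstCapacity can hold
 * the regenerated block. */
#include "zstd.h"

static size_t decode_one_block(void* dst, size_t dstCapacity,
                               const void* cBlock, size_t cSize)
{
    size_t dSize;
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx == NULL) return 0;
    ZSTD_decompressBegin(dctx);   /* block functions require an initialized DCtx */
    dSize = ZSTD_decompressBlock(dctx, dst, dstCapacity, cBlock, cSize);
    ZSTD_freeDCtx(dctx);
    return ZSTD_isError(dSize) ? 0 : dSize;   /* 0 on error, decoded size otherwise */
}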
@@ -1411,7 +1465,7 @@ ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, siz
}
-size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
{
if (length > dstCapacity) return ERROR(dstSize_tooSmall);
memset(dst, byte, length);
@@ -1431,7 +1485,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
#endif
if ( (srcSize >= ZSTD_skippableHeaderSize)
&& (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
- return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4);
+ return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize);
} else {
const BYTE* ip = (const BYTE*)src;
const BYTE* const ipstart = ip;
@@ -1511,7 +1565,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
switch(blockProperties.blockType)
{
case bt_compressed:
- decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
break;
case bt_raw :
decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
@@ -1533,6 +1587,10 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
if (blockProperties.lastBlock) break;
}
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
+ return ERROR(corruption_detected);
+ } }
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
@@ -1590,13 +1648,15 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
#endif
magicNumber = MEM_readLE32(src);
+ DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+ (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
if (magicNumber != ZSTD_MAGICNUMBER) {
if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t skippableSize;
if (srcSize < ZSTD_skippableHeaderSize)
return ERROR(srcSize_wrong);
- skippableSize = MEM_readLE32((const BYTE *)src + 4) +
- ZSTD_skippableHeaderSize;
+ skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize)
+ + ZSTD_skippableHeaderSize;
if (srcSize < skippableSize) return ERROR(srcSize_wrong);
src = (const BYTE *)src + skippableSize;
@@ -1699,33 +1759,31 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
DEBUGLOG(5, "ZSTD_decompressContinue");
/* Sanity check */
- if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* unauthorized */
+ if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */
if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
switch (dctx->stage)
{
case ZSTDds_getFrameHeaderSize :
- if (srcSize != ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); /* unauthorized */
assert(src != NULL);
- if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
- memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
- dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
- dctx->stage = ZSTDds_decodeSkippableHeader;
- return 0;
- }
- dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(srcSize >= ZSTD_frameIdSize); /* to read skippable magic number */
+ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = ZSTD_skippableHeaderSize - srcSize; /* remaining to load to get full skippable frame header */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
- memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
- if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
- dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
- dctx->stage = ZSTDds_decodeFrameHeader;
- return 0;
- }
- dctx->expected = 0; /* not necessary to copy more */
- /* fall-through */
+ memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
- memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
+ memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
@@ -1757,6 +1815,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
}
return 0;
}
+
case ZSTDds_decompressLastBlock:
case ZSTDds_decompressBlock:
DEBUGLOG(5, "case ZSTDds_decompressBlock");
@@ -1765,7 +1824,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
case bt_compressed:
DEBUGLOG(5, "case bt_compressed");
- rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
break;
case bt_raw :
rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
@@ -1778,9 +1837,16 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
return ERROR(corruption_detected);
}
if (ZSTD_isError(rSize)) return rSize;
+ DEBUGLOG(5, "decoded size from block : %u", (U32)rSize);
+ dctx->decodedSize += rSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+ DEBUGLOG(4, "decoded size from frame : %u", (U32)dctx->decodedSize);
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ if (dctx->decodedSize != dctx->fParams.frameContentSize) {
+ return ERROR(corruption_detected);
+ } }
if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
@@ -1795,26 +1861,31 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
}
return rSize;
}
+
case ZSTDds_checkChecksum:
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
{ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
- U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "checksum : calculated %08X :: %08X read", h32, check32);
if (check32 != h32) return ERROR(checksum_wrong);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
}
+
case ZSTDds_decodeSkippableHeader:
- { assert(src != NULL);
- memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
- dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
- dctx->stage = ZSTDds_skipFrame;
- return 0;
- }
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_skippableHeaderSize);
+ memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
case ZSTDds_skipFrame:
- { dctx->expected = 0;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- return 0;
- }
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
default:
return ERROR(GENERIC); /* impossible */
}
@@ -1895,7 +1966,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
if (magic != ZSTD_MAGIC_DICTIONARY) {
return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
} }
- dctx->dictID = MEM_readLE32((const char*)dict + 4);
+ dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
/* load entropy tables */
{ size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
@@ -1909,6 +1980,29 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
return ZSTD_refDictContent(dctx, dict, dictSize);
}
+/* Note : this function cannot fail */
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ dctx->vBase = NULL;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
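/* Sketch of the buffer-less loop that ZSTD_decompressBegin() and the
 * ZSTD_decompressContinue() state machine above are designed for; a
 * hypothetical helper, assuming the whole frame sits in memory at src/srcSize
 * and that dst can hold the decoded content in one contiguous segment. */
#include "zstd.h"

static size_t decode_frame_bufferless(ZSTD_DCtx* dctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    const char* const iend = ip + srcSize;
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    ZSTD_decompressBegin(dctx);
    while (1) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (toRead == 0) break;                      /* frame fully decoded */
        if (toRead > (size_t)(iend - ip)) return 0;  /* truncated input */
        produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
        if (ZSTD_isError(produced)) return 0;
        ip += toRead;
        op += produced;
    }
    return (size_t)(op - (char*)dst);
}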
+
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
CHECK_F( ZSTD_decompressBegin(dctx) );
@@ -1975,7 +2069,7 @@ static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict)
{ U32 const magic = MEM_readLE32(ddict->dictContent);
if (magic != ZSTD_MAGIC_DICTIONARY) return 0; /* pure content mode */
}
- ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4);
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize);
/* load entropy tables */
CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
@@ -1984,9 +2078,9 @@ static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict)
}
-static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, unsigned byReference)
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
{
- if ((byReference) || (!dict) || (!dictSize)) {
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
ddict->dictBuffer = NULL;
ddict->dictContent = dict;
} else {
@@ -2005,7 +2099,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_
return 0;
}
-ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_customMem customMem)
{
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
@@ -2013,7 +2107,7 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigne
if (!ddict) return NULL;
ddict->cMem = customMem;
- if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, byReference) )) {
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod) )) {
ZSTD_freeDDict(ddict);
return NULL;
}
@@ -2029,7 +2123,7 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigne
ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
{
ZSTD_customMem const allocator = { NULL, NULL, NULL };
- return ZSTD_createDDict_advanced(dict, dictSize, 0, allocator);
+ return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, allocator);
}
/*! ZSTD_createDDict_byReference() :
@@ -2039,25 +2133,26 @@ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
{
ZSTD_customMem const allocator = { NULL, NULL, NULL };
- return ZSTD_createDDict_advanced(dictBuffer, dictSize, 1, allocator);
+ return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, allocator);
}
ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
- unsigned byReference)
+ ZSTD_dictLoadMethod_e dictLoadMethod)
{
- size_t const neededSpace = sizeof(ZSTD_DDict) + (byReference ? 0 : dictSize);
+ size_t const neededSpace =
+ sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace;
assert(workspace != NULL);
assert(dict != NULL);
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
if (workspaceSize < neededSpace) return NULL;
- if (!byReference) {
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
memcpy(ddict+1, dict, dictSize); /* local copy */
dict = ddict+1;
}
- if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, 1 /* byRef */) ))
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef) ))
return NULL;
return ddict;
}
@@ -2075,10 +2170,10 @@ size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
/*! ZSTD_estimateDDictSize() :
* Estimate amount of memory that will be needed to create a dictionary for decompression.
- * Note : dictionary created "byReference" are smaller */
-size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference)
+ *  Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
{
- return sizeof(ZSTD_DDict) + (byReference ? 0 : dictSize);
+ return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}
size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
@@ -2095,7 +2190,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
{
if (dictSize < 8) return 0;
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
- return MEM_readLE32((const char*)dict + 4);
+ return MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
}
/*! ZSTD_getDictID_fromDDict() :
@@ -2123,7 +2218,7 @@ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
- ZSTD_frameHeader zfp = { 0, 0, ZSTD_frame, 0, 0, 0 };
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
@@ -2190,13 +2285,15 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
return ZSTD_frameHeaderSize_prefix;
}
+/* note : this variant can't fail */
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
return ZSTD_initDStream_usingDict(zds, NULL, 0);
}
/* ZSTD_initDStream_usingDDict() :
- * ddict will just be referenced, and must outlive decompression session */
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict)
{
size_t const initResult = ZSTD_initDStream(zds);
@@ -2216,31 +2313,66 @@ size_t ZSTD_resetDStream(ZSTD_DStream* zds)
size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds,
ZSTD_DStreamParameter_e paramType, unsigned paramValue)
{
+ ZSTD_STATIC_ASSERT((unsigned)zdss_loadHeader >= (unsigned)zdss_init);
+ if ((unsigned)zds->streamStage > (unsigned)zdss_loadHeader)
+ return ERROR(stage_wrong);
switch(paramType)
{
default : return ERROR(parameter_unsupported);
- case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
+ case DStream_p_maxWindowSize :
+ DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
+ zds->maxWindowSize = paramValue ? paramValue : (U32)(-1);
+ break;
}
return 0;
}
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+ ZSTD_STATIC_ASSERT((unsigned)zdss_loadHeader >= (unsigned)zdss_init);
+ if ((unsigned)dctx->streamStage > (unsigned)zdss_loadHeader)
+ return ERROR(stage_wrong);
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+ DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
+ ZSTD_STATIC_ASSERT((unsigned)zdss_loadHeader >= (unsigned)zdss_init);
+ if ((unsigned)dctx->streamStage > (unsigned)zdss_loadHeader)
+ return ERROR(stage_wrong);
+ dctx->format = format;
+ return 0;
+}
+
size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds)
{
return ZSTD_sizeof_DCtx(zds);
}
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
+ return minRBSize;
+}
+
size_t ZSTD_estimateDStreamSize(size_t windowSize)
{
size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
size_t const inBuffSize = blockSize; /* no block can be larger */
- size_t const outBuffSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
}
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
{
- U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable */
ZSTD_frameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
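/* Sketch of sizing a single-allocation DStream from a frame header, building on
 * ZSTD_decodingBufferSize_min() via ZSTD_estimateDStreamSize_fromFrame() above;
 * a hypothetical helper, assuming at least the frame header prefix is available
 * in frameStart and using malloc as a stand-in allocator. */
#include <stdlib.h>
#include "zstd.h"

static ZSTD_DStream* create_sized_dstream(const void* frameStart, size_t frameStartSize)
{
    size_t const budget = ZSTD_estimateDStreamSize_fromFrame(frameStart, frameStartSize);
    void* workspace;
    ZSTD_DStream* zds;
    if (ZSTD_isError(budget)) return NULL;   /* header truncated or invalid */
    workspace = malloc(budget);
    if (workspace == NULL) return NULL;
    zds = ZSTD_initStaticDStream(workspace, budget);
    if (zds == NULL) free(workspace);        /* workspace too small or misaligned */
    /* a session is then started with ZSTD_initDStream(zds) before first use */
    return zds;
}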
@@ -2272,7 +2404,18 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
+ if (input->pos > input->size) { /* forbidden */
+ DEBUGLOG(5, "in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ return ERROR(srcSize_wrong);
+ }
+ if (output->pos > output->size) { /* forbidden */
+ DEBUGLOG(5, "out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
+ return ERROR(dstSize_tooSmall);
+ }
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
if (zds->legacyVersion) {
/* legacy support is incompatible with static dctx */
@@ -2289,7 +2432,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
/* fall-through */
case zdss_loadHeader :
- { size_t const hSize = ZSTD_getFrameHeader(&zds->fParams, zds->headerBuffer, zds->lhSize);
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+ { size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
if (ZSTD_isError(hSize)) {
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
@@ -2342,7 +2487,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
- zds->expected = MEM_readLE32(zds->headerBuffer + 4);
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize);
zds->stage = ZSTDds_skipFrame;
} else {
CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
@@ -2351,20 +2496,20 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
}
/* control buffer memory usage */
- DEBUGLOG(4, "Control max buffer memory usage");
+ DEBUGLOG(4, "Control max buffer memory usage (max %u KB)",
+ (U32)(zds->maxWindowSize >> 10));
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
/* Adapt buffer sizes to frame header instructions */
- { size_t const blockSize = (size_t)(MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_MAX));
- size_t const neededOutSize = (size_t)(zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2);
- zds->blockSize = blockSize;
- if ((zds->inBuffSize < blockSize) || (zds->outBuffSize < neededOutSize)) {
- size_t const bufferSize = blockSize + neededOutSize;
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
+ if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
+ size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
DEBUGLOG(4, "inBuff : from %u to %u",
- (U32)zds->inBuffSize, (U32)blockSize);
+ (U32)zds->inBuffSize, (U32)neededInBuffSize);
DEBUGLOG(4, "outBuff : from %u to %u",
- (U32)zds->outBuffSize, (U32)neededOutSize);
+ (U32)zds->outBuffSize, (U32)neededOutBuffSize);
if (zds->staticSize) { /* static DCtx */
DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
@@ -2377,9 +2522,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
if (zds->inBuff == NULL) return ERROR(memory_allocation);
}
- zds->inBuffSize = blockSize;
+ zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;
- zds->outBuffSize = neededOutSize;
+ zds->outBuffSize = neededOutBuffSize;
} }
zds->streamStage = zdss_read;
/* fall-through */
@@ -2437,8 +2582,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
zds->streamStage = zdss_read;
- if (zds->outStart + zds->blockSize > zds->outBuffSize)
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
zds->outStart = zds->outEnd = 0;
+ }
break;
} }
/* cannot complete flush */
@@ -2476,3 +2626,30 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
return nextSrcSizeHint;
}
}
+
+
+size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ return ZSTD_decompressStream(dctx, output, input);
+}
+
+size_t ZSTD_decompress_generic_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_decompress_generic() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
+
+void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
+{
+ (void)ZSTD_initDStream(dctx);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+}
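/* Sketch combining the advanced decoder entry points defined above
 * (ZSTD_DCtx_reset, ZSTD_DCtx_setMaxWindowSize, ZSTD_decompress_generic);
 * hypothetical helpers, with the 1<<27 window cap as an arbitrary example and
 * in/out as caller-managed ZSTD_inBuffer / ZSTD_outBuffer. */
#include "zstd.h"

static void setup_decoder(ZSTD_DCtx* dctx)
{
    ZSTD_DCtx_reset(dctx);                                    /* back to ZSTD_f_zstd1, default window limit */
    (void)ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);  /* refuse frames needing >128 MiB of history */
}

/* returns 0 when a frame is fully decoded and flushed,
 * 1 when more input or more output room is needed, -1 on error */
static int stream_decode_step(ZSTD_DCtx* dctx, ZSTD_outBuffer* out, ZSTD_inBuffer* in)
{
    while (in->pos < in->size) {
        size_t const hint = ZSTD_decompress_generic(dctx, out, in);
        if (ZSTD_isError(hint)) return -1;
        if (hint == 0) return 0;               /* frame done */
        if (out->pos == out->size) break;      /* caller must drain out first */
    }
    return 1;
}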
diff --git a/lib/deprecated/zbuff.h b/lib/deprecated/zbuff.h
index e6ea84ad3b45..a93115da4a1c 100644
--- a/lib/deprecated/zbuff.h
+++ b/lib/deprecated/zbuff.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* ***************************************************************
diff --git a/lib/deprecated/zbuff_common.c b/lib/deprecated/zbuff_common.c
index 2de45bec1705..661b9b0e18c5 100644
--- a/lib/deprecated/zbuff_common.c
+++ b/lib/deprecated/zbuff_common.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
diff --git a/lib/deprecated/zbuff_compress.c b/lib/deprecated/zbuff_compress.c
index 4444e95d8fd1..8adbaec260d5 100644
--- a/lib/deprecated/zbuff_compress.c
+++ b/lib/deprecated/zbuff_compress.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/deprecated/zbuff_decompress.c b/lib/deprecated/zbuff_decompress.c
index a819d7f40429..923c22b73c57 100644
--- a/lib/deprecated/zbuff_decompress.c
+++ b/lib/deprecated/zbuff_decompress.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c
index 3d445ae8b81d..efdffddbf930 100644
--- a/lib/dictBuilder/cover.c
+++ b/lib/dictBuilder/cover.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* *****************************************************************************
@@ -382,7 +383,7 @@ static void COVER_group(COVER_ctx_t *ctx, const void *group,
typedef struct {
U32 begin;
U32 end;
- double score;
+ U32 score;
} COVER_segment_t;
/**
@@ -479,11 +480,16 @@ static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
* Check the validity of the parameters.
* Returns non-zero if the parameters are valid and 0 otherwise.
*/
-static int COVER_checkParameters(ZDICT_cover_params_t parameters) {
+static int COVER_checkParameters(ZDICT_cover_params_t parameters,
+ size_t maxDictSize) {
/* k and d are required parameters */
if (parameters.d == 0 || parameters.k == 0) {
return 0;
}
+ /* k <= maxDictSize */
+ if (parameters.k > maxDictSize) {
+ return 0;
+ }
/* d <= k */
if (parameters.d > parameters.k) {
return 0;
@@ -622,9 +628,13 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
/* Select a segment */
COVER_segment_t segment = COVER_selectSegment(
ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
- /* Trim the segment if necessary and if it is empty then we are done */
+ /* If the segment covers no dmers, then we are out of content */
+ if (segment.score == 0) {
+ break;
+ }
+ /* Trim the segment if necessary and if it is too small then we are done */
segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
- if (segmentSize == 0) {
+ if (segmentSize < parameters.d) {
break;
}
/* We fill the dictionary from the back to allow the best segments to be
@@ -648,7 +658,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
COVER_ctx_t ctx;
COVER_map_t activeDmers;
/* Checks */
- if (!COVER_checkParameters(parameters)) {
+ if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
DISPLAYLEVEL(1, "Cover parameters incorrect\n");
return ERROR(GENERIC);
}
@@ -701,8 +711,8 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
* compiled with multithreaded support.
*/
typedef struct COVER_best_s {
- pthread_mutex_t mutex;
- pthread_cond_t cond;
+ ZSTD_pthread_mutex_t mutex;
+ ZSTD_pthread_cond_t cond;
size_t liveJobs;
void *dict;
size_t dictSize;
@@ -715,8 +725,8 @@ typedef struct COVER_best_s {
*/
static void COVER_best_init(COVER_best_t *best) {
if (best==NULL) return; /* compatible with init on NULL */
- (void)pthread_mutex_init(&best->mutex, NULL);
- (void)pthread_cond_init(&best->cond, NULL);
+ (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
+ (void)ZSTD_pthread_cond_init(&best->cond, NULL);
best->liveJobs = 0;
best->dict = NULL;
best->dictSize = 0;
@@ -731,11 +741,11 @@ static void COVER_best_wait(COVER_best_t *best) {
if (!best) {
return;
}
- pthread_mutex_lock(&best->mutex);
+ ZSTD_pthread_mutex_lock(&best->mutex);
while (best->liveJobs != 0) {
- pthread_cond_wait(&best->cond, &best->mutex);
+ ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
}
- pthread_mutex_unlock(&best->mutex);
+ ZSTD_pthread_mutex_unlock(&best->mutex);
}
/**
@@ -749,8 +759,8 @@ static void COVER_best_destroy(COVER_best_t *best) {
if (best->dict) {
free(best->dict);
}
- pthread_mutex_destroy(&best->mutex);
- pthread_cond_destroy(&best->cond);
+ ZSTD_pthread_mutex_destroy(&best->mutex);
+ ZSTD_pthread_cond_destroy(&best->cond);
}
/**
@@ -761,9 +771,9 @@ static void COVER_best_start(COVER_best_t *best) {
if (!best) {
return;
}
- pthread_mutex_lock(&best->mutex);
+ ZSTD_pthread_mutex_lock(&best->mutex);
++best->liveJobs;
- pthread_mutex_unlock(&best->mutex);
+ ZSTD_pthread_mutex_unlock(&best->mutex);
}
/**
@@ -779,7 +789,7 @@ static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
}
{
size_t liveJobs;
- pthread_mutex_lock(&best->mutex);
+ ZSTD_pthread_mutex_lock(&best->mutex);
--best->liveJobs;
liveJobs = best->liveJobs;
/* If the new dictionary is better */
@@ -802,9 +812,9 @@ static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
best->parameters = parameters;
best->compressedSize = compressedSize;
}
- pthread_mutex_unlock(&best->mutex);
+ ZSTD_pthread_mutex_unlock(&best->mutex);
if (liveJobs == 0) {
- pthread_cond_broadcast(&best->cond);
+ ZSTD_pthread_cond_broadcast(&best->cond);
}
}
}
@@ -884,7 +894,7 @@ static void COVER_tryParameters(void *opaque) {
goto _compressCleanup;
}
/* Compress each sample and sum their sizes (or error) */
- totalCompressedSize = 0;
+ totalCompressedSize = dictBufferCapacity;
for (i = 0; i < ctx->nbSamples; ++i) {
const size_t size = ZSTD_compress_usingCDict(
cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
@@ -960,7 +970,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
/* Initialization */
COVER_best_init(&best);
/* Turn down global display level to clean up display at level 2 and below */
- g_displayLevel = parameters->zParams.notificationLevel - 1;
+ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
/* Loop through d first because each new value needs a new context */
LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
kIterations);
@@ -994,8 +1004,9 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
data->parameters.k = k;
data->parameters.d = d;
data->parameters.steps = kSteps;
+ data->parameters.zParams.notificationLevel = g_displayLevel;
/* Check the parameters */
- if (!COVER_checkParameters(data->parameters)) {
+ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
DISPLAYLEVEL(1, "Cover parameters incorrect\n");
free(data);
continue;
diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c
index c2871c2ccfbc..1bb8b0683d04 100644
--- a/lib/dictBuilder/zdict.c
+++ b/lib/dictBuilder/zdict.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -60,7 +61,7 @@
#define NOISELENGTH 32
-static const int g_compressionLevel_default = 6;
+static const int g_compressionLevel_default = 3;
static const U32 g_selectivity_default = 9;
@@ -374,7 +375,7 @@ static int isIncluded(const void* in, const void* container, size_t length)
return u==length;
}
-/*! ZDICT_checkMerge
+/*! ZDICT_tryMerge() :
check if dictItem can be merged, do it if possible
@return : id of destination elt, 0 if not merged
*/
@@ -439,8 +440,8 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
static void ZDICT_removeDictItem(dictItem* table, U32 id)
{
- /* convention : first element is nb of elts */
- U32 const max = table->pos;
+ /* convention : table[0].pos stores nb of elts */
+ U32 const max = table[0].pos;
U32 u;
if (!id) return; /* protection, should never happen */
for (u=id; u<max-1; u++)
@@ -703,7 +704,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
memset(repOffset, 0, sizeof(repOffset));
repOffset[1] = repOffset[4] = repOffset[8] = 1;
memset(bestRepOffset, 0, sizeof(bestRepOffset));
- if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
+ if (compressionLevel<=0) compressionLevel = g_compressionLevel_default;
params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
{ size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
if (ZSTD_isError(beginResult)) {
@@ -1056,6 +1057,8 @@ size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
memset(&params, 0, sizeof(params));
params.d = 8;
params.steps = 4;
+ /* Default to level 6 since no compression level information is available */
+ params.zParams.compressionLevel = 6;
return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity,
samplesBuffer, samplesSizes,
nbSamples, &params);
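/* Sketch of driving the cover trainer directly with the same defaults the
 * fallback above installs (d=8, steps=4, compression level 6); a hypothetical
 * helper, where k is left at 0 so the optimizer searches it, and samplesBuffer
 * is assumed to hold nbSamples concatenated samples described by samplesSizes. */
#include <string.h>
#include "zdict.h"

static size_t train_cover_dict(void* dictBuffer, size_t dictCapacity,
                               const void* samplesBuffer, const size_t* samplesSizes,
                               unsigned nbSamples)
{
    ZDICT_cover_params_t params;
    memset(&params, 0, sizeof(params));
    params.d = 8;                          /* dmer length */
    params.steps = 4;                      /* size of the k search */
    params.zParams.compressionLevel = 6;   /* mirrors the new default above */
    return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
                                               samplesBuffer, samplesSizes,
                                               nbSamples, &params);
}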
diff --git a/lib/dictBuilder/zdict.h b/lib/dictBuilder/zdict.h
index 3d72a465e5e7..5f0000b1c7c8 100644
--- a/lib/dictBuilder/zdict.h
+++ b/lib/dictBuilder/zdict.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef DICTBUILDER_H_001
diff --git a/lib/legacy/zstd_legacy.h b/lib/legacy/zstd_legacy.h
index 1126e24669f3..487ff0b284b5 100644
--- a/lib/legacy/zstd_legacy.h
+++ b/lib/legacy/zstd_legacy.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_LEGACY_H
diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c
index 45f421ae6f2c..70003cbedffd 100644
--- a/lib/legacy/zstd_v01.c
+++ b/lib/legacy/zstd_v01.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v01.h b/lib/legacy/zstd_v01.h
index a91c6a133bac..42f0897c7d23 100644
--- a/lib/legacy/zstd_v01.h
+++ b/lib/legacy/zstd_v01.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V01_H_28739879432
diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c
index dc1ec0e7c678..b935a4d18005 100644
--- a/lib/legacy/zstd_v02.c
+++ b/lib/legacy/zstd_v02.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v02.h b/lib/legacy/zstd_v02.h
index 63cb3b8d578f..0dde7a63773c 100644
--- a/lib/legacy/zstd_v02.h
+++ b/lib/legacy/zstd_v02.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V02_H_4174539423
diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c
index 8257de7e6cd3..35370dd0ca61 100644
--- a/lib/legacy/zstd_v03.c
+++ b/lib/legacy/zstd_v03.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v03.h b/lib/legacy/zstd_v03.h
index e38e0109b136..b4449e2999e0 100644
--- a/lib/legacy/zstd_v03.h
+++ b/lib/legacy/zstd_v03.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V03_H_298734209782
diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c
index 951561a6cc8c..1b5f6f3b0cdb 100644
--- a/lib/legacy/zstd_v04.c
+++ b/lib/legacy/zstd_v04.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v04.h b/lib/legacy/zstd_v04.h
index a7d6623305f2..6391631fc432 100644
--- a/lib/legacy/zstd_v04.h
+++ b/lib/legacy/zstd_v04.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V04_H_91868324769238
diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c
index 4a1d4d4bd931..23188f504d38 100644
--- a/lib/legacy/zstd_v05.c
+++ b/lib/legacy/zstd_v05.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v05.h b/lib/legacy/zstd_v05.h
index a333bd127bd8..b68fd578ee9f 100644
--- a/lib/legacy/zstd_v05.h
+++ b/lib/legacy/zstd_v05.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv05_H
diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c
index a285a09016f6..62683f994a95 100644
--- a/lib/legacy/zstd_v06.c
+++ b/lib/legacy/zstd_v06.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v06.h b/lib/legacy/zstd_v06.h
index ee043a179090..fb4eb37c89e2 100644
--- a/lib/legacy/zstd_v06.h
+++ b/lib/legacy/zstd_v06.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv06_H
diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c
index ad392e90b61e..aad9b1f659f8 100644
--- a/lib/legacy/zstd_v07.c
+++ b/lib/legacy/zstd_v07.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/lib/legacy/zstd_v07.h b/lib/legacy/zstd_v07.h
index 68d18e9636bf..6591cd3014b0 100644
--- a/lib/legacy/zstd_v07.h
+++ b/lib/legacy/zstd_v07.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv07_H_235446
diff --git a/lib/zstd.h b/lib/zstd.h
index 13b4563fd69a..2194a3b23d6d 100644
--- a/lib/zstd.h
+++ b/lib/zstd.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#if defined (__cplusplus)
extern "C" {
@@ -58,7 +59,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 3
-#define ZSTD_VERSION_RELEASE 1
+#define ZSTD_VERSION_RELEASE 2
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll version */
@@ -130,10 +131,11 @@ ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t
/*====== Helper functions ======*/
-ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < 128 KB) ? ((128 KB - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case scenario */
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
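/* Worked instance of the ZSTD_COMPRESSBOUND() margin above, assuming 128 KB
 * expands to (128<<10) bytes : the margin starts at 64 bytes for an empty
 * input and shrinks to 0 once srcSize reaches 128 KB. */
#include <assert.h>
#include <stddef.h>

static void compressbound_example(void)
{
    size_t const margin0  = ((size_t)(128 << 10) - 0) >> 11;                            /* = 64  */
    size_t const bound100 = 100 + (100 >> 8) + (((size_t)(128 << 10) - 100) >> 11);     /* = 163 */
    assert(margin0 == 64);
    assert(bound100 == 163);
}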
/***************************************
@@ -375,27 +377,31 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output
#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* v0.7+ */
-#define ZSTD_WINDOWLOG_MAX_32 27
-#define ZSTD_WINDOWLOG_MAX_64 27
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
#define ZSTD_WINDOWLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN 10
-#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
-#define ZSTD_HASHLOG_MIN 6
-#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
-#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
-#define ZSTD_HASHLOG3_MAX 17
-#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN 1
-#define ZSTD_SEARCHLENGTH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_SEARCHLENGTH_MIN 3 /* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_TARGETLENGTH_MIN 4
-#define ZSTD_TARGETLENGTH_MAX 999
-
-#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* for static allocation */
-#define ZSTD_FRAMEHEADERSIZE_MIN 6
-static const size_t ZSTD_frameHeaderSize_prefix = 5; /* minimum input size to know frame header size */
-static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX MIN(ZSTD_WINDOWLOG_MAX, 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX MIN(ZSTD_WINDOWLOG_MAX+1, 30)
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX 17
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_SEARCHLENGTH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MIN 3 /* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MIN 4 /* only useful for btopt */
+#define ZSTD_TARGETLENGTH_MAX 999 /* only useful for btopt */
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size to know frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN 6
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* for static allocation */
+static const size_t ZSTD_frameHeaderSize_prefix = ZSTD_FRAMEHEADERSIZE_PREFIX;
static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
static const size_t ZSTD_skippableHeaderSize = 8; /* magic number + skippable frame length */
@@ -424,6 +430,8 @@ typedef struct {
ZSTD_frameParameters fParams;
} ZSTD_parameters;
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
/*= Custom memory allocation functions */
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
@@ -480,7 +488,7 @@ ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
/*! ZSTD_sizeof_*() :
* These functions give the current memory usage of selected object.
- * Object memory usage can evolve if it's re-used multiple times. */
+ * Object memory usage can evolve when re-used multiple times. */
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
@@ -493,18 +501,21 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
* of a future {D,C}Ctx, before its creation.
* ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
* It will also consider src size to be arbitrarily "large", which is worst case.
- * If srcSize is known to always be small, ZSTD_estimateCCtxSize_advanced() can provide a tighter estimation.
- * ZSTD_estimateCCtxSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbThreads is > 1.
* Note : CCtx estimation is only correct for single-threaded compression */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
-/*! ZSTD_estimate?StreamSize() :
+/*! ZSTD_estimateCStreamSize() :
* ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
* It will also consider src size to be arbitrarily "large", which is worst case.
- * If srcSize is known to always be small, ZSTD_estimateCStreamSize_advanced() can provide a tighter estimation.
- * ZSTD_estimateCStreamSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbThreads is set to a value > 1.
* Note : CStream estimation is only correct for single-threaded compression.
* ZSTD_DStream memory budget depends on window Size.
* This information can be passed manually, using ZSTD_estimateDStreamSize,
@@ -513,17 +524,24 @@ ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
* an internal ?Dict will be created, which additional size is not estimated here.
* In this case, get total size by adding ZSTD_estimate?DictSize */
ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_advanced(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
+ ZSTD_dlm_byRef, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
/*! ZSTD_estimate?DictSize() :
* ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
- * ZSTD_estimateCStreamSize_advanced() makes it possible to control precisely compression parameters, like ZSTD_createCDict_advanced().
- * Note : dictionary created "byReference" are smaller */
+ *  ZSTD_estimateCDictSize_advanced() makes it possible to control precisely compression parameters, like ZSTD_createCDict_advanced().
+ *  Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller
+ */
ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, unsigned byReference);
-ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
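/* Sketch pairing the new ZSTD_dictLoadMethod_e parameter with the estimation
 * and static-init entry points declared in this header; a hypothetical helper,
 * using malloc as a stand-in allocator, and assuming the dict buffer outlives
 * the returned DDict when ZSTD_dlm_byRef is chosen. */
#include <stdlib.h>
#include "zstd.h"

static ZSTD_DDict* make_static_ddict(const void* dict, size_t dictSize,
                                     ZSTD_dictLoadMethod_e how)
{
    size_t const need = ZSTD_estimateDDictSize(dictSize, how);  /* smaller with ZSTD_dlm_byRef */
    void* const workspace = malloc(need);
    ZSTD_DDict* ddict;
    if (workspace == NULL) return NULL;
    ddict = ZSTD_initStaticDDict(workspace, need, dict, dictSize, how);
    if (ddict == NULL) free(workspace);   /* workspace too small or misaligned */
    return ddict;
}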
/***************************************
@@ -551,24 +569,12 @@ ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
-/* !!! To be deprecated !!! */
-typedef enum {
- ZSTD_p_forceWindow, /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */
- ZSTD_p_forceRawDict /* Force loading dictionary in "content-only" mode (no header analysis) */
-} ZSTD_CCtxParameter;
-/*! ZSTD_setCCtxParameter() :
- * Set advanced parameters, selected through enum ZSTD_CCtxParameter
- * @result : 0, or an error code (which can be tested with ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
-
-
/*! ZSTD_createCDict_byReference() :
* Create a digested dictionary for compression
* Dictionary content is simply referenced, and therefore stays in dictBuffer.
* It is important that dictBuffer outlives CDict, it must remain read accessible throughout the lifetime of CDict */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
-
typedef enum { ZSTD_dm_auto=0, /* dictionary is "full" if it starts with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
ZSTD_dm_rawContent, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
ZSTD_dm_fullDict /* refuses to load a dictionary if it does not respect Zstandard's specification */
@@ -576,7 +582,8 @@ typedef enum { ZSTD_dm_auto=0, /* dictionary is "full" if it starts with
/*! ZSTD_createCDict_advanced() :
* Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams,
ZSTD_customMem customMem);
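
A minimal sketch of calling this constructor with the new enums; it mirrors the bench.c change further down in this patch, and the helper name make_ref_cdict() is illustrative:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Build a digested dictionary that merely references dictBuffer :
 * the buffer must outlive the returned CDict. */
static ZSTD_CDict* make_ref_cdict(const void* dictBuffer, size_t dictSize, int level)
{
    ZSTD_customMem const cmem = { NULL, NULL, NULL };  /* {NULL,NULL,NULL} selects the default allocator */
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, 0 /* unknown src size */, dictSize);
    return ZSTD_createCDict_advanced(dictBuffer, dictSize,
                                     ZSTD_dlm_byRef,   /* reference, do not copy */
                                     ZSTD_dm_auto,     /* full dict if it has the magic, else raw content */
                                     cParams, cmem);
}
```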
@@ -596,7 +603,7 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictS
ZSTDLIB_API ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_dictMode_e dictMode,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams);
/*! ZSTD_getCParams() :
@@ -674,7 +681,8 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, siz
/*! ZSTD_createDDict_advanced() :
* Create a ZSTD_DDict using external alloc and free, optionally by reference */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
- unsigned byReference, ZSTD_customMem customMem);
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_customMem customMem);
/*! ZSTD_initStaticDDict() :
* Generate a digested dictionary in provided memory area.
@@ -689,7 +697,7 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictS
*/
ZSTDLIB_API ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
- unsigned byReference);
+ ZSTD_dictLoadMethod_e dictLoadMethod);
/*! ZSTD_getDictID_fromDict() :
* Provides the dictID stored within dictionary.
@@ -724,9 +732,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct, a size of 0 means unknown. for a frame size of 0 use initCStream_advanced */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. */
ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
- ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
+ ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /**< note : cdict will just be referenced, and must outlive compression session */
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
@@ -741,12 +749,12 @@ ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledg
/*===== Advanced Streaming decompression functions =====*/
-typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
-ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict will just be referenced, and must outlive decompression session */
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
+ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue); /* obsolete : this API will be removed in a future version */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict is referenced, it must outlive decompression session */
ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompression parameters from previous init; saves dictionary loading */
@@ -754,8 +762,8 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompress
* Buffer-less and synchronous inner streaming functions
*
* This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
-* But it's also a complex one, with many restrictions (documented below).
-* Prefer using normal streaming API for an easier experience
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer the normal streaming API for an easier experience.
********************************************************************* */
/**
@@ -772,8 +780,8 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompress
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
- - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffer only.
- - Interface is synchronous : input is consumed entirely and produce 1+ (or more) compressed blocks.
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
Worst case evaluation is provided by ZSTD_compressBound().
ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
@@ -784,9 +792,9 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompress
Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
- Without last block mark, frames will be considered unfinished (corrupted) by decoders.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new frame.
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
*/
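
A minimal sketch of the Begin/Continue/End sequence described above; the helper name and the two-chunk split are illustrative, and `dst` is assumed to be at least ZSTD_compressBound(srcSize) bytes:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* returns the total compressed size, or an error code testable with ZSTD_isError() */
static size_t compress_bufferless(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize, int level)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const half = srcSize / 2;
    size_t pos = 0, r;
    if (cctx == NULL) return (size_t)-1;

    r = ZSTD_compressBegin(cctx, level);
    if (ZSTD_isError(r)) goto _end;

    /* consume a first chunk : compressed block(s) are written directly into dst */
    r = ZSTD_compressContinue(cctx, (char*)dst + pos, dstCapacity - pos, src, half);
    if (ZSTD_isError(r)) goto _end;
    pos += r;

    /* last chunk + end of frame (last block mark, optional checksum) */
    r = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos,
                         (const char*)src + half, srcSize - half);
    if (ZSTD_isError(r)) goto _end;
    pos += r;
    r = pos;
_end:
    ZSTD_freeCCtx(cctx);
    return r;
}
```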
/*===== Buffer-less streaming compression functions =====*/
@@ -809,40 +817,53 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
A ZSTD_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
- such as minimum rolling buffer size to allocate to decompress data (`windowSize`),
- and the dictionary ID in use.
- (Note : content size is optional, it may not be present. 0 means : content size unknown).
- Note that these values could be wrong, either because of data malformation, or because an attacker is spoofing deliberate false information.
- As a consequence, check that values remain within valid application range, especially `windowSize`, before allocation.
- Each application can set its own limit, depending on local restrictions.
- For extended interoperability, it is recommended to support windowSize of at least 8 MB.
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
- `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- Start decompression, with ZSTD_decompressBegin().
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly; check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+ ZSTD_decompressContinue() is very sensitive to contiguity,
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+ whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At that point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are setup, start decompression, with ZSTD_decompressBegin().
If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
- Alternatively, you can copy a prepared context, using ZSTD_copyDCtx().
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
- It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some metadata item.
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
- ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
- They should preferably be located contiguously, prior to current block.
- Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
- ZSTD_decompressContinue() is very sensitive to contiguity,
- if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
- or that previous contiguous segment is large enough to properly handle maximum back-reference.
-
A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
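
A minimal sketch of the round-buffer loop described above, for a frame held entirely in memory; the helper name and the flush() callback are illustrative:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

typedef void (*flush_fn)(const void* data, size_t size);

/* returns 0 on success, or an error code testable with ZSTD_isError() */
static size_t decompress_bufferless(const void* src, size_t srcSize, flush_fn flush)
{
    ZSTD_frameHeader zfh;
    const char* ip = (const char*)src;
    size_t remaining = srcSize;
    size_t ret, rbSize, wPos = 0;
    char* rb = NULL;
    ZSTD_DCtx* dctx = NULL;

    ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ret != 0) return ZSTD_isError(ret) ? ret : (size_t)-1;   /* error, or fragment too small */

    rbSize = ZSTD_decodingBufferSize_min(zfh.windowSize, zfh.frameContentSize);
    if (ZSTD_isError(rbSize)) return rbSize;       /* e.g. window too large for a 32-bit system */

    rb   = (char*)malloc(rbSize);                  /* round buffer */
    dctx = ZSTD_createDCtx();
    if (rb == NULL || dctx == NULL) { ret = (size_t)-1; goto _end; }

    ret = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(ret)) goto _end;

    for (;;) {
        size_t const next = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t decoded;
        if (next == 0) break;                                   /* frame fully decoded */
        if (next > remaining) { ret = (size_t)-1; goto _end; }  /* truncated input */
        if (rbSize - wPos < zfh.blockSizeMax) wPos = 0;         /* wrap : data there was already flushed */
        decoded = ZSTD_decompressContinue(dctx, rb + wPos, rbSize - wPos, ip, next);
        if (ZSTD_isError(decoded)) { ret = decoded; goto _end; }
        if (decoded) flush(rb + wPos, decoded);                 /* 0 just means a metadata item was decoded */
        wPos += decoded; ip += next; remaining -= next;
    }
    ret = 0;
_end:
    ZSTD_freeDCtx(dctx);
    free(rb);
    return ret;
}
```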
@@ -852,75 +873,101 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
== Special case : skippable frames ==
Skippable frames allow integration of user-defined data into a flow of concatenated frames.
- Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frames is as follows :
+ Skippable frames will be ignored (skipped) by the decompressor.
+ The format of skippable frames is as follows :
a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
- For skippable frames ZSTD_decompressContinue() always returns 0.
- For skippable frames ZSTD_getFrameHeader() returns fparamsPtr->windowLog==0 what means that a frame is skippable.
- Note : If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might actually be a Zstd encoded frame with no content.
- For purposes of decompression, it is valid in both cases to skip the frame using
- ZSTD_findFrameCompressedSize to find its size in bytes.
- It also returns Frame Size as fparamsPtr->frameContentSize.
+ For skippable frames, ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames, ZSTD_decompressContinue() always returns 0 : it only skips the content.
*/
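
A minimal sketch of producing a skippable frame following the a) b) c) layout above; the helper name is illustrative:

```
#include <stdint.h>
#include <string.h>

/* returns the total frame size written into dst, or 0 if dstCapacity is too small */
static size_t write_skippable_frame(void* dst, size_t dstCapacity,
                                    const void* userData, uint32_t userDataSize)
{
    uint8_t* const op = (uint8_t*)dst;
    uint32_t const magic = 0x184D2A50;                 /* any value in 0x184D2A50 .. 0x184D2A5F */
    if (dstCapacity < (size_t)userDataSize + 8) return 0;
    /* both fields are little-endian, written byte by byte to stay endianness-independent */
    op[0] = (uint8_t)(magic      ); op[1] = (uint8_t)(magic >>  8);
    op[2] = (uint8_t)(magic >> 16); op[3] = (uint8_t)(magic >> 24);
    op[4] = (uint8_t)(userDataSize      ); op[5] = (uint8_t)(userDataSize >>  8);
    op[6] = (uint8_t)(userDataSize >> 16); op[7] = (uint8_t)(userDataSize >> 24);
    memcpy(op + 8, userData, userDataSize);            /* frame content = user data */
    return (size_t)userDataSize + 8;
}
```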
/*===== Buffer-less streaming decompression functions =====*/
typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
typedef struct {
- unsigned long long frameContentSize; /* ZSTD_CONTENTSIZE_UNKNOWN means this field is not available. 0 means "empty" */
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
+ unsigned blockSizeMax;
ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
unsigned headerSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameHeader;
ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/* misc */
+ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
-/*=== New advanced API (experimental, and compression only) ===*/
+/* ============================================ */
+/** New advanced API (experimental) */
+/* ============================================ */
/* notes on API design :
- * In this proposal, parameters are pushed one by one into an existing CCtx,
+ * In this proposal, parameters are pushed one by one into an existing context,
* and then applied on all subsequent compression jobs.
* When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT.
*
* This API is intended to replace all others experimental API.
* It can basically do all other use cases, and even new ones.
- * It stands a good chance to become "stable",
- * after a reasonable testing period.
+ * In contrast with _advanced() variants, it stands a reasonable chance to become "stable",
+ * after a good testing period.
*/
/* note on naming convention :
* Initially, the API favored names like ZSTD_setCCtxParameter() .
* In this proposal, convention is changed towards ZSTD_CCtx_setParameter() .
* The main driver is that it identifies more clearly the target object type.
- * It feels clearer in light of potential variants :
+ * It feels clearer when considering multiple targets :
* ZSTD_CDict_setParameter() (rather than ZSTD_setCDictParameter())
- * ZSTD_DCtx_setParameter() (rather than ZSTD_setDCtxParameter() )
- * Left variant feels easier to distinguish.
+ * ZSTD_CCtxParams_setParameter() (rather than ZSTD_setCCtxParamsParameter() )
+ * etc...
*/
/* note on enum design :
- * All enum will be manually set to explicit values before reaching "stable API" status */
+ * All enum will be pinned to explicit values before reaching "stable API" status */
typedef enum {
+ /* Question : should we have a format ZSTD_f_auto ?
+ * For the time being, it would mean exactly the same as ZSTD_f_zstd1.
+ * But, in the future, should several formats be supported,
+ * on the compression side, it would mean "default format".
+ * On the decompression side, it would mean "multi format",
+ * and ZSTD_f_zstd1 could be reserved to mean "accept *only* zstd frames".
+ * Since meaning is a little different, another option could be to define different enums for compression and decompression.
+ * This question could be kept for later, when there are actually multiple formats to support,
+ * but there is also the question of pinning enum values, and pinning value `0` is especially important */
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless, /* Variant of zstd frame format, without the initial 4-byte magic number.
+ * Useful to save 4 bytes per generated frame.
+ * A decoder cannot recognise this format automatically; it must be instructed explicitly. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* compression format */
+ ZSTD_p_format = 10, /* See ZSTD_format_e enum definition.
+ * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */
+
/* compression parameters */
ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means "do not change cLevel". */
ZSTD_p_windowLog, /* Maximum allowed back-reference distance, expressed as power of 2.
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
- * Special: value 0 means "do not change windowLog". */
+ * Special: value 0 means "do not change windowLog".
+ * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
+ * requires setting the maximum window size at least as large during decompression. */
ZSTD_p_hashLog, /* Size of the probe table, as a power of 2.
* Resulting table size is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
@@ -959,12 +1006,6 @@ typedef enum {
ZSTD_p_checksumFlag, /* A 32-bits checksum of content is written at end of frame (default:0) */
ZSTD_p_dictIDFlag, /* When applicable, dictID of dictionary is provided in frame header (default:1) */
- /* dictionary parameters (must be set before ZSTD_CCtx_loadDictionary) */
- ZSTD_p_dictMode=300, /* Select how dictionary content must be interpreted. Value must be from type ZSTD_dictMode_e.
- * default : 0==auto : dictionary will be "full" if it respects specification, otherwise it will be "rawContent" */
- ZSTD_p_refDictContent, /* Dictionary content will be referenced, instead of copied (default:0==byCopy).
- * It requires that dictionary buffer outlives its users */
-
/* multi-threading parameters */
ZSTD_p_nbThreads=400, /* Select how many threads a compression job can spawn (default:1)
* More threads improve speed, but also increase memory usage.
@@ -980,6 +1021,35 @@ typedef enum {
/* advanced parameters - may not remain available after API update */
ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
* even when referencing into Dictionary content (default:0) */
+ ZSTD_p_enableLongDistanceMatching=1200, /* Enable long distance matching.
+ * This parameter is designed to improve the compression
+ * ratio for large inputs with long distance matches.
+ * This increases the memory usage as well as window size.
+ * Note: setting this parameter sets all the LDM parameters
+ * as well as ZSTD_p_windowLog. It should be set after
+ * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and
+ * other LDM parameters. Setting the compression level
+ * after this parameter overrides the window log, though LDM
+ * will remain enabled until explicitly disabled. */
+ ZSTD_p_ldmHashLog, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio, but decrease
+ * compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * (default: windowlog - 7). */
+ ZSTD_p_ldmMinMatch, /* Minimum size of searched matches for long distance matcher.
+ * Larger values, or values that are too small, usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN
+ * and ZSTD_LDM_MINMATCH_MAX (default: 64). */
+ ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values usually improve collision resolution but may decrease
+ * compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */
+ ZSTD_p_ldmHashEveryLog, /* Frequency of inserting/looking up entries in the LDM hash table.
+ * The default is MAX(0, (windowLog - ldmHashLog)) to
+ * optimize hash table usage.
+ * Larger values improve compression speed. Deviating far from the
+ * default value will likely result in a decrease in compression ratio.
+ * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */
} ZSTD_cParameter;
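
A minimal sketch of the ordering recommended above (and used by the bench.c change further down): compression level first, then the LDM enable, then the finer LDM knobs, and windowLog last; the numeric values are placeholders:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* returns 0-or-informational value on success, or an error code testable with ZSTD_isError() */
static size_t enable_ldm(ZSTD_CCtx* cctx)
{
    size_t err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_enableLongDistanceMatching, 1);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_ldmMinMatch, 64);   /* placeholder, matches the default */
    if (ZSTD_isError(err)) return err;
    /* set windowLog last, so the LDM enable above does not override it */
    return ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, 27);
}
```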
@@ -1007,14 +1077,22 @@ ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long lo
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
* meaning "return to no-dictionary mode".
- * Note 1 : `dict` content will be copied internally,
- * except if ZSTD_p_refDictContent is set before loading.
+ * Note 1 : `dict` content will be copied internally. Use
+ * ZSTD_CCtx_loadDictionary_byReference() to reference dictionary
+ * content instead. The dictionary buffer must then outlive its
+ * users.
* Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
* For this reason, compression parameters cannot be changed anymore after loading a dictionary.
* It's also a CPU-heavy operation, with non-negligible impact on latency.
* Note 3 : Dictionary will be used for all future compression jobs.
- * To return to "no-dictionary" situation, load a NULL dictionary */
+ * To return to "no-dictionary" situation, load a NULL dictionary.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() to select how dictionary
+ * content will be interpreted.
+ */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode);
+
/*! ZSTD_CCtx_refCDict() :
* Reference a prepared dictionary, to be used for all next compression jobs.
@@ -1040,23 +1118,26 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
* Note 1 : Prefix buffer is referenced. It must outlive compression job.
* Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
* It's a CPU-heavy operation, with non-negligible impact on latency.
- * Note 3 : it's possible to alter ZSTD_p_dictMode using ZSTD_CCtx_setParameter() */
+ * Note 3 : By default, the prefix is treated as raw content
+ * (ZSTD_dm_rawContent). Use ZSTD_CCtx_refPrefix_advanced() to alter
+ * dictMode. */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode);
typedef enum {
ZSTD_e_continue=0, /* collect more data, encoder transparently decides when to output result, for optimal conditions */
ZSTD_e_flush, /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
- ZSTD_e_end /* flush any remaining data and ends current frame. Any future compression starts a new frame. */
+ ZSTD_e_end /* flush any remaining data and close current frame. Any additional data starts a new frame. */
} ZSTD_EndDirective;
/*! ZSTD_compress_generic() :
* Behave about the same as ZSTD_compressStream. To note :
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
* - Compression parameters cannot be changed once compression is started.
- * - *dstPos must be <= dstCapacity, *srcPos must be <= srcSize
- * - *dspPos and *srcPos will be updated. They are guaranteed to remain below their respective limit.
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
* - @return provides the minimum amount of data still to flush from internal buffers
* or an error code, which can be tested using ZSTD_isError().
* if @return != 0, flush is not fully completed, there is some data left within internal buffers.
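
A minimal sketch of driving ZSTD_compress_generic() with ZSTD_e_end over an in-memory buffer; the helper name and the chosen parameters are illustrative, and `dst` is assumed to be at least ZSTD_compressBound(srcSize) bytes:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* returns the compressed size, or an error code testable with ZSTD_isError() */
static size_t compress_generic_once(ZSTD_CCtx* cctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 5);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
    if (ZSTD_isError(err)) return err;
    for (;;) {
        size_t const remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        if (remaining == 0) return output.pos;              /* frame fully written and flushed */
        if (output.pos == output.size) return (size_t)-1;   /* dst too small to finish flushing */
    }
}
```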
@@ -1075,6 +1156,7 @@ ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
* Useful after an error, or to interrupt an ongoing compression job and start a new one.
* Any internal data not yet flushed is cancelled.
* Dictionary (if any) is dropped.
+ * All parameters are back to default values.
* It's possible to modify compression parameters after a reset.
*/
ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx); /* Not ready yet ! */
@@ -1083,21 +1165,187 @@ ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx); /* Not ready yet ! */
/*! ZSTD_compress_generic_simpleArgs() :
* Same as ZSTD_compress_generic(),
* but using only integral types as arguments.
- * Argument list is larger and less expressive than ZSTD_{in,out}Buffer,
+ * Argument list is larger than ZSTD_{in,out}Buffer,
* but can be helpful for binders from dynamic languages
* which have troubles handling structures containing memory pointers.
*/
-size_t ZSTD_compress_generic_simpleArgs (
+ZSTDLIB_API size_t ZSTD_compress_generic_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp);
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent compression jobs.
+ * - ZSTD_compress_generic() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
+ * for static allocation for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
-/**
- Block functions
+/*! ZSTD_resetCCtxParams() :
+ * Reset params to default, with the default compression level.
+ */
+ZSTDLIB_API size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params);
+
+/*! ZSTD_initCCtxParams() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_initCCtxParams_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParam_setParameter() :
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
+ * Note : when `value` is an enum, cast it to unsigned for proper type checking.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ * This must be done before the dictionary is loaded.
+ * The pledgedSrcSize is treated as unknown.
+ * Multithreading parameters are applied only if nbThreads > 1.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
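
A minimal sketch of the quick howto above; the helper name and the chosen parameters are illustrative, and error handling is reduced to early exits:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* returns 0, or an error code testable with ZSTD_isError() */
static size_t apply_params(ZSTD_CCtx* cctx)
{
    size_t err;
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    if (params == NULL) return (size_t)-1;                 /* allocation failure */
    err = ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 9);
    if (!ZSTD_isError(err))
        err = ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);
    if (!ZSTD_isError(err))
        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  /* before any dictionary is loaded */
    ZSTD_freeCCtxParams(params);
    return err;
}
```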
+
+
+/*=== Advanced parameters for decompression API ===*/
+
+/* The following parameters must be set after creating a ZSTD_DCtx* (or ZSTD_DStream*) object,
+ * but before starting decompression of a frame.
+ */
+
+/*! ZSTD_DCtx_loadDictionary() :
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : `dict` content will be copied internally.
+ * Use ZSTD_DCtx_loadDictionary_byReference()
+ * to reference dictionary content instead.
+ * In which case, the dictionary buffer must outlive its users.
+ * Note 2 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select
+ * how dictionary content will be interpreted and loaded.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /* not implemented */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /* not implemented */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode); /* not implemented */
+
+
+/*! ZSTD_DCtx_refDDict() :
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special : adding a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); /* not implemented */
+
+
+/*! ZSTD_DCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next decompression job.
+ * Prefix is **only used once**. It must be explicitly referenced before each frame.
+ * If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_DDict instead.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ * Note 2 : Prefix buffer is referenced. It must outlive the decompression job.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
+ * Use ZSTD_DCtx_refPrefix_advanced() to alter dictMode.
+ * Note 4 : Referencing a raw content prefix has almost no CPU nor memory cost.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize); /* not implemented */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode); /* not implemented */
+
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ * Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
+ * This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario).
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode.
+ * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX)
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+
+
+/*! ZSTD_DCtx_setFormat() :
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such as ZSTD_f_zstd1_magicless.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+
+/*! ZSTD_decompress_generic() :
+ * Behave the same as ZSTD_decompressStream.
+ * Decompression parameters cannot be changed once decompression is started.
+ * @return : an error code, which can be tested using ZSTD_isError(),
+ * or, if >0, a hint : nb of input bytes expected for next invocation.
+ * `0` means : a frame has just been fully decoded and flushed.
+ */
+ZSTDLIB_API size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input);
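
A minimal sketch of a decompression loop built on these new entry points; the helper name, the sink() callback and the 128 MB window cap are illustrative:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

typedef void (*sink_fn)(const void* data, size_t size);

/* returns 0 when all provided frames were decoded and flushed,
 * >0 if the input ends in the middle of a frame, or an error code */
static size_t decompress_generic_all(ZSTD_DCtx* dctx,
                                     const void* src, size_t srcSize, sink_fn sink)
{
    size_t const outSize = ZSTD_DStreamOutSize();
    void* const outBuf = malloc(outSize);
    ZSTD_inBuffer input = { src, srcSize, 0 };
    size_t ret;
    if (outBuf == NULL) return (size_t)-1;

    ret = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);   /* refuse frames needing > 128 MB */
    while (!ZSTD_isError(ret) && input.pos < input.size) {
        ZSTD_outBuffer output = { outBuf, outSize, 0 };
        ret = ZSTD_decompress_generic(dctx, &output, &input);
        if (!ZSTD_isError(ret) && output.pos) sink(outBuf, output.pos);
    }
    free(outBuf);
    return ret;
}
```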
+
+
+/*! ZSTD_decompress_generic_simpleArgs() :
+ * Same as ZSTD_decompress_generic(),
+ * but using only integral types as arguments.
+ * Argument list is larger than ZSTD_{in,out}Buffer,
+ * but can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompress_generic_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to clean state.
+ * If a decompression was ongoing, any internal data not yet flushed is cancelled.
+ * All parameters are back to default values, including sticky ones.
+ * Dictionary (if any) is dropped.
+ * Parameters can be modified again after a reset.
+ */
+ZSTDLIB_API void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);
+
+
+
+/* ============================ */
+/** Block level API */
+/* ============================ */
+/*!
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
User will have to take in charge required information to regenerate data, such as compressed and content sizes.
@@ -1109,7 +1357,7 @@ size_t ZSTD_compress_generic_simpleArgs (
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ copyCCtx() and copyDCtx() can be used too
- - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
@@ -1128,7 +1376,7 @@ size_t ZSTD_compress_generic_simpleArgs (
ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert block into `dctx` history. Useful for uncompressed blocks */
+ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-block decompression */
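
A minimal sketch of a single-block round trip, including the documented non-compressible case where ZSTD_compressBlock() returns 0 and the raw block must be inserted into the decoder history; the helper name and level are illustrative:

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <string.h>

/* srcSize is assumed to be <= ZSTD_getBlockSize(cctx);
 * returns the nb of regenerated bytes, or an error code testable with ZSTD_isError() */
static size_t block_roundtrip(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              void* regen, size_t regenCapacity)
{
    size_t cSize;
    size_t err = ZSTD_compressBegin(cctx, 3);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(err)) return err;

    cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) return cSize;

    if (cSize == 0) {   /* block not compressible : keep it raw, but keep decoder history in sync */
        if (regenCapacity < srcSize) return (size_t)-1;
        memcpy(regen, src, srcSize);
        err = ZSTD_insertBlock(dctx, regen, srcSize);
        return ZSTD_isError(err) ? err : srcSize;
    }
    return ZSTD_decompressBlock(dctx, regen, regenCapacity, dst, cSize);
}
```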
#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
diff --git a/programs/Makefile b/programs/Makefile
index c5469cfc4def..cbb67e9fe349 100644
--- a/programs/Makefile
+++ b/programs/Makefile
@@ -1,12 +1,10 @@
-# ##########################################################################
+# ################################################################
# Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
# All rights reserved.
#
-# This Makefile is validated for Linux, macOS, *BSD, Hurd, Solaris, MSYS2 targets
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
# ##########################################################################
# zstd : Command Line Utility, supporting gzip-like arguments
# zstd32 : Same as zstd, but forced to compile in 32-bits mode
@@ -42,7 +40,7 @@ CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
-DZSTD_NEWAPI \
-DXXH_NAMESPACE=ZSTD_ # because xxhash.o already compiled with this macro from library
CFLAGS ?= -O3
-DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
+DEBUGFLAGS+=-Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
@@ -68,8 +66,7 @@ endif
else
endif
-ZSTDLIB_FILES := $(wildcard $(ZSTD_FILES)) $(wildcard $(ZSTDLEGACY_FILES)) $(wildcard $(ZDICT_FILES))
-ZSTDLIB_OBJ := $(patsubst %.c,%.o,$(ZSTDLIB_FILES))
+ZSTDLIB_FILES := $(sort $(wildcard $(ZSTD_FILES)) $(wildcard $(ZSTDLEGACY_FILES)) $(wildcard $(ZDICT_FILES)))
# Define *.exe as extension for Windows systems
ifneq (,$(filter Windows%,$(OS)))
@@ -143,13 +140,10 @@ allVariants: zstd zstd-compress zstd-decompress zstd-small zstd-nolegacy
$(ZSTDDECOMP_O): CFLAGS += $(ALIGN_LOOP)
-zstd zstd4 : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP)
-zstd zstd4 : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD)
-zstd4 : CPPFLAGS += $(LZ4CPP)
-zstd4 : LDFLAGS += $(LZ4LD)
-zstd : LZ4_MSG := - lz4 support is disabled
-zstd zstd4 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
-zstd zstd4 : $(ZSTDLIB_FILES) zstdcli.o fileio.o bench.o datagen.o dibio.o
+zstd : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP) $(LZ4CPP)
+zstd : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD) $(LZ4LD)
+zstd : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
+zstd : $(ZSTDLIB_FILES) zstdcli.o fileio.o bench.o datagen.o dibio.o
@echo "$(THREAD_MSG)"
@echo "$(ZLIB_MSG)"
@echo "$(LZMA_MSG)"
@@ -212,8 +206,8 @@ zstd-decompress: $(ZSTDCOMMON_FILES) $(ZSTDDECOMP_FILES) zstdcli.c fileio.c
zstd-compress: $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) zstdcli.c fileio.c
$(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS $^ -o $@$(EXT)
-# zstd is now built with multithreading enabled y default
zstdmt: zstd
+ ln -sf zstd zstdmt
.PHONY: generate_res
generate_res:
@@ -255,25 +249,35 @@ ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD Ne
list:
@$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs
-ifneq (,$(filter $(shell uname),SunOS))
-INSTALL ?= ginstall
-else
-INSTALL ?= install
-endif
-
-PREFIX ?= /usr/local
-DESTDIR ?=
-BINDIR ?= $(PREFIX)/bin
+DESTDIR ?=
+# directory variables : GNU conventions prefer lowercase
+# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html
+# support both lower and uppercase (BSD), use uppercase in script
+prefix ?= /usr/local
+PREFIX ?= $(prefix)
+exec_prefix ?= $(PREFIX)
+bindir ?= $(exec_prefix)/bin
+BINDIR ?= $(bindir)
+datarootdir ?= $(PREFIX)/share
+mandir ?= $(datarootdir)/man
+man1dir ?= $(mandir)/man1
ifneq (,$(filter $(shell uname),OpenBSD FreeBSD NetBSD DragonFly SunOS))
MANDIR ?= $(PREFIX)/man/man1
else
-MANDIR ?= $(PREFIX)/share/man/man1
+MANDIR ?= $(man1dir)
+endif
+
+ifneq (,$(filter $(shell uname),SunOS))
+INSTALL ?= ginstall
+else
+INSTALL ?= install
endif
-INSTALL_PROGRAM ?= $(INSTALL) -m 755
-INSTALL_SCRIPT ?= $(INSTALL) -m 755
-INSTALL_MAN ?= $(INSTALL) -m 644
+INSTALL_PROGRAM ?= $(INSTALL)
+INSTALL_SCRIPT ?= $(INSTALL_PROGRAM)
+INSTALL_DATA ?= $(INSTALL) -m 644
+INSTALL_MAN ?= $(INSTALL_DATA)
.PHONY: install
install: zstd
diff --git a/programs/README.md b/programs/README.md
index 8b65dfdb3f24..2da9a6db1261 100644
--- a/programs/README.md
+++ b/programs/README.md
@@ -40,6 +40,16 @@ There are however other Makefile targets that create different variations of CLI
In which case, linking stage will fail if `lzma` library cannot be found.
This might be useful to prevent silent feature disabling.
+- __HAVE_LZ4__ : `zstd` can compress and decompress files in `.lz4` format.
+  This is requested through the command `--format=lz4`.
+  Alternatively, symlinks named `lz4` or `unlz4` will mimic the intended behavior.
+  `.lz4` support is automatically enabled when the `lz4` library is detected at build time.
+  It's possible to disable `.lz4` support, by setting HAVE_LZ4=0.
+ Example : make zstd HAVE_LZ4=0
+ It's also possible to force compilation with lz4 support, using HAVE_LZ4=1.
+ In which case, linking stage will fail if `lz4` library cannot be found.
+ This might be useful to prevent silent feature disabling.
+
- __ZSTD_LEGACY_SUPPORT__ : `zstd` can decompress files compressed by older versions of `zstd`.
Starting v0.8.0, all versions of `zstd` produce frames compliant with the [specification](../doc/zstd_compression_format.md), and are therefore compatible.
But older versions (< v0.8.0) produced different, incompatible, frames.
@@ -113,6 +123,7 @@ Advanced arguments :
-c : force write to standard output, even if it is the console
-l : print information about zstd compressed files
--ultra : enable levels beyond 19, up to 22 (requires more memory)
+--long : enable long distance matching (requires more memory)
--no-dictID : don't write dictID into header (dictionary compression)
--[no-]check : integrity check (default:enabled)
-r : operate recursively on directories
@@ -139,3 +150,60 @@ Benchmark arguments :
-B# : cut file into independent blocks of size # (default: no block)
--priority=rt : set process priority to real-time
```
+
+
+#### Long distance matching mode
+The long distance matching mode, enabled with `--long`, is designed to improve
+the compression ratio for files with long matches at a large distance (up to the
+maximum window size, `128 MiB`) while still maintaining compression speed.
+
+Enabling this mode sets the window size to `128 MiB` and thus increases the memory
+usage for both the compressor and decompressor. Performance in terms of speed is
+dependent on long matches being found. Compression speed may degrade if few long
+matches are found. Decompression speed usually improves when there are many long
+distance matches.
+
+Below are graphs comparing the compression speed, compression ratio, and
+decompression speed with and without long distance matching on an ideal use
+case: a tar of four versions of clang (versions `3.4.1`, `3.4.2`, `3.5.0`,
+`3.5.1`) with a total size of `244889600 B`. This is an ideal use case as there
+are many long distance matches within the maximum window size of `128 MiB` (each
+version is less than `128 MiB`).
+
+Compression Speed vs Ratio | Decompression Speed
+---------------------------|---------------------
+![Compression Speed vs Ratio](../doc/images/ldmCspeed.png "Compression Speed vs Ratio") | ![Decompression Speed](../doc/images/ldmDspeed.png "Decompression Speed")
+
+| Method | Compression ratio | Compression speed | Decompression speed |
+|:-------|------------------:|-------------------------:|---------------------------:|
+| `zstd -1` | `5.065` | `284.8 MB/s` | `759.3 MB/s` |
+| `zstd -5` | `5.826` | `124.9 MB/s` | `674.0 MB/s` |
+| `zstd -10` | `6.504` | `29.5 MB/s` | `771.3 MB/s` |
+| `zstd -1 --long` | `17.426` | `220.6 MB/s` | `1638.4 MB/s` |
+| `zstd -5 --long` | `19.661` | `165.5 MB/s` | `1530.6 MB/s`|
+| `zstd -10 --long`| `21.949` | `75.6 MB/s` | `1632.6 MB/s`|
+
+On this file, the compression ratio improves significantly with minimal impact
+on compression speed, and the decompression speed doubles.
+
+On the other extreme, compressing a file with few long distance matches (such as
+the [Silesia compression corpus]) will likely lead to a deterioration in
+compression speed (for lower levels) with minimal change in compression ratio.
+
+The below table illustrates this on the [Silesia compression corpus].
+
+[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia
+
+| Method | Compression ratio | Compression speed | Decompression speed |
+|:-------|------------------:|-------------------------:|---------------------------:|
+| `zstd -1` | `2.878` | `231.7 MB/s` | `594.4 MB/s` |
+| `zstd -1 --long` | `2.929` | `106.5 MB/s` | `517.9 MB/s` |
+| `zstd -5` | `3.274` | `77.1 MB/s` | `464.2 MB/s` |
+| `zstd -5 --long` | `3.319` | `51.7 MB/s` | `371.9 MB/s` |
+| `zstd -10` | `3.523` | `16.4 MB/s` | `489.2 MB/s` |
+| `zstd -10 --long`| `3.566` | `16.2 MB/s` | `415.7 MB/s` |
+
+
+
+
+
diff --git a/programs/bench.c b/programs/bench.c
index 2b48a4663a86..ec99c61cebf6 100644
--- a/programs/bench.c
+++ b/programs/bench.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -129,6 +130,31 @@ void BMK_setNbThreads(unsigned nbThreads) {
#endif
g_nbThreads = nbThreads;
}
+static U32 g_ldmFlag = 0;
+void BMK_setLdmFlag(unsigned ldmFlag) {
+ g_ldmFlag = ldmFlag;
+}
+
+static U32 g_ldmMinMatch = 0;
+void BMK_setLdmMinMatch(unsigned ldmMinMatch) {
+ g_ldmMinMatch = ldmMinMatch;
+}
+
+static U32 g_ldmHashLog = 0;
+void BMK_setLdmHashLog(unsigned ldmHashLog) {
+ g_ldmHashLog = ldmHashLog;
+}
+
+#define BMK_LDM_PARAM_NOTSET 9999
+static U32 g_ldmBucketSizeLog = BMK_LDM_PARAM_NOTSET;
+void BMK_setLdmBucketSizeLog(unsigned ldmBucketSizeLog) {
+ g_ldmBucketSizeLog = ldmBucketSizeLog;
+}
+
+static U32 g_ldmHashEveryLog = BMK_LDM_PARAM_NOTSET;
+void BMK_setLdmHashEveryLog(unsigned ldmHashEveryLog) {
+ g_ldmHashEveryLog = ldmHashEveryLog;
+}
/* ********************************************************
@@ -170,7 +196,6 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
size_t cSize = 0;
double ratio = 0.;
U32 nbBlocks;
- UTIL_freq_t ticksPerSecond;
/* checks */
if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
@@ -178,7 +203,6 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
/* init */
if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* display last 17 characters */
- UTIL_initTimer(&ticksPerSecond);
if (g_decodeOnly) { /* benchmark only decompression : source must be already compressed */
const char* srcPtr = (const char*)srcBuffer;
@@ -238,15 +262,15 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
U32 markNb = 0;
- UTIL_getTime(&coolTime);
+ coolTime = UTIL_getTime();
DISPLAYLEVEL(2, "\r%79s\r", "");
while (!cCompleted || !dCompleted) {
/* overheat protection */
- if (UTIL_clockSpanMicro(coolTime, ticksPerSecond) > ACTIVEPERIOD_MICROSEC) {
+ if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
DISPLAYLEVEL(2, "\rcooling down ... \r");
UTIL_sleep(COOLPERIOD_SEC);
- UTIL_getTime(&coolTime);
+ coolTime = UTIL_getTime();
}
if (!g_decodeOnly) {
@@ -256,8 +280,8 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
- UTIL_getTime(&clockStart);
+ UTIL_waitForNextTick();
+ clockStart = UTIL_getTime();
if (!cCompleted) { /* still some time to do compression tests */
U64 const clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
@@ -266,6 +290,15 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
#ifdef ZSTD_NEWAPI
ZSTD_CCtx_setParameter(ctx, ZSTD_p_nbThreads, g_nbThreads);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_compressionLevel, cLevel);
+ ZSTD_CCtx_setParameter(ctx, ZSTD_p_enableLongDistanceMatching, g_ldmFlag);
+ ZSTD_CCtx_setParameter(ctx, ZSTD_p_ldmMinMatch, g_ldmMinMatch);
+ ZSTD_CCtx_setParameter(ctx, ZSTD_p_ldmHashLog, g_ldmHashLog);
+ if (g_ldmBucketSizeLog != BMK_LDM_PARAM_NOTSET) {
+ ZSTD_CCtx_setParameter(ctx, ZSTD_p_ldmBucketSizeLog, g_ldmBucketSizeLog);
+ }
+ if (g_ldmHashEveryLog != BMK_LDM_PARAM_NOTSET) {
+ ZSTD_CCtx_setParameter(ctx, ZSTD_p_ldmHashEveryLog, g_ldmHashEveryLog);
+ }
ZSTD_CCtx_setParameter(ctx, ZSTD_p_windowLog, comprParams->windowLog);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_chainLog, comprParams->chainLog);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_searchLog, comprParams->searchLog);
@@ -284,7 +317,7 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
if (comprParams->searchLength) zparams.cParams.searchLength = comprParams->searchLength;
if (comprParams->targetLength) zparams.cParams.targetLength = comprParams->targetLength;
if (comprParams->strategy) zparams.cParams.strategy = comprParams->strategy;
- cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1 /*byRef*/, ZSTD_dm_auto, zparams.cParams, cmem);
+ cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dm_auto, zparams.cParams, cmem);
if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
#endif
do {
@@ -329,9 +362,9 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = rSize;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeCDict(cdict);
- { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart);
if (clockSpanMicro < fastestC*nbLoops) fastestC = clockSpanMicro / nbLoops;
totalCTime += clockSpanMicro;
cCompleted = (totalCTime >= maxTime);
@@ -356,15 +389,14 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
if (!dCompleted) memset(resultBuffer, 0xD6, srcSize); /* warm result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
+ UTIL_waitForNextTick();
if (!dCompleted) {
U64 clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
U32 nbLoops = 0;
- UTIL_time_t clockStart;
ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
+ UTIL_time_t const clockStart = UTIL_getTime();
if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
- UTIL_getTime(&clockStart);
do {
U32 blockNb;
for (blockNb=0; blockNb<nbBlocks; blockNb++) {
@@ -373,17 +405,15 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cPtr, blockTable[blockNb].cSize,
ddict);
if (ZSTD_isError(regenSize)) {
- DISPLAY("ZSTD_decompress_usingDDict() failed on block %u of size %u : %s \n",
+ EXM_THROW(2, "ZSTD_decompress_usingDDict() failed on block %u of size %u : %s \n",
blockNb, (U32)blockTable[blockNb].cSize, ZSTD_getErrorName(regenSize));
- clockLoop = 0; /* force immediate test end */
- break;
}
blockTable[blockNb].resSize = regenSize;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeDDict(ddict);
- { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart);
if (clockSpanMicro < fastestD*nbLoops) fastestD = clockSpanMicro / nbLoops;
totalDTime += clockSpanMicro;
dCompleted = (totalDTime >= maxTime);
diff --git a/programs/bench.h b/programs/bench.h
index 5f8d61a25b30..82bb345696a7 100644
--- a/programs/bench.h
+++ b/programs/bench.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -25,5 +26,10 @@ void BMK_setNbThreads(unsigned nbThreads);
void BMK_setNotificationLevel(unsigned level);
void BMK_setAdditionalParam(int additionalParam);
void BMK_setDecodeOnlyMode(unsigned decodeFlag);
+void BMK_setLdmFlag(unsigned ldmFlag);
+void BMK_setLdmMinMatch(unsigned ldmMinMatch);
+void BMK_setLdmHashLog(unsigned ldmHashLog);
+void BMK_setLdmBucketSizeLog(unsigned ldmBucketSizeLog);
+void BMK_setLdmHashEveryLog(unsigned ldmHashEveryLog);
#endif /* BENCH_H_121279284357 */
diff --git a/programs/datagen.c b/programs/datagen.c
index b1da8e78b31e..a489d6af08d7 100644
--- a/programs/datagen.c
+++ b/programs/datagen.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/programs/datagen.h b/programs/datagen.h
index 5b1b7c47cc5d..2fcc980e5e76 100644
--- a/programs/datagen.h
+++ b/programs/datagen.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/programs/dibio.c b/programs/dibio.c
index ab2dc285a273..2cb2a42671a0 100644
--- a/programs/dibio.c
+++ b/programs/dibio.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -13,7 +14,7 @@
* Compiler Warnings
****************************************/
#ifdef _MSC_VER
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
@@ -43,7 +44,7 @@
#define SAMPLESIZE_MAX (128 KB)
#define MEMMULT 11 /* rough estimation : memory cost to analyze 1 byte of sample */
#define COVER_MEMMULT 9 /* rough estimation : memory cost to analyze 1 byte of sample */
-static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t));
+static const size_t g_maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t));
#define NOISELENGTH 32
@@ -52,13 +53,12 @@ static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_
* Console display
***************************************/
#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
-#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-static int g_displayLevel = 0; /* 0 : no display; 1: errors; 2: default; 4: full information */
+#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
- if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \
+#define DISPLAYUPDATE(l, ...) if (displayLevel>=l) { \
+ if ((DIB_clockSpan(g_time) > refreshRate) || (displayLevel>=4)) \
{ g_time = clock(); DISPLAY(__VA_ARGS__); \
- if (g_displayLevel>=4) fflush(stderr); } }
+ if (displayLevel>=4) fflush(stderr); } }
static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10;
static clock_t g_time = 0;
@@ -75,9 +75,9 @@ static clock_t DIB_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
#define EXM_THROW(error, ...) \
{ \
DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
- DISPLAYLEVEL(1, "Error %i : ", error); \
- DISPLAYLEVEL(1, __VA_ARGS__); \
- DISPLAYLEVEL(1, "\n"); \
+ DISPLAY("Error %i : ", error); \
+ DISPLAY(__VA_ARGS__); \
+ DISPLAY("\n"); \
exit(error); \
}
@@ -97,32 +97,55 @@ const char* DiB_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCo
* File related operations
**********************************************************/
/** DiB_loadFiles() :
-* @return : nb of files effectively loaded into `buffer` */
+ * load samples from files listed in fileNamesTable into buffer.
+ * works even if buffer is too small to load all samples.
+ * Also provides the size of each sample into sampleSizes table
+ * which must be sized correctly, using DiB_fileStats().
+ * @return : nb of samples effectively loaded into `buffer`
+ *  *bufferSizePtr is modified, it provides the amount of data loaded within buffer.
 *  *bufferSizePtr is modified, it provides the amount of data loaded within buffer.
+ * sampleSizes is filled with the size of each sample.
+ */
static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr,
- size_t* fileSizes,
- const char** fileNamesTable, unsigned nbFiles)
+ size_t* sampleSizes, unsigned sstSize,
+ const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize,
+ unsigned displayLevel)
{
char* const buff = (char*)buffer;
size_t pos = 0;
- unsigned n;
+ unsigned nbLoadedChunks = 0, fileIndex;
- for (n=0; n<nbFiles; n++) {
- const char* const fileName = fileNamesTable[n];
+ for (fileIndex=0; fileIndex<nbFiles; fileIndex++) {
+ const char* const fileName = fileNamesTable[fileIndex];
unsigned long long const fs64 = UTIL_getFileSize(fileName);
- size_t const fileSize = (size_t) MIN(fs64, SAMPLESIZE_MAX);
- if (fileSize > *bufferSizePtr-pos) break;
- { FILE* const f = fopen(fileName, "rb");
- if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
- DISPLAYUPDATE(2, "Loading %s... \r", fileName);
- { size_t const readSize = fread(buff+pos, 1, fileSize, f);
- if (readSize != fileSize) EXM_THROW(11, "Pb reading %s", fileName);
- pos += readSize; }
- fileSizes[n] = fileSize;
- fclose(f);
- } }
+ unsigned long long remainingToLoad = fs64;
+ U32 const nbChunks = targetChunkSize ? (U32)((fs64 + (targetChunkSize-1)) / targetChunkSize) : 1;
+ U64 const chunkSize = targetChunkSize ? MIN(targetChunkSize, fs64) : fs64;
+ size_t const maxChunkSize = (size_t)MIN(chunkSize, SAMPLESIZE_MAX);
+ U32 cnb;
+ FILE* const f = fopen(fileName, "rb");
+ if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
+ DISPLAYUPDATE(2, "Loading %s... \r", fileName);
+ for (cnb=0; cnb<nbChunks; cnb++) {
+ size_t const toLoad = (size_t)MIN(maxChunkSize, remainingToLoad);
+ if (toLoad > *bufferSizePtr-pos) break;
+ { size_t const readSize = fread(buff+pos, 1, toLoad, f);
+ if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName);
+ pos += readSize;
+ sampleSizes[nbLoadedChunks++] = toLoad;
+ remainingToLoad -= targetChunkSize;
+ if (nbLoadedChunks == sstSize) { /* no more space left in sampleSizes table */
+ fileIndex = nbFiles; /* stop there */
+ break;
+ }
+ if (toLoad < targetChunkSize) {
+ fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR);
+ } } }
+ fclose(f);
+ }
DISPLAYLEVEL(2, "\r%79s\r", "");
*bufferSizePtr = pos;
- return n;
+ DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10))
+ return nbLoadedChunks;
}
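(Editor's note: a small standalone illustration of the chunking arithmetic used by the new DiB_loadFiles() above, with made-up numbers: a 1,000,000-byte file split with a 64 KiB target chunk size yields 16 samples, 15 full chunks plus a 16,960-byte tail.)

    #include <stdio.h>

    int main(void)   /* illustrative only : mirrors the per-file computation in DiB_loadFiles() */
    {
        unsigned long long const fs64 = 1000000;            /* hypothetical file size */
        unsigned long long const targetChunkSize = 65536;   /* value passed via -B# */
        unsigned const nbChunks = (unsigned)((fs64 + (targetChunkSize-1)) / targetChunkSize);
        unsigned long long const lastChunk = fs64 - (unsigned long long)(nbChunks-1) * targetChunkSize;
        printf("%u chunks : %u full chunks of %llu bytes, last chunk %llu bytes \n",
               nbChunks, nbChunks-1, targetChunkSize, lastChunk);
        return 0;
    }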
#define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r)))
@@ -138,16 +161,19 @@ static U32 DiB_rand(U32* src)
return rand32 >> 5;
}
+/* DiB_shuffle() :
+ * shuffle a table of file names in a semi-random way
+ * It improves dictionary quality by reducing "locality" impact, so if sample set is very large,
+ * it will load random elements from it, instead of just the first ones. */
static void DiB_shuffle(const char** fileNamesTable, unsigned nbFiles) {
- /* Initialize the pseudorandom number generator */
- U32 seed = 0xFD2FB528;
- unsigned i;
- for (i = nbFiles - 1; i > 0; --i) {
- unsigned const j = DiB_rand(&seed) % (i + 1);
- const char* tmp = fileNamesTable[j];
- fileNamesTable[j] = fileNamesTable[i];
- fileNamesTable[i] = tmp;
- }
+ U32 seed = 0xFD2FB528;
+ unsigned i;
+ for (i = nbFiles - 1; i > 0; --i) {
+ unsigned const j = DiB_rand(&seed) % (i + 1);
+ const char* const tmp = fileNamesTable[j];
+ fileNamesTable[j] = fileNamesTable[i];
+ fileNamesTable[i] = tmp;
+ }
}
@@ -161,7 +187,7 @@ static size_t DiB_findMaxMem(unsigned long long requiredMem)
requiredMem = (((requiredMem >> 23) + 1) << 23);
requiredMem += step;
- if (requiredMem > maxMemory) requiredMem = maxMemory;
+ if (requiredMem > g_maxMemory) requiredMem = g_maxMemory;
while (!testmem) {
testmem = malloc((size_t)requiredMem);
@@ -201,18 +227,33 @@ static void DiB_saveDict(const char* dictFileName,
}
-static int g_tooLargeSamples = 0;
-static U64 DiB_getTotalCappedFileSize(const char** fileNamesTable, unsigned nbFiles)
+typedef struct {
+ U64 totalSizeToLoad;
+ unsigned oneSampleTooLarge;
+ unsigned nbSamples;
+} fileStats;
+
+/*! DiB_fileStats() :
+ * Given a list of files, and a chunkSize (0 == no chunk, whole files)
+ * provides the amount of data to be loaded and the resulting nb of samples.
+ * This is useful primarily for allocation purpose => sample buffer, and sample sizes table.
+ */
+static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel)
{
- U64 total = 0;
+ fileStats fs;
unsigned n;
+ memset(&fs, 0, sizeof(fs));
for (n=0; n<nbFiles; n++) {
U64 const fileSize = UTIL_getFileSize(fileNamesTable[n]);
- U64 const cappedFileSize = MIN(fileSize, SAMPLESIZE_MAX);
- total += cappedFileSize;
- g_tooLargeSamples |= (fileSize > 2*SAMPLESIZE_MAX);
+ U32 const nbSamples = (U32)(chunkSize ? (fileSize + (chunkSize-1)) / chunkSize : 1);
+ U64 const chunkToLoad = chunkSize ? MIN(chunkSize, fileSize) : fileSize;
+ size_t const cappedChunkSize = (size_t)MIN(chunkToLoad, SAMPLESIZE_MAX);
+ fs.totalSizeToLoad += cappedChunkSize * nbSamples;
+ fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX);
+ fs.nbSamples += nbSamples;
}
- return total;
+ DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10));
+ return fs;
}
@@ -229,64 +270,66 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(void* dictBuffer, size_t dictBufferCa
int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
- const char** fileNamesTable, unsigned nbFiles,
+ const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
int optimizeCover)
{
+ unsigned const displayLevel = params ? params->zParams.notificationLevel :
+ coverParams ? coverParams->zParams.notificationLevel :
+ 0; /* should never happen */
void* const dictBuffer = malloc(maxDictSize);
- size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
- unsigned long long const totalSizeToLoad = DiB_getTotalCappedFileSize(fileNamesTable, nbFiles);
+ fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel);
+ size_t* const sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t));
size_t const memMult = params ? MEMMULT : COVER_MEMMULT;
- size_t const maxMem = DiB_findMaxMem(totalSizeToLoad * memMult) / memMult;
- size_t benchedSize = (size_t) MIN ((unsigned long long)maxMem, totalSizeToLoad);
- void* const srcBuffer = malloc(benchedSize+NOISELENGTH);
+ size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult;
+ size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad);
+ void* const srcBuffer = malloc(loadedSize+NOISELENGTH);
int result = 0;
/* Checks */
- if (params) g_displayLevel = params->zParams.notificationLevel;
- else if (coverParams) g_displayLevel = coverParams->zParams.notificationLevel;
- else EXM_THROW(13, "Neither dictionary algorith selected"); /* should not happen */
- if ((!fileSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */
- if (g_tooLargeSamples) {
- DISPLAYLEVEL(2, "! Warning : some samples are very large \n");
- DISPLAYLEVEL(2, "! Note that dictionary is only useful for small files or beginning of large files. \n");
- DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each file are loaded \n", SAMPLESIZE_MAX);
+ if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer))
+ EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */
+ if (fs.oneSampleTooLarge) {
+ DISPLAYLEVEL(2, "! Warning : some sample(s) are very large \n");
+ DISPLAYLEVEL(2, "! Note that dictionary is only useful for small samples. \n");
+ DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX);
}
- if ((nbFiles < 5) || (totalSizeToLoad < 9 * (unsigned long long)maxDictSize)) {
+ if (fs.nbSamples < 5) {
DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing ! \n");
DISPLAYLEVEL(2, "! Please provide _one file per sample_. \n");
- DISPLAYLEVEL(2, "! Do not concatenate samples together into a single file, \n");
- DISPLAYLEVEL(2, "! as dictBuilder will be unable to find the beginning of each sample, \n");
- DISPLAYLEVEL(2, "! resulting in poor dictionary quality. \n");
+ DISPLAYLEVEL(2, "! Alternatively, split files into fixed-size blocks representative of samples, with -B# \n");
+ EXM_THROW(14, "nb of samples too low"); /* we now clearly forbid this case */
+ }
+ if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) {
+ DISPLAYLEVEL(2, "! Warning : data size of samples too small for target dictionary size \n");
+ DISPLAYLEVEL(2, "! Samples should be about 100x larger than target dictionary size \n");
}
/* init */
- if (benchedSize < totalSizeToLoad)
- DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(benchedSize >> 20));
+ if (loadedSize < fs.totalSizeToLoad)
+ DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20));
/* Load input buffer */
DISPLAYLEVEL(3, "Shuffling input files\n");
DiB_shuffle(fileNamesTable, nbFiles);
- nbFiles = DiB_loadFiles(srcBuffer, &benchedSize, fileSizes, fileNamesTable, nbFiles);
+ nbFiles = DiB_loadFiles(srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel);
- {
- size_t dictSize;
+ { size_t dictSize;
if (params) {
- DiB_fillNoise((char*)srcBuffer + benchedSize, NOISELENGTH); /* guard band, for end of buffer condition */
+ DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH); /* guard band, for end of buffer condition */
dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize,
- srcBuffer, fileSizes, nbFiles,
+ srcBuffer, sampleSizes, fs.nbSamples,
*params);
} else if (optimizeCover) {
dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize,
- srcBuffer, fileSizes, nbFiles,
+ srcBuffer, sampleSizes, fs.nbSamples,
coverParams);
if (!ZDICT_isError(dictSize)) {
DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps);
}
} else {
- dictSize =
- ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
- fileSizes, nbFiles, *coverParams);
+ dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
+ sampleSizes, fs.nbSamples, *coverParams);
}
if (ZDICT_isError(dictSize)) {
DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */
@@ -301,7 +344,7 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
/* clean up */
_cleanup:
free(srcBuffer);
+ free(sampleSizes);
free(dictBuffer);
- free(fileSizes);
return result;
}
diff --git a/programs/dibio.h b/programs/dibio.h
index 0227239b26db..499e3036520c 100644
--- a/programs/dibio.h
+++ b/programs/dibio.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* This library is designed for a single-threaded console application.
@@ -31,7 +32,7 @@
@return : 0 == ok. Any other : error.
*/
int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
- const char** fileNamesTable, unsigned nbFiles,
+ const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
int optimizeCover);
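(Editor's note: a hedged sketch of a call site for the updated prototype above. The file names, the 4096-byte chunk size, and the use of legacy training parameters are assumptions for illustration; the "dictionary" / 112640 defaults come from the man page later in this patch.)

    #include <string.h>
    #define ZDICT_STATIC_LINKING_ONLY   /* ZDICT_legacy_params_t */
    #include "zdict.h"
    #include "dibio.h"

    int main(void)
    {
        /* hypothetical samples; each contributes one sample per 4 KB chunk,
         * so the files are assumed large enough to yield at least 5 samples */
        const char* samples[] = { "sample1.bin", "sample2.bin" };
        ZDICT_legacy_params_t params;
        memset(&params, 0, sizeof(params));    /* 0 == use default training parameters */
        params.zParams.notificationLevel = 2;
        return DiB_trainFromFiles("dictionary", 112640 /* default max dict size */,
                                  samples, 2, 4096 /* new chunkSize argument (-B4096) */,
                                  &params, NULL, 0 /* optimizeCover unused with legacy params */);
    }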
diff --git a/programs/fileio.c b/programs/fileio.c
index 65b4c7579194..70158f930952 100644
--- a/programs/fileio.c
+++ b/programs/fileio.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -36,6 +37,7 @@
# include <io.h>
#endif
+#include "bitstream.h"
#include "mem.h"
#include "fileio.h"
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_magicNumber, ZSTD_frameHeaderSize_max */
@@ -55,6 +57,7 @@
#define LZ4_MAGICNUMBER 0x184D2204
#if defined(ZSTD_LZ4COMPRESS) || defined(ZSTD_LZ4DECOMPRESS)
+# define LZ4F_ENABLE_OBSOLETE_ENUMS
# include <lz4frame.h>
# include <lz4.h>
#endif
@@ -105,9 +108,6 @@ static clock_t g_time = 0;
/*-*************************************
-* Errors
-***************************************/
-/*-*************************************
* Debug
***************************************/
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
@@ -140,6 +140,24 @@ static clock_t g_time = 0;
} }
+/*-************************************
+* Signal (Ctrl-C trapping)
+**************************************/
+#include <signal.h>
+
+static const char* g_artefact = NULL;
+static void INThandler(int sig)
+{
+ assert(sig==SIGINT); (void)sig;
+#if !defined(_MSC_VER)
+ signal(sig, SIG_IGN); /* this invocation generates a buggy warning in Visual Studio */
+#endif
+ if (g_artefact) remove(g_artefact);
+ DISPLAY("\n");
+ exit(2);
+}
+
+
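(Editor's note: the handler above exists to delete a partially-written output file when the user hits Ctrl-C; a minimal standalone sketch of the same pattern, with a hypothetical file name.)

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static const char* g_artefact = NULL;    /* path of the partially-written output */

    static void INThandler(int sig)
    {
        (void)sig;
        if (g_artefact) remove(g_artefact);  /* don't leave a truncated file behind */
        exit(2);
    }

    int main(void)
    {
        g_artefact = "output.zst";           /* hypothetical destination file */
        signal(SIGINT, INThandler);          /* installed only while the file is being written */
        /* ... long-running write to g_artefact ... */
        signal(SIGINT, SIG_DFL);             /* restore default handling once the file is complete */
        return 0;
    }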
/* ************************************************************
* Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW
***************************************************************/
@@ -213,6 +231,30 @@ void FIO_setOverlapLog(unsigned overlapLog){
DISPLAYLEVEL(2, "Setting overlapLog is useless in single-thread mode \n");
g_overlapLog = overlapLog;
}
+static U32 g_ldmFlag = 0;
+void FIO_setLdmFlag(unsigned ldmFlag) {
+ g_ldmFlag = (ldmFlag>0);
+}
+static U32 g_ldmHashLog = 0;
+void FIO_setLdmHashLog(unsigned ldmHashLog) {
+ g_ldmHashLog = ldmHashLog;
+}
+static U32 g_ldmMinMatch = 0;
+void FIO_setLdmMinMatch(unsigned ldmMinMatch) {
+ g_ldmMinMatch = ldmMinMatch;
+}
+
+#define FIO_LDM_PARAM_NOTSET 9999
+static U32 g_ldmBucketSizeLog = FIO_LDM_PARAM_NOTSET;
+void FIO_setLdmBucketSizeLog(unsigned ldmBucketSizeLog) {
+ g_ldmBucketSizeLog = ldmBucketSizeLog;
+}
+
+static U32 g_ldmHashEveryLog = FIO_LDM_PARAM_NOTSET;
+void FIO_setLdmHashEveryLog(unsigned ldmHashEveryLog) {
+ g_ldmHashEveryLog = ldmHashEveryLog;
+}
+
/*-*************************************
@@ -325,13 +367,16 @@ static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName)
fileHandle = fopen(fileName, "rb");
if (fileHandle==0) EXM_THROW(31, "%s: %s", fileName, strerror(errno));
fileSize = UTIL_getFileSize(fileName);
- if (fileSize > DICTSIZE_MAX)
+ if (fileSize > DICTSIZE_MAX) {
EXM_THROW(32, "Dictionary file %s is too large (> %u MB)",
fileName, DICTSIZE_MAX >> 20); /* avoid extreme cases */
+ }
*bufferPtr = malloc((size_t)fileSize);
if (*bufferPtr==NULL) EXM_THROW(34, "%s", strerror(errno));
- { size_t const readSize = fread(*bufferPtr, 1, (size_t)fileSize, fileHandle);
- if (readSize!=fileSize) EXM_THROW(35, "Error reading dictionary file %s", fileName); }
+ { size_t const readSize = fread(*bufferPtr, 1, (size_t)fileSize, fileHandle);
+ if (readSize!=fileSize)
+ EXM_THROW(35, "Error reading dictionary file %s", fileName);
+ }
fclose(fileHandle);
return (size_t)fileSize;
}
@@ -356,7 +401,7 @@ typedef struct {
} cRess_t;
static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
- U64 srcSize, int srcIsRegularFile,
+ U64 srcSize,
ZSTD_compressionParameters* comprParams) {
cRess_t ress;
memset(&ress, 0, sizeof(ress));
@@ -394,12 +439,23 @@ static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
#ifdef ZSTD_NEWAPI
{ /* frame parameters */
- CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_contentSizeFlag, srcIsRegularFile) );
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_dictIDFlag, g_dictIDFlag) );
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_checksumFlag, g_checksumFlag) );
- CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, srcSize) );
- /* compression parameters */
+ (void)srcSize;
+ /* compression level */
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_compressionLevel, cLevel) );
+ /* long distance matching */
+ CHECK( ZSTD_CCtx_setParameter(
+ ress.cctx, ZSTD_p_enableLongDistanceMatching, g_ldmFlag) );
+ CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_ldmHashLog, g_ldmHashLog) );
+ CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_ldmMinMatch, g_ldmMinMatch) );
+ if (g_ldmBucketSizeLog != FIO_LDM_PARAM_NOTSET) {
+ CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_ldmBucketSizeLog, g_ldmBucketSizeLog) );
+ }
+ if (g_ldmHashEveryLog != FIO_LDM_PARAM_NOTSET) {
+ CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_ldmHashEveryLog, g_ldmHashEveryLog) );
+ }
+ /* compression parameters */
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_windowLog, comprParams->windowLog) );
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_chainLog, comprParams->chainLog) );
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_hashLog, comprParams->hashLog) );
@@ -408,13 +464,13 @@ static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_targetLength, comprParams->targetLength) );
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_compressionStrategy, (U32)comprParams->strategy) );
/* multi-threading */
+ DISPLAYLEVEL(5,"set nb threads = %u \n", g_nbThreads);
CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_p_nbThreads, g_nbThreads) );
/* dictionary */
CHECK( ZSTD_CCtx_loadDictionary(ress.cctx, dictBuffer, dictBuffSize) );
}
#elif defined(ZSTD_MULTITHREAD)
{ ZSTD_parameters params = ZSTD_getParams(cLevel, srcSize, dictBuffSize);
- params.fParams.contentSizeFlag = srcIsRegularFile;
params.fParams.checksumFlag = g_checksumFlag;
params.fParams.noDictIDFlag = !g_dictIDFlag;
if (comprParams->windowLog) params.cParams.windowLog = comprParams->windowLog;
@@ -429,7 +485,6 @@ static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
}
#else
{ ZSTD_parameters params = ZSTD_getParams(cLevel, srcSize, dictBuffSize);
- params.fParams.contentSizeFlag = srcIsRegularFile;
params.fParams.checksumFlag = g_checksumFlag;
params.fParams.noDictIDFlag = !g_dictIDFlag;
if (comprParams->windowLog) params.cParams.windowLog = comprParams->windowLog;
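(Editor's note: taken together, the FIO_createCResources() hunks above map the new --long options onto the v1.3.2 advanced API. A condensed sketch of that flow outside fileio.c, assuming dstCapacity is at least ZSTD_compressBound(srcSize); the windowLog value of 27 is illustrative, matching the --long default documented below.)

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_CCtx_setParameter, ZSTD_compress_generic (experimental API) */
    #include "zstd.h"

    static size_t compressWithLdm(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize, unsigned cLevel)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t ret = 0;
        if (cctx == NULL) return (size_t)-1;    /* simplistic error handling */
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, cLevel);
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_enableLongDistanceMatching, 1);   /* --long */
        ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, 27);                   /* --long default window */
        do {   /* one frame, flushed and ended with a single directive, as in the frame-end loop below */
            ret = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        } while (ret != 0 && !ZSTD_isError(ret) && out.pos < out.size);
        ZSTD_freeCCtx(cctx);
        return ZSTD_isError(ret) ? ret : out.pos;
    }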
@@ -719,6 +774,7 @@ static int FIO_compressFilename_internal(cRess_t ress,
U64 readsize = 0;
U64 compressedfilesize = 0;
U64 const fileSize = UTIL_getFileSize(srcFileName);
+ DISPLAYLEVEL(5, "%s: %u bytes \n", srcFileName, (U32)fileSize);
switch (g_compressionType) {
case FIO_zstdCompression:
@@ -758,11 +814,12 @@ static int FIO_compressFilename_internal(cRess_t ress,
/* init */
#ifdef ZSTD_NEWAPI
- /* nothing, reset is implied */
+ if (fileSize!=0) /* when src is stdin, fileSize==0, but is effectively unknown */
+ ZSTD_CCtx_setPledgedSrcSize(ress.cctx, fileSize); /* note : fileSize==0 means "empty" */
#elif defined(ZSTD_MULTITHREAD)
- CHECK( ZSTDMT_resetCStream(ress.cctx, fileSize) );
+ CHECK( ZSTDMT_resetCStream(ress.cctx, fileSize) ); /* note : fileSize==0 means "unknown" */
#else
- CHECK( ZSTD_resetCStream(ress.cctx, fileSize) );
+ CHECK( ZSTD_resetCStream(ress.cctx, fileSize) ); /* note : fileSize==0 means "unknown" */
#endif
/* Main compression loop */
@@ -811,10 +868,10 @@ static int FIO_compressFilename_internal(cRess_t ress,
/* End of Frame */
{ size_t result = 1;
- while (result!=0) { /* note : is there any possibility of endless loop ? */
+ while (result != 0) {
ZSTD_outBuffer outBuff = { ress.dstBuffer, ress.dstBufferSize, 0 };
#ifdef ZSTD_NEWAPI
- ZSTD_inBuffer inBuff = { NULL, 0, 0};
+ ZSTD_inBuffer inBuff = { NULL, 0, 0 };
result = ZSTD_compress_generic(ress.cctx,
&outBuff, &inBuff, ZSTD_e_end);
#elif defined(ZSTD_MULTITHREAD)
@@ -822,11 +879,14 @@ static int FIO_compressFilename_internal(cRess_t ress,
#else
result = ZSTD_endStream(ress.cctx, &outBuff);
#endif
- if (ZSTD_isError(result))
+ if (ZSTD_isError(result)) {
EXM_THROW(26, "Compression error during frame end : %s",
ZSTD_getErrorName(result));
- { size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile);
- if (sizeCheck!=outBuff.pos) EXM_THROW(27, "Write error : cannot write frame end into %s", dstFileName); }
+ }
+ { size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile);
+ if (sizeCheck!=outBuff.pos)
+ EXM_THROW(27, "Write error : cannot write frame end into %s", dstFileName);
+ }
compressedfilesize += outBuff.pos;
}
}
@@ -890,6 +950,14 @@ static int FIO_compressFilename_dstFile(cRess_t ress,
ress.dstFile = FIO_openDstFile(dstFileName);
if (ress.dstFile==NULL) return 1; /* could not open dstFileName */
+ if (UTIL_isRegularFile(dstFileName)) {
+ g_artefact = dstFileName;
+ signal(SIGINT, INThandler);
+ } else {
+ g_artefact = NULL;
+ }
+
+
if (strcmp (srcFileName, stdinmark) && UTIL_getFileStat(srcFileName, &statbuf))
stat_result = 1;
result = FIO_compressFilename_srcFile(ress, dstFileName, srcFileName, compressionLevel);
@@ -904,6 +972,9 @@ static int FIO_compressFilename_dstFile(cRess_t ress,
}
else if (strcmp (dstFileName, stdoutmark) && stat_result)
UTIL_setFileStat(dstFileName, &statbuf);
+
+ signal(SIGINT, SIG_DFL);
+
return result;
}
@@ -913,9 +984,8 @@ int FIO_compressFilename(const char* dstFileName, const char* srcFileName,
{
clock_t const start = clock();
U64 const srcSize = UTIL_getFileSize(srcFileName);
- int const isRegularFile = UTIL_isRegularFile(srcFileName);
- cRess_t const ress = FIO_createCResources(dictFileName, compressionLevel, srcSize, isRegularFile, comprParams);
+ cRess_t const ress = FIO_createCResources(dictFileName, compressionLevel, srcSize, comprParams);
int const result = FIO_compressFilename_dstFile(ress, dstFileName, srcFileName, compressionLevel);
double const seconds = (double)(clock() - start) / CLOCKS_PER_SEC;
@@ -936,8 +1006,7 @@ int FIO_compressMultipleFilenames(const char** inFileNamesTable, unsigned nbFile
char* dstFileName = (char*)malloc(FNSPACE);
size_t const suffixSize = suffix ? strlen(suffix) : 0;
U64 const srcSize = (nbFiles != 1) ? 0 : UTIL_getFileSize(inFileNamesTable[0]) ;
- int const isRegularFile = (nbFiles != 1) ? 0 : UTIL_isRegularFile(inFileNamesTable[0]);
- cRess_t ress = FIO_createCResources(dictFileName, compressionLevel, srcSize, isRegularFile, comprParams);
+ cRess_t ress = FIO_createCResources(dictFileName, compressionLevel, srcSize, comprParams);
/* init */
if (dstFileName==NULL)
@@ -986,8 +1055,8 @@ int FIO_compressMultipleFilenames(const char** inFileNamesTable, unsigned nbFile
***************************************************************************/
typedef struct {
void* srcBuffer;
- size_t srcBufferLoaded;
size_t srcBufferSize;
+ size_t srcBufferLoaded;
void* dstBuffer;
size_t dstBufferSize;
ZSTD_DStream* dctx;
@@ -1002,7 +1071,7 @@ static dRess_t FIO_createDResources(const char* dictFileName)
/* Allocation */
ress.dctx = ZSTD_createDStream();
if (ress.dctx==NULL) EXM_THROW(60, "Can't create ZSTD_DStream");
- ZSTD_setDStreamParameter(ress.dctx, DStream_p_maxWindowSize, g_memLimit);
+ CHECK( ZSTD_setDStreamParameter(ress.dctx, DStream_p_maxWindowSize, g_memLimit) );
ress.srcBufferSize = ZSTD_DStreamInSize();
ress.srcBuffer = malloc(ress.srcBufferSize);
ress.dstBufferSize = ZSTD_DStreamOutSize();
@@ -1133,6 +1202,35 @@ static unsigned FIO_passThrough(FILE* foutput, FILE* finput, void* buffer, size_
return 0;
}
+static void FIO_zstdErrorHelp(dRess_t* ress, size_t ret, char const* srcFileName)
+{
+ ZSTD_frameHeader header;
+ /* No special help for these errors */
+ if (ZSTD_getErrorCode(ret) != ZSTD_error_frameParameter_windowTooLarge)
+ return;
+ /* Try to decode the frame header */
+ ret = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded);
+ if (ret == 0) {
+ U32 const windowSize = (U32)header.windowSize;
+ U32 const windowLog = BIT_highbit32(windowSize) + ((windowSize & (windowSize - 1)) != 0);
+ U32 const windowMB = (windowSize >> 20) + (windowSize & ((1 MB) - 1));
+ assert(header.windowSize <= (U64)((U32)-1));
+ assert(g_memLimit > 0);
+ DISPLAYLEVEL(1, "%s : Window size larger than maximum : %llu > %u\n",
+ srcFileName, header.windowSize, g_memLimit);
+ if (windowLog <= ZSTD_WINDOWLOG_MAX) {
+ DISPLAYLEVEL(1, "%s : Use --long=%u or --memory=%uMB\n",
+ srcFileName, windowLog, windowMB);
+ return;
+ }
+ } else if (ZSTD_getErrorCode(ret) != ZSTD_error_frameParameter_windowTooLarge) {
+ DISPLAYLEVEL(1, "%s : Error decoding frame header to read window size : %s\n",
+ srcFileName, ZSTD_getErrorName(ret));
+ return;
+ }
+ DISPLAYLEVEL(1, "%s : Window log larger than ZSTD_WINDOWLOG_MAX=%u not supported\n",
+ srcFileName, ZSTD_WINDOWLOG_MAX);
+}
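(Editor's note: the --long value suggested above is simply the frame's window size rounded up to a power of two. A tiny self-check of that arithmetic, using BIT_highbit32() from bitstream.h, which this patch now includes from fileio.c; the 5 MB window is a made-up example.)

    #include <stdio.h>
    #include "bitstream.h"   /* BIT_highbit32() */

    int main(void)
    {
        unsigned const windowSize = 5U << 20;    /* hypothetical 5 MB window */
        unsigned const windowLog  = BIT_highbit32(windowSize)
                                  + ((windowSize & (windowSize - 1)) != 0);   /* +1 unless a power of 2 */
        printf("windowSize %u => suggest --long=%u \n", windowSize, windowLog);   /* prints --long=23 */
        return 0;
    }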
/** FIO_decompressFrame() :
* @return : size of decoded zstd frame, or an error code
@@ -1146,14 +1244,18 @@ unsigned long long FIO_decompressZstdFrame(dRess_t* ress,
U64 frameSize = 0;
U32 storedSkips = 0;
+ size_t const srcFileLength = strlen(srcFileName);
+ if (srcFileLength>20) srcFileName += srcFileLength-20; /* display last 20 characters only */
+
ZSTD_resetDStream(ress->dctx);
- if (strlen(srcFileName)>20) srcFileName += strlen(srcFileName)-20; /* display last 20 characters */
- /* Header loading (optional, saves one loop) */
- { size_t const toRead = 9;
- if (ress->srcBufferLoaded < toRead)
- ress->srcBufferLoaded += fread(((char*)ress->srcBuffer) + ress->srcBufferLoaded, 1, toRead - ress->srcBufferLoaded, finput);
- }
+ /* Header loading : ensures ZSTD_getFrameHeader() will succeed */
+ { size_t const toDecode = ZSTD_FRAMEHEADERSIZE_MAX;
+ if (ress->srcBufferLoaded < toDecode) {
+ size_t const toRead = toDecode - ress->srcBufferLoaded;
+ void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded;
+ ress->srcBufferLoaded += fread(startPosition, 1, toRead, finput);
+ } }
/* Main decompression Loop */
while (1) {
@@ -1163,6 +1265,7 @@ unsigned long long FIO_decompressZstdFrame(dRess_t* ress,
if (ZSTD_isError(readSizeHint)) {
DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n",
srcFileName, ZSTD_getErrorName(readSizeHint));
+ FIO_zstdErrorHelp(ress, readSizeHint, srcFileName);
return FIO_ERROR_FRAME_DECODING;
}
@@ -1185,14 +1288,17 @@ unsigned long long FIO_decompressZstdFrame(dRess_t* ress,
}
/* Fill input buffer */
- { size_t const toRead = MIN(readSizeHint, ress->srcBufferSize); /* support large skippable frames */
- if (ress->srcBufferLoaded < toRead)
- ress->srcBufferLoaded += fread((char*)ress->srcBuffer + ress->srcBufferLoaded,
- 1, toRead - ress->srcBufferLoaded, finput);
- if (ress->srcBufferLoaded < toRead) {
- DISPLAYLEVEL(1, "%s : Read error (39) : premature end \n",
- srcFileName);
- return FIO_ERROR_FRAME_DECODING;
+ { size_t const toDecode = MIN(readSizeHint, ress->srcBufferSize); /* support large skippable frames */
+ if (ress->srcBufferLoaded < toDecode) {
+ size_t const toRead = toDecode - ress->srcBufferLoaded; /* > 0 */
+ void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded;
+ size_t const readSize = fread(startPosition, 1, toRead, finput);
+ if (readSize==0) {
+ DISPLAYLEVEL(1, "%s : Read error (39) : premature end \n",
+ srcFileName);
+ return FIO_ERROR_FRAME_DECODING;
+ }
+ ress->srcBufferLoaded += readSize;
} } }
FIO_fwriteSparseEnd(ress->dstFile, storedSkips);
@@ -1523,11 +1629,11 @@ static int FIO_decompressSrcFile(dRess_t ress, const char* dstFileName, const ch
/* Close file */
if (fclose(srcFile)) {
- DISPLAYLEVEL(1, "zstd: %s: %s \n", srcFileName, strerror(errno)); /* error should never happen */
+ DISPLAYLEVEL(1, "zstd: %s: %s \n", srcFileName, strerror(errno)); /* error should not happen */
return 1;
}
if ( g_removeSrcFile /* --rm */
- && (result==0) /* decompression successful */
+ && (result==0) /* decompression successful */
&& strcmp(srcFileName, stdinmark) ) /* not stdin */ {
if (remove(srcFileName)) {
/* failed to remove src file */
@@ -1553,7 +1659,15 @@ static int FIO_decompressDstFile(dRess_t ress,
ress.dstFile = FIO_openDstFile(dstFileName);
if (ress.dstFile==0) return 1;
- if (strcmp (srcFileName, stdinmark) && UTIL_getFileStat(srcFileName, &statbuf))
+ if (UTIL_isRegularFile(dstFileName)) {
+ g_artefact = dstFileName;
+ signal(SIGINT, INThandler);
+ } else {
+ g_artefact = NULL;
+ }
+
+ if ( strcmp(srcFileName, stdinmark)
+ && UTIL_getFileStat(srcFileName, &statbuf) )
stat_result = 1;
result = FIO_decompressSrcFile(ress, dstFileName, srcFileName);
@@ -1563,11 +1677,18 @@ static int FIO_decompressDstFile(dRess_t ress,
}
if ( (result != 0) /* operation failure */
- && strcmp(dstFileName, nulmark) /* special case : don't remove() /dev/null (#316) */
- && remove(dstFileName) /* remove artefact */ )
- result=1; /* don't do anything special if remove() fails */
- else if (strcmp (dstFileName, stdoutmark) && stat_result)
- UTIL_setFileStat(dstFileName, &statbuf);
+ && strcmp(dstFileName, nulmark) /* special case : don't remove() /dev/null (#316) */
+ && strcmp(dstFileName, stdoutmark) ) /* special case : don't remove() stdout */
+ remove(dstFileName); /* remove decompression artefact; note don't do anything special if remove() fails */
+ else { /* operation success */
+ if ( strcmp(dstFileName, stdoutmark) /* special case : don't chmod stdout */
+ && strcmp(dstFileName, nulmark) /* special case : don't chmod /dev/null */
+ && stat_result ) /* file permissions correctly extracted from src */
+ UTIL_setFileStat(dstFileName, &statbuf); /* transfer file permissions from src into dst */
+ }
+
+ signal(SIGINT, SIG_DFL);
+
return result;
}
@@ -1659,12 +1780,14 @@ int FIO_decompressMultipleFilenames(const char** srcNamesTable, unsigned nbFiles
***************************************************************************/
typedef struct {
+ U64 decompressedSize;
+ U64 compressedSize;
+ U64 windowSize;
int numActualFrames;
int numSkippableFrames;
- unsigned long long decompressedSize;
int decompUnavailable;
- unsigned long long compressedSize;
int usesCheck;
+ U32 nbFiles;
} fileInfo_t;
/** getFileInfo() :
@@ -1681,14 +1804,16 @@ static int getFileInfo(fileInfo_t* info, const char* inFileName){
DISPLAY("Error: could not open source file %s\n", inFileName);
return 3;
}
- info->compressedSize = (unsigned long long)UTIL_getFileSize(inFileName);
+ info->compressedSize = UTIL_getFileSize(inFileName);
/* begin analyzing frame */
for ( ; ; ) {
BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
size_t const numBytesRead = fread(headerBuffer, 1, sizeof(headerBuffer), srcFile);
if (numBytesRead < ZSTD_frameHeaderSize_min) {
- if (feof(srcFile) && numBytesRead == 0 && info->compressedSize > 0) {
+ if ( feof(srcFile)
+ && (numBytesRead == 0)
+ && (info->compressedSize > 0) ) {
break;
}
else if (feof(srcFile)) {
@@ -1705,12 +1830,19 @@ static int getFileInfo(fileInfo_t* info, const char* inFileName){
{ U32 const magicNumber = MEM_readLE32(headerBuffer);
/* Zstandard frame */
if (magicNumber == ZSTD_MAGICNUMBER) {
+ ZSTD_frameHeader header;
U64 const frameContentSize = ZSTD_getFrameContentSize(headerBuffer, numBytesRead);
if (frameContentSize == ZSTD_CONTENTSIZE_ERROR || frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
info->decompUnavailable = 1;
} else {
info->decompressedSize += frameContentSize;
}
+ if (ZSTD_getFrameHeader(&header, headerBuffer, numBytesRead) != 0) {
+ DISPLAY("Error: could not decode frame header\n");
+ detectError = 1;
+ break;
+ }
+ info->windowSize = header.windowSize;
/* move to the end of the frame header */
{ size_t const headerSize = ZSTD_frameHeaderSize(headerBuffer, numBytesRead);
if (ZSTD_isError(headerSize)) {
@@ -1790,37 +1922,45 @@ static int getFileInfo(fileInfo_t* info, const char* inFileName){
}
} /* end analyzing frame */
fclose(srcFile);
+ info->nbFiles = 1;
return detectError;
}
static void displayInfo(const char* inFileName, fileInfo_t* info, int displayLevel){
unsigned const unit = info->compressedSize < (1 MB) ? (1 KB) : (1 MB);
const char* const unitStr = info->compressedSize < (1 MB) ? "KB" : "MB";
+ double const windowSizeUnit = (double)info->windowSize / unit;
double const compressedSizeUnit = (double)info->compressedSize / unit;
double const decompressedSizeUnit = (double)info->decompressedSize / unit;
double const ratio = (info->compressedSize == 0) ? 0 : ((double)info->decompressedSize)/info->compressedSize;
const char* const checkString = (info->usesCheck ? "XXH64" : "None");
if (displayLevel <= 2) {
if (!info->decompUnavailable) {
- DISPLAYOUT("Skippable Non-Skippable Compressed Uncompressed Ratio Check Filename\n");
- DISPLAYOUT("%9d %13d %7.2f %2s %9.2f %2s %5.3f %5s %s\n",
- info->numSkippableFrames, info->numActualFrames,
+ DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %s\n",
+ info->numSkippableFrames + info->numActualFrames,
+ info->numSkippableFrames,
compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr,
ratio, checkString, inFileName);
} else {
- DISPLAYOUT("Skippable Non-Skippable Compressed Check Filename\n");
- DISPLAYOUT("%9d %13d %7.2f MB %5s %s\n",
- info->numSkippableFrames, info->numActualFrames,
- compressedSizeUnit, checkString, inFileName);
+ DISPLAYOUT("%6d %5d %7.2f %2s %5s %s\n",
+ info->numSkippableFrames + info->numActualFrames,
+ info->numSkippableFrames,
+ compressedSizeUnit, unitStr,
+ checkString, inFileName);
}
} else {
DISPLAYOUT("# Zstandard Frames: %d\n", info->numActualFrames);
DISPLAYOUT("# Skippable Frames: %d\n", info->numSkippableFrames);
+ DISPLAYOUT("Window Size: %.2f %2s (%llu B)\n",
+ windowSizeUnit, unitStr,
+ (unsigned long long)info->windowSize);
DISPLAYOUT("Compressed Size: %.2f %2s (%llu B)\n",
- compressedSizeUnit, unitStr, info->compressedSize);
+ compressedSizeUnit, unitStr,
+ (unsigned long long)info->compressedSize);
if (!info->decompUnavailable) {
DISPLAYOUT("Decompressed Size: %.2f %2s (%llu B)\n",
- decompressedSizeUnit, unitStr, info->decompressedSize);
+ decompressedSizeUnit, unitStr,
+ (unsigned long long)info->decompressedSize);
DISPLAYOUT("Ratio: %.4f\n", ratio);
}
DISPLAYOUT("Check: %s\n", checkString);
@@ -1828,33 +1968,40 @@ static void displayInfo(const char* inFileName, fileInfo_t* info, int displayLev
}
}
+static fileInfo_t FIO_addFInfo(fileInfo_t fi1, fileInfo_t fi2)
+{
+ fileInfo_t total;
+ total.numActualFrames = fi1.numActualFrames + fi2.numActualFrames;
+ total.numSkippableFrames = fi1.numSkippableFrames + fi2.numSkippableFrames;
+ total.compressedSize = fi1.compressedSize + fi2.compressedSize;
+ total.decompressedSize = fi1.decompressedSize + fi2.decompressedSize;
+ total.decompUnavailable = fi1.decompUnavailable | fi2.decompUnavailable;
+ total.usesCheck = fi1.usesCheck & fi2.usesCheck;
+ total.nbFiles = fi1.nbFiles + fi2.nbFiles;
+ return total;
+}
-static int FIO_listFile(const char* inFileName, int displayLevel, unsigned fileNo, unsigned numFiles){
+static int FIO_listFile(fileInfo_t* total, const char* inFileName, int displayLevel){
/* initialize info to avoid warnings */
fileInfo_t info;
memset(&info, 0, sizeof(info));
- DISPLAYOUT("%s (%u/%u):\n", inFileName, fileNo, numFiles);
- {
- int const error = getFileInfo(&info, inFileName);
+ { int const error = getFileInfo(&info, inFileName);
if (error == 1) {
/* display error, but provide output */
- DISPLAY("An error occurred with getting file info\n");
+ DISPLAY("An error occurred while getting file info \n");
}
else if (error == 2) {
- DISPLAYOUT("File %s not compressed with zstd\n", inFileName);
- if (displayLevel > 2) {
- DISPLAYOUT("\n");
- }
+ DISPLAYOUT("File %s not compressed by zstd \n", inFileName);
+ if (displayLevel > 2) DISPLAYOUT("\n");
return 1;
}
else if (error == 3) {
- /* error occurred with opening the file */
- if (displayLevel > 2) {
- DISPLAYOUT("\n");
- }
+ /* error occurred while opening the file */
+ if (displayLevel > 2) DISPLAYOUT("\n");
return 1;
}
displayInfo(inFileName, &info, displayLevel);
+ *total = FIO_addFInfo(*total, info);
return error;
}
}
@@ -1864,15 +2011,38 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis
DISPLAYOUT("No files given\n");
return 0;
}
- DISPLAYOUT("===========================================\n");
- DISPLAYOUT("Printing information about compressed files\n");
- DISPLAYOUT("===========================================\n");
- DISPLAYOUT("Number of files listed: %u\n", numFiles);
- {
- int error = 0;
+ if (displayLevel <= 2) {
+ DISPLAYOUT("Frames Skips Compressed Uncompressed Ratio Check Filename\n");
+ }
+ { int error = 0;
unsigned u;
+ fileInfo_t total;
+ memset(&total, 0, sizeof(total));
+ total.usesCheck = 1;
for (u=0; u<numFiles;u++) {
- error |= FIO_listFile(filenameTable[u], displayLevel, u+1, numFiles);
+ error |= FIO_listFile(&total, filenameTable[u], displayLevel);
+ }
+ if (numFiles > 1 && displayLevel <= 2) {
+ unsigned const unit = total.compressedSize < (1 MB) ? (1 KB) : (1 MB);
+ const char* const unitStr = total.compressedSize < (1 MB) ? "KB" : "MB";
+ double const compressedSizeUnit = (double)total.compressedSize / unit;
+ double const decompressedSizeUnit = (double)total.decompressedSize / unit;
+ double const ratio = (total.compressedSize == 0) ? 0 : ((double)total.decompressedSize)/total.compressedSize;
+ const char* const checkString = (total.usesCheck ? "XXH64" : "");
+ DISPLAYOUT("----------------------------------------------------------------- \n");
+ if (total.decompUnavailable) {
+ DISPLAYOUT("%6d %5d %7.2f %2s %5s %u files\n",
+ total.numSkippableFrames + total.numActualFrames,
+ total.numSkippableFrames,
+ compressedSizeUnit, unitStr,
+ checkString, total.nbFiles);
+ } else {
+ DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %u files\n",
+ total.numSkippableFrames + total.numActualFrames,
+ total.numSkippableFrames,
+ compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr,
+ ratio, checkString, total.nbFiles);
+ }
}
return error;
}
diff --git a/programs/fileio.h b/programs/fileio.h
index 8008e97dd5f3..aa4484fdce0c 100644
--- a/programs/fileio.h
+++ b/programs/fileio.h
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -56,6 +57,11 @@ void FIO_setMemLimit(unsigned memLimit);
void FIO_setNbThreads(unsigned nbThreads);
void FIO_setBlockSize(unsigned blockSize);
void FIO_setOverlapLog(unsigned overlapLog);
+void FIO_setLdmFlag(unsigned ldmFlag);
+void FIO_setLdmHashLog(unsigned ldmHashLog);
+void FIO_setLdmMinMatch(unsigned ldmMinMatch);
+void FIO_setLdmBucketSizeLog(unsigned ldmBucketSizeLog);
+void FIO_setLdmHashEveryLog(unsigned ldmHashEveryLog);
/*-*************************************
diff --git a/programs/platform.h b/programs/platform.h
index fb2e9b173d2a..a4d7850fd714 100644
--- a/programs/platform.h
+++ b/programs/platform.h
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef PLATFORM_H_MODULE
diff --git a/programs/util.h b/programs/util.h
index 7b553661cde3..c8be5f5fb574 100644
--- a/programs/util.h
+++ b/programs/util.h
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef UTIL_H_MODULE
@@ -106,57 +107,127 @@ extern "C" {
/*-****************************************
+* Console log
+******************************************/
+static int g_utilDisplayLevel;
+#define UTIL_DISPLAY(...) fprintf(stderr, __VA_ARGS__)
+#define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } }
+
+
+/*-****************************************
* Time functions
******************************************/
#if defined(_WIN32) /* Windows */
- typedef LARGE_INTEGER UTIL_freq_t;
- typedef LARGE_INTEGER UTIL_time_t;
- UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* ticksPerSecond) { if (!QueryPerformanceFrequency(ticksPerSecond)) fprintf(stderr, "ERROR: QueryPerformance not present\n"); }
- UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { QueryPerformanceCounter(x); }
- UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; }
- UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; }
+ typedef LARGE_INTEGER UTIL_time_t;
+ UTIL_STATIC UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; }
+ UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+ {
+ static LARGE_INTEGER ticksPerSecond;
+ static int init = 0;
+ if (!init) {
+ if (!QueryPerformanceFrequency(&ticksPerSecond))
+ UTIL_DISPLAYLEVEL(1, "ERROR: QueryPerformanceFrequency() failure\n");
+ init = 1;
+ }
+ return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart;
+ }
+ UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+ {
+ static LARGE_INTEGER ticksPerSecond;
+ static int init = 0;
+ if (!init) {
+ if (!QueryPerformanceFrequency(&ticksPerSecond))
+ UTIL_DISPLAYLEVEL(1, "ERROR: QueryPerformanceFrequency() failure\n");
+ init = 1;
+ }
+ return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart;
+ }
#elif defined(__APPLE__) && defined(__MACH__)
- #include <mach/mach_time.h>
- typedef mach_timebase_info_data_t UTIL_freq_t;
- typedef U64 UTIL_time_t;
- UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* rate) { mach_timebase_info(rate); }
- UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { *x = mach_absolute_time(); }
- UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t rate, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom))/1000ULL; }
- UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t rate, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return ((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom); }
+ #include <mach/mach_time.h>
+ typedef U64 UTIL_time_t;
+ UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); }
+ UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+ {
+ static mach_timebase_info_data_t rate;
+ static int init = 0;
+ if (!init) {
+ mach_timebase_info(&rate);
+ init = 1;
+ }
+ return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom))/1000ULL;
+ }
+ UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+ {
+ static mach_timebase_info_data_t rate;
+ static int init = 0;
+ if (!init) {
+ mach_timebase_info(&rate);
+ init = 1;
+ }
+ return ((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom);
+ }
#elif (PLATFORM_POSIX_VERSION >= 200112L)
- #include <sys/times.h> /* times */
- typedef U64 UTIL_freq_t;
- typedef U64 UTIL_time_t;
- UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* ticksPerSecond) { *ticksPerSecond=sysconf(_SC_CLK_TCK); }
- UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { struct tms junk; clock_t newTicks = (clock_t) times(&junk); (void)junk; *x = (UTIL_time_t)newTicks; }
- UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / ticksPerSecond; }
- UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / ticksPerSecond; }
+ #include <time.h>
+ typedef struct timespec UTIL_freq_t;
+ typedef struct timespec UTIL_time_t;
+ UTIL_STATIC UTIL_time_t UTIL_getTime(void)
+ {
+ UTIL_time_t time;
+ if (clock_gettime(CLOCK_MONOTONIC, &time))
+ UTIL_DISPLAYLEVEL(1, "ERROR: Failed to get time\n"); /* we could also exit() */
+ return time;
+ }
+ UTIL_STATIC UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end)
+ {
+ UTIL_time_t diff;
+ if (end.tv_nsec < begin.tv_nsec) {
+ diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec;
+ diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec;
+ } else {
+ diff.tv_sec = end.tv_sec - begin.tv_sec;
+ diff.tv_nsec = end.tv_nsec - begin.tv_nsec;
+ }
+ return diff;
+ }
+ UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end)
+ {
+ UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
+ U64 micro = 0;
+ micro += 1000000ULL * diff.tv_sec;
+ micro += diff.tv_nsec / 1000ULL;
+ return micro;
+ }
+ UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end)
+ {
+ UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
+ U64 nano = 0;
+ nano += 1000000000ULL * diff.tv_sec;
+ nano += diff.tv_nsec;
+ return nano;
+ }
#else /* relies on standard C (note : clock_t measurements can be wrong when using multi-threading) */
- typedef clock_t UTIL_freq_t;
- typedef clock_t UTIL_time_t;
- UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* ticksPerSecond) { *ticksPerSecond=0; }
- UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { *x = clock(); }
- UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { (void)ticksPerSecond; return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
- UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { (void)ticksPerSecond; return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
+ typedef clock_t UTIL_time_t;
+ UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return clock(); }
+ UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
+ UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
#endif
/* returns time span in microseconds */
-UTIL_STATIC U64 UTIL_clockSpanMicro( UTIL_time_t clockStart, UTIL_freq_t ticksPerSecond )
+UTIL_STATIC U64 UTIL_clockSpanMicro( UTIL_time_t clockStart )
{
- UTIL_time_t clockEnd;
- UTIL_getTime(&clockEnd);
- return UTIL_getSpanTimeMicro(ticksPerSecond, clockStart, clockEnd);
+ UTIL_time_t const clockEnd = UTIL_getTime();
+ return UTIL_getSpanTimeMicro(clockStart, clockEnd);
}
-UTIL_STATIC void UTIL_waitForNextTick(UTIL_freq_t ticksPerSecond)
+UTIL_STATIC void UTIL_waitForNextTick(void)
{
- UTIL_time_t clockStart, clockEnd;
- UTIL_getTime(&clockStart);
+ UTIL_time_t const clockStart = UTIL_getTime();
+ UTIL_time_t clockEnd;
do {
- UTIL_getTime(&clockEnd);
- } while (UTIL_getSpanTimeNano(ticksPerSecond, clockStart, clockEnd) == 0);
+ clockEnd = UTIL_getTime();
+ } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0);
}
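(Editor's note: on the POSIX branch the new UTIL_getSpanTime() subtracts struct timespec values with an explicit nanosecond borrow; a small self-check of that arithmetic with made-up timestamps, valid only where UTIL_time_t is struct timespec.)

    #include <stdio.h>
    #include "util.h"   /* UTIL_time_t, UTIL_getSpanTimeMicro() */

    int main(void)
    {
        UTIL_time_t begin, end;
        begin.tv_sec = 10; begin.tv_nsec = 900000000;   /* 10.9 s, made-up */
        end.tv_sec   = 12; end.tv_nsec   = 100000000;   /* 12.1 s, made-up */
        /* borrow case : 1 s + 200,000,000 ns == 1,200,000 microseconds */
        printf("span = %llu microseconds \n",
               (unsigned long long)UTIL_getSpanTimeMicro(begin, end));
        return 0;
    }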
@@ -284,10 +355,6 @@ UTIL_STATIC void *UTIL_realloc(void *ptr, size_t size)
return NULL;
}
-static int g_utilDisplayLevel;
-#define UTIL_DISPLAY(...) fprintf(stderr, __VA_ARGS__)
-#define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } }
-
#ifdef _WIN32
# define UTIL_HAS_CREATEFILELIST
@@ -309,7 +376,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
hFile=FindFirstFileA(path, &cFile);
if (hFile == INVALID_HANDLE_VALUE) {
- fprintf(stderr, "Cannot open directory '%s'\n", dirName);
+ UTIL_DISPLAYLEVEL(1, "Cannot open directory '%s'\n", dirName);
return 0;
}
free(path);
@@ -363,7 +430,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
int dirLength, fnameLength, pathLength, nbFiles = 0;
if (!(dir = opendir(dirName))) {
- fprintf(stderr, "Cannot open directory '%s': %s\n", dirName, strerror(errno));
+ UTIL_DISPLAYLEVEL(1, "Cannot open directory '%s': %s\n", dirName, strerror(errno));
return 0;
}
@@ -408,7 +475,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
}
if (errno != 0) {
- fprintf(stderr, "readdir(%s) error: %s\n", dirName, strerror(errno));
+ UTIL_DISPLAYLEVEL(1, "readdir(%s) error: %s\n", dirName, strerror(errno));
free(*bufStart);
*bufStart = NULL;
}
@@ -421,7 +488,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks)
{
(void)bufStart; (void)bufEnd; (void)pos;
- fprintf(stderr, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE)\n", dirName);
+ UTIL_DISPLAYLEVEL(1, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE)\n", dirName);
return 0;
}
diff --git a/programs/zstd.1 b/programs/zstd.1
index 5a91eea281f4..8187c7403509 100644
--- a/programs/zstd.1
+++ b/programs/zstd.1
@@ -1,5 +1,5 @@
.
-.TH "ZSTD" "1" "August 2017" "zstd 1.3.1" "User Commands"
+.TH "ZSTD" "1" "September 2017" "zstd 1.3.1" "User Commands"
.
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
@@ -60,11 +60,11 @@ In most places where an integer argument is expected, an optional suffix is supp
.
.TP
\fBKiB\fR
-Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\.
+Multiply the integer by 1,024 (2\e \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\.
.
.TP
\fBMiB\fR
-Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\.
+Multiply the integer by 1,048,576 (2\e \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\.
.
.SS "Operation mode"
If multiple operation mode options are given, the last one takes effect\.
@@ -104,6 +104,13 @@ Display information related to a zstd compressed file, such as size, ratio, and
unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\.
.
.TP
+\fB\-\-long[=#]\fR
+enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present, it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\.
+.
+.IP
+Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\.
+.
+.TP
\fB\-T#\fR, \fB\-\-threads=#\fR
Compress using \fB#\fR threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to ZSTDMT_NBTHREADS_MAX==256\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\.
.
@@ -144,6 +151,10 @@ keep source file(s) after successful compression or decompression\. This is the
operate recursively on dictionaries
.
.TP
+\fB\-\-format=FORMAT\fR
+compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\.
+.
+.TP
\fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR
display help/long help and exit
.
@@ -186,6 +197,10 @@ Dictionary saved into \fBfile\fR (default name: dictionary)\.
Limit dictionary to specified size (default: 112640)\.
.
.TP
+\fB\-B#\fR
+Split input files in blocks of size # (default: no split)
+.
+.TP
\fB\-\-dictID=#\fR
A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to give a precise number instead\. Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. However, it\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\.
.
@@ -263,7 +278,10 @@ There are 8 strategies numbered from 1 to 8, from faster to stronger: 1=ZSTD_fas
Specify the maximum number of bits for a match distance\.
.
.IP
-The higher number of increases the chance to find a match which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 27 (128 MiB)\.
+A higher number increases the chance to find a match, which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\.
+.
+.IP
+Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\.
.
.TP
\fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR
@@ -322,6 +340,58 @@ Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This pa
.IP
The minimum \fIovlog\fR is 0, and the maximum is 9\. 0 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the amount of reload by a factor 2\. Default \fIovlog\fR is 6, which means "reload \fBwindowSize / 8\fR"\. Exception : the maximum compression level (22) has a default \fIovlog\fR of 9\.
.
+.TP
+\fBldmHashLog\fR=\fIldmhlog\fR, \fBldmhlog\fR=\fIldmhlog\fR
+Specify the maximum size for a hash table used for long distance matching\.
+.
+.IP
+This option is ignored unless long distance matching is enabled\.
+.
+.IP
+Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\.
+.
+.IP
+The minimum \fIldmhlog\fR is 6 and the maximum is 26 (default: 20)\.
+.
+.TP
+\fBldmSearchLength\fR=\fIldmslen\fR, \fBldmslen\fR=\fIldmslen\fR
+Specify the minimum searched length of a match for long distance matching\.
+.
+.IP
+This option is ignored unless long distance matching is enabled\.
+.
+.IP
+Larger or very small values usually decrease compression ratio\.
+.
+.IP
+The minimum \fIldmslen\fR is 4 and the maximum is 4096 (default: 64)\.
+.
+.TP
+\fBldmBucketSizeLog\fR=\fIldmblog\fR, \fBldmblog\fR=\fIldmblog\fR
+Specify the size of each bucket for the hash table used for long distance matching\.
+.
+.IP
+This option is ignored unless long distance matching is enabled\.
+.
+.IP
+Larger bucket sizes improve collision resolution but decrease compression speed\.
+.
+.IP
+The minimum \fIldmblog\fR is 0 and the maximum is 8 (default: 3)\.
+.
+.TP
+\fBldmHashEveryLog\fR=\fIldmhevery\fR, \fBldmhevery\fR=\fIldmhevery\fR
+Specify the frequency of inserting entries into the long distance matching hash table\.
+.
+.IP
+This option is ignored unless long distance matching is enabled\.
+.
+.IP
+Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\.
+.
+.IP
+The default value is \fBwlog \- ldmhlog\fR\.
+.
.SS "\-B#:"
Select the size of each compression job\. This parameter is available only when multi\-threading is enabled\. Default value is \fB4 * windowSize\fR, which means it varies depending on compression level\. \fB\-B#\fR makes it possible to select a custom value\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 1 MB, or \fBoverlapSize\fR, whichever is largest\.
.
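The long distance matching parameters above are reachable from the command line through `--zstd=`, combined with `--long` so they are not ignored; a hedged example (the values are only illustrative, the documented defaults are usually adequate):

```
# 512 MiB window, larger LDM hash table, longer minimum LDM match
zstd --long=29 --zstd=ldmhlog=24,ldmslen=32 -19 big.tar
```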
diff --git a/programs/zstd.1.md b/programs/zstd.1.md
index 4310afa1aaf8..eea68548982e 100644
--- a/programs/zstd.1.md
+++ b/programs/zstd.1.md
@@ -105,6 +105,16 @@ the last one takes effect.
* `--ultra`:
unlocks high compression levels 20+ (maximum 22), using a lot more memory.
Note that decompression will also require more memory when using these levels.
+* `--long[=#]`:
+ enables long distance matching with `#` `windowLog`. If `#` is not
+ present, it defaults to `27`.
+ This increases the window size (`windowLog`) and memory usage for both the
+ compressor and decompressor.
+ This setting is designed to improve the compression ratio for files with
+ long matches at a large distance.
+
+ Note: If `windowLog` is set to larger than 27, `--long=windowLog` or
+ `--memory=windowSize` needs to be passed to the decompressor.
* `-T#`, `--threads=#`:
Compress using `#` threads (default: 1).
If `#` is 0, attempt to detect and use the number of physical CPU cores.
@@ -137,6 +147,10 @@ the last one takes effect.
This is the default behavior.
* `-r`:
operate recursively on dictionaries
+* `--format=FORMAT`:
+ compress and decompress in other formats. If compiled with the corresponding
+ support, zstd can compress to or decompress from other compression algorithm
+ formats. Depending on the build, the available options are `gzip`, `xz`, `lzma`, and `lz4`.
* `-h`/`-H`, `--help`:
display help/long help and exit
* `-V`, `--version`:
@@ -178,6 +192,8 @@ Typical gains range from 10% (at 64KB) to x5 better (at <1KB).
Dictionary saved into `file` (default name: dictionary).
* `--maxdict=#`:
Limit dictionary to specified size (default: 112640).
+* `-B#`:
+ Split input files into blocks of size # (default: no split)
* `--dictID=#`:
A dictionary ID is a locally unique ID that a decoder can use to verify it is
using the right dictionary.
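A sketch of the new `-B#` splitting during dictionary training (sample paths, sizes, and the output name are illustrative):

```
# treat each 16 KiB block of the samples as an independent training sample
zstd --train samples/* -B16384 --maxdict=65536 -o mydict
```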
@@ -267,7 +283,11 @@ The list of available _options_:
 A larger value increases the chance to find a match, which usually
improves compression ratio.
It also increases memory requirements for the compressor and decompressor.
- The minimum _wlog_ is 10 (1 KiB) and the maximum is 27 (128 MiB).
+ The minimum _wlog_ is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32-bit
+ platforms and 31 (2 GiB) on 64-bit platforms.
+
+ Note: If `windowLog` is set to larger than 27, `--long=windowLog` or
+ `--memory=windowSize` needs to be passed to the decompressor.
- `hashLog`=_hlog_, `hlog`=_hlog_:
Specify the maximum number of bits for a hash table.
@@ -327,6 +347,47 @@ The list of available _options_:
Default _ovlog_ is 6, which means "reload `windowSize / 8`".
Exception : the maximum compression level (22) has a default _ovlog_ of 9.
+- `ldmHashLog`=_ldmhlog_, `ldmhlog`=_ldmhlog_:
+ Specify the maximum size for a hash table used for long distance matching.
+
+ This option is ignored unless long distance matching is enabled.
+
+ Bigger hash tables usually improve compression ratio at the expense of more
+ memory during compression and a decrease in compression speed.
+
+ The minimum _ldmhlog_ is 6 and the maximum is 26 (default: 20).
+
+- `ldmSearchLength`=_ldmslen_, `ldmslen`=_ldmslen_:
+ Specify the minimum searched length of a match for long distance matching.
+
+ This option is ignored unless long distance matching is enabled.
+
+ Larger or very small values usually decrease compression ratio.
+
+ The minimum _ldmslen_ is 4 and the maximum is 4096 (default: 64).
+
+- `ldmBucketSizeLog`=_ldmblog_, `ldmblog`=_ldmblog_:
+ Specify the size of each bucket for the hash table used for long distance
+ matching.
+
+ This option is ignored unless long distance matching is enabled.
+
+ Larger bucket sizes improve collision resolution but decrease compression
+ speed.
+
+ The minimum _ldmblog_ is 0 and the maximum is 8 (default: 3).
+
+- `ldmHashEveryLog`=_ldmhevery_, `ldmhevery`=_ldmhevery_:
+ Specify the frequency of inserting entries into the long distance matching
+ hash table.
+
+ This option is ignored unless long distance matching is enabled.
+
+ Larger values will improve compression speed. Deviating far from the
+ default value will likely result in a decrease in compression ratio.
+
+ The default value is `wlog - ldmhlog`.
+
### -B#:
Select the size of each compression job.
This parameter is available only when multi-threading is enabled.
diff --git a/programs/zstdcli.c b/programs/zstdcli.c
index e7eb71db6e37..3f8367341885 100644
--- a/programs/zstdcli.c
+++ b/programs/zstdcli.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -60,6 +61,8 @@
#define ZSTD_UNLZMA "unlzma"
#define ZSTD_XZ "xz"
#define ZSTD_UNXZ "unxz"
+#define ZSTD_LZ4 "lz4"
+#define ZSTD_UNLZ4 "unlz4"
#define KB *(1 <<10)
#define MB *(1 <<20)
@@ -71,8 +74,14 @@ static const char* g_defaultDictName = "dictionary";
static const unsigned g_defaultMaxDictSize = 110 KB;
static const int g_defaultDictCLevel = 3;
static const unsigned g_defaultSelectivityLevel = 9;
+static const unsigned g_defaultMaxWindowLog = 27;
#define OVERLAP_LOG_DEFAULT 9999
+#define LDM_PARAM_DEFAULT 9999 /* Default for parameters where 0 is valid */
static U32 g_overlapLog = OVERLAP_LOG_DEFAULT;
+static U32 g_ldmHashLog = 0;
+static U32 g_ldmMinMatch = 0;
+static U32 g_ldmHashEveryLog = LDM_PARAM_DEFAULT;
+static U32 g_ldmBucketSizeLog = LDM_PARAM_DEFAULT;
/*-************************************
@@ -123,6 +132,7 @@ static int usage_advanced(const char* programName)
DISPLAY( " -l : print information about zstd compressed files \n");
#ifndef ZSTD_NOCOMPRESS
DISPLAY( "--ultra : enable levels beyond %i, up to %i (requires more memory)\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel());
+ DISPLAY( "--long[=#] : enable long distance matching with given window log (default : %u)\n", g_defaultMaxWindowLog);
#ifdef ZSTD_MULTITHREAD
DISPLAY( " -T# : use # threads for compression (default:1) \n");
DISPLAY( " -B# : select size of each job (default:0==automatic) \n");
@@ -304,6 +314,10 @@ static unsigned parseCompressionParameters(const char* stringPtr, ZSTD_compressi
if (longCommandWArg(&stringPtr, "targetLength=") || longCommandWArg(&stringPtr, "tlen=")) { params->targetLength = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "strategy=") || longCommandWArg(&stringPtr, "strat=")) { params->strategy = (ZSTD_strategy)(readU32FromChar(&stringPtr)); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "overlapLog=") || longCommandWArg(&stringPtr, "ovlog=")) { g_overlapLog = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
+ if (longCommandWArg(&stringPtr, "ldmHashLog=") || longCommandWArg(&stringPtr, "ldmhlog=")) { g_ldmHashLog = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
+ if (longCommandWArg(&stringPtr, "ldmSearchLength=") || longCommandWArg(&stringPtr, "ldmslen=")) { g_ldmMinMatch = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
+ if (longCommandWArg(&stringPtr, "ldmBucketSizeLog=") || longCommandWArg(&stringPtr, "ldmblog=")) { g_ldmBucketSizeLog = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
+ if (longCommandWArg(&stringPtr, "ldmHashEveryLog=") || longCommandWArg(&stringPtr, "ldmhevery=")) { g_ldmHashEveryLog = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
return 0;
}
@@ -362,7 +376,8 @@ int main(int argCount, const char* argv[])
ultra=0,
lastCommand = 0,
nbThreads = 1,
- setRealTimePrio = 0;
+ setRealTimePrio = 0,
+ ldmFlag = 0;
unsigned bench_nbSeconds = 3; /* would be better if this value was synchronized from bench */
size_t blockSize = 0;
zstd_operation_mode operation = zom_compress;
@@ -391,10 +406,11 @@ int main(int argCount, const char* argv[])
int cover = 1;
#endif
+
/* init */
(void)recursive; (void)cLevelLast; /* not used when ZSTD_NOBENCH set */
(void)dictCLevel; (void)dictSelect; (void)dictID; (void)maxDictSize; /* not used when ZSTD_NODICT set */
- (void)ultra; (void)cLevel; /* not used when ZSTD_NOCOMPRESS set */
+ (void)ultra; (void)cLevel; (void)ldmFlag; /* not used when ZSTD_NOCOMPRESS set */
(void)memLimit; /* not used when ZSTD_NODECOMPRESS set */
if (filenameTable==NULL) { DISPLAY("zstd: %s \n", strerror(errno)); exit(1); }
filenameTable[0] = stdinmark;
@@ -413,6 +429,8 @@ int main(int argCount, const char* argv[])
if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; FIO_setCompressionType(FIO_lzmaCompression); FIO_setRemoveSrcFile(1); } /* behave like unlzma */
if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(FIO_xzCompression); FIO_setRemoveSrcFile(1); } /* behave like xz */
if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; FIO_setCompressionType(FIO_xzCompression); FIO_setRemoveSrcFile(1); } /* behave like unxz */
+    if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; FIO_setCompressionType(FIO_lz4Compression); FIO_setRemoveSrcFile(1); } /* behave like lz4 */
+    if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; FIO_setCompressionType(FIO_lz4Compression); FIO_setRemoveSrcFile(1); } /* behave like unlz4 */
memset(&compressionParams, 0, sizeof(compressionParams));
/* command switches */
@@ -501,6 +519,22 @@ int main(int argCount, const char* argv[])
if (longCommandWArg(&argument, "--maxdict=")) { maxDictSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--dictID=")) { dictID = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) CLEAN_RETURN(badusage(programName)); continue; }
+ if (longCommandWArg(&argument, "--long")) {
+ unsigned ldmWindowLog = 0;
+ ldmFlag = 1;
+ /* Parse optional window log */
+ if (*argument == '=') {
+ ++argument;
+ ldmWindowLog = readU32FromChar(&argument);
+ } else if (*argument != 0) {
+ /* Invalid character following --long */
+ CLEAN_RETURN(badusage(programName));
+ }
+ /* Only set windowLog if not already set by --zstd */
+ if (compressionParams.windowLog == 0)
+ compressionParams.windowLog = ldmWindowLog;
+ continue;
+ }
/* fall-through, will trigger bad_usage() later on */
}
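Note the interaction encoded in this hunk: `--long` only supplies a default `windowLog`, so an explicit `--zstd=wlog=` wins regardless of flag order. A small sketch (values illustrative):

```
# both commands enable LDM; the final windowLog is 25 in either case
zstd --long=30 --zstd=wlog=25 file
zstd --zstd=wlog=25 --long=30 file
```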
@@ -720,6 +754,15 @@ int main(int argCount, const char* argv[])
BMK_setBlockSize(blockSize);
BMK_setNbThreads(nbThreads);
BMK_setNbSeconds(bench_nbSeconds);
+ BMK_setLdmFlag(ldmFlag);
+ BMK_setLdmMinMatch(g_ldmMinMatch);
+ BMK_setLdmHashLog(g_ldmHashLog);
+ if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) {
+ BMK_setLdmBucketSizeLog(g_ldmBucketSizeLog);
+ }
+ if (g_ldmHashEveryLog != LDM_PARAM_DEFAULT) {
+ BMK_setLdmHashEveryLog(g_ldmHashEveryLog);
+ }
BMK_benchFiles(filenameTable, filenameIdx, dictFileName, cLevel, cLevelLast, &compressionParams, setRealTimePrio);
#endif
(void)bench_nbSeconds; (void)blockSize; (void)setRealTimePrio;
@@ -737,13 +780,13 @@ int main(int argCount, const char* argv[])
int const optimize = !coverParams.k || !coverParams.d;
coverParams.nbThreads = nbThreads;
coverParams.zParams = zParams;
- operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, NULL, &coverParams, optimize);
+ operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, &coverParams, optimize);
} else {
ZDICT_legacy_params_t dictParams;
memset(&dictParams, 0, sizeof(dictParams));
dictParams.selectivityLevel = dictSelect;
dictParams.zParams = zParams;
- operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, &dictParams, NULL, 0);
+ operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, &dictParams, NULL, 0);
}
#endif
goto _end;
@@ -787,6 +830,16 @@ int main(int argCount, const char* argv[])
#ifndef ZSTD_NOCOMPRESS
FIO_setNbThreads(nbThreads);
FIO_setBlockSize((U32)blockSize);
+ FIO_setLdmFlag(ldmFlag);
+ FIO_setLdmHashLog(g_ldmHashLog);
+ FIO_setLdmMinMatch(g_ldmMinMatch);
+ if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) {
+ FIO_setLdmBucketSizeLog(g_ldmBucketSizeLog);
+ }
+ if (g_ldmHashEveryLog != LDM_PARAM_DEFAULT) {
+ FIO_setLdmHashEveryLog(g_ldmHashEveryLog);
+ }
+
if (g_overlapLog!=OVERLAP_LOG_DEFAULT) FIO_setOverlapLog(g_overlapLog);
if ((filenameIdx==1) && outFileName)
operationResult = FIO_compressFilename(outFileName, filenameTable[0], dictFileName, cLevel, &compressionParams);
@@ -798,6 +851,13 @@ int main(int argCount, const char* argv[])
#endif
} else { /* decompression or test */
#ifndef ZSTD_NODECOMPRESS
+ if (memLimit == 0) {
+ if (compressionParams.windowLog == 0)
+ memLimit = (U32)1 << g_defaultMaxWindowLog;
+ else {
+ memLimit = (U32)1 << (compressionParams.windowLog & 31);
+ }
+ }
FIO_setMemLimit(memLimit);
if (filenameIdx==1 && outFileName)
operationResult = FIO_decompressFilename(outFileName, filenameTable[0], dictFileName);
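The last hunk above caps decompression memory at a 128 MiB window (`1 << 27`) unless a window log is supplied, so frames produced with a larger window need `--long` or `--memory` on the decompression side. A sketch, assuming `--memory` accepts the usual size suffixes (sizes illustrative):

```
zstd -d big.tar.zst                  # may be rejected by the default 128 MiB limit
zstd -d --long=30 big.tar.zst        # raise the limit via the window log
zstd -d --memory=1024MB big.tar.zst  # or via an explicit memory budget
```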
diff --git a/tests/.gitignore b/tests/.gitignore
index f408a7491213..b16e26e3d490 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -21,6 +21,7 @@ symbols
legacy
decodecorpus
pool
+poolTests
invalidDictionaries
# Tmp test directory
@@ -48,6 +49,7 @@ grillResults.txt
_*
tmp*
*.zst
+*.gz
result
out
diff --git a/tests/Makefile b/tests/Makefile
index 3be79c159057..651833bbbaae 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -1,13 +1,11 @@
-# ##########################################################################
-# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+# ################################################################
+# Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
# All rights reserved.
#
-# This Makefile is validated for Linux, macOS, *BSD, Hurd, Solaris, MSYS2 targets
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-# ##########################################################################
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
# datagen : Synthetic and parametrable data generator, for tests
# fullbench : Precisely measure speed for each zstd inner functions
# fullbench32: Same as fullbench, but forced to compile in 32-bits mode
@@ -25,18 +23,18 @@ PRGDIR = ../programs
PYTHON ?= python3
TESTARTEFACT := versionsTest namespaceTest
-DEBUGLEVEL= 1
-DEBUGFLAGS= -g -DZSTD_DEBUG=$(DEBUGLEVEL)
-CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
- -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
-CFLAGS ?= -O3
-CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
- -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
- -Wstrict-prototypes -Wundef -Wformat-security \
- -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
- -Wredundant-decls
-CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
-FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS)
+DEBUGLEVEL ?= 1
+DEBUGFLAGS = -g -DZSTD_DEBUG=$(DEBUGLEVEL)
+CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
+ -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
+CFLAGS ?= -O3
+CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
+ -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
+ -Wstrict-prototypes -Wundef -Wformat-security \
+ -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
+ -Wredundant-decls
+CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
+FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS)
ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c
@@ -46,10 +44,6 @@ ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES)
ZBUFF_FILES := $(ZSTDDIR)/deprecated/*.c
ZDICT_FILES := $(ZSTDDIR)/dictBuilder/*.c
-ZSTD_OBJ := $(patsubst %.c,%.o, $(wildcard $(ZSTD_FILES)) )
-ZBUFF_OBJ := $(patsubst %.c,%.o, $(wildcard $(ZBUFF_FILES)) )
-ZDICT_OBJ := $(patsubst %.c,%.o, $(wildcard $(ZDICT_FILES)) )
-
# Define *.exe as extension for Windows systems
ifneq (,$(filter Windows%,$(OS)))
@@ -64,8 +58,8 @@ endif
MULTITHREAD = $(MULTITHREAD_CPP) $(MULTITHREAD_LD)
VOID = /dev/null
-ZSTREAM_TESTTIME ?= -T2mn
-FUZZERTEST ?= -T5mn
+ZSTREAM_TESTTIME ?= -T90s
+FUZZERTEST ?= -T200s
ZSTDRTTEST = --test-large-data
DECODECORPUS_TESTTIME ?= -T30
@@ -77,7 +71,7 @@ all: fullbench fuzzer zstreamtest paramgrill datagen decodecorpus
all32: fullbench32 fuzzer32 zstreamtest32
-allnothread: fullbench fuzzer paramgrill datagen decodecorpus
+allnothread: fullbench fuzzer paramgrill datagen decodecorpus
dll: fuzzer-dll zstreamtest-dll
@@ -91,7 +85,7 @@ zstd-nolegacy:
$(MAKE) -C $(PRGDIR) $@
gzstd:
- $(MAKE) -C $(PRGDIR) $@
+ $(MAKE) -C $(PRGDIR) zstd HAVE_ZLIB=1
fullbench32: CPPFLAGS += -m32
fullbench fullbench32 : CPPFLAGS += $(MULTITHREAD_CPP)
@@ -167,7 +161,7 @@ datagen : $(PRGDIR)/datagen.c datagencli.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
roundTripCrash : $(ZSTD_FILES) roundTripCrash.c
- $(CC) $(FLAGS) $^ -o $@$(EXT)
+ $(CC) $(FLAGS) $(MULTITHREAD) $^ -o $@$(EXT)
longmatch : $(ZSTD_FILES) longmatch.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
@@ -192,8 +186,8 @@ else
$(CC) $(FLAGS) $^ -o $@$(EXT) -Wl,-rpath=$(ZSTDDIR) $(ZSTDDIR)/libzstd.so
endif
-poolTests : poolTests.c $(ZSTDDIR)/common/pool.c $(ZSTDDIR)/common/threading.c
- $(CC) $(FLAGS) $(MULTITHREAD) $^ -o $@$(EXT)
+poolTests : poolTests.c $(ZSTDDIR)/common/pool.c $(ZSTDDIR)/common/threading.c $(ZSTDDIR)/common/zstd_common.c $(ZSTDDIR)/common/error_private.c
+ $(CC) $(FLAGS) $(MULTITHREAD) $^ -o $@$(EXT)
namespaceTest:
if $(CC) namespaceTest.c ../lib/common/xxhash.c -o $@ ; then echo compilation should fail; exit 1 ; fi
@@ -214,7 +208,7 @@ clean:
zstreamtest$(EXT) zstreamtest32$(EXT) \
datagen$(EXT) paramgrill$(EXT) roundTripCrash$(EXT) longmatch$(EXT) \
symbols$(EXT) invalidDictionaries$(EXT) legacy$(EXT) poolTests$(EXT) \
- decodecorpus$(EXT)
+ decodecorpus$(EXT)
@echo Cleaning completed
@@ -247,7 +241,7 @@ endif
#-----------------------------------------------------------------------------
-#make tests validated only for MSYS, Linux, OSX, BSD, Hurd and Solaris targets
+# make tests validated only for below targets
#-----------------------------------------------------------------------------
ifneq (,$(filter $(HOST_OS),MSYS POSIX))
@@ -284,13 +278,13 @@ test-zstd-nolegacy: ZSTD = $(PRGDIR)/zstd-nolegacy
test-zstd-nolegacy: zstd-nolegacy zstd-playTests
test-gzstd: gzstd
- $(PRGDIR)/zstd README.md test-zstd-speed.py
- gzip README.md test-zstd-speed.py
+ $(PRGDIR)/zstd -f README.md test-zstd-speed.py
+ gzip -f README.md test-zstd-speed.py
cat README.md.zst test-zstd-speed.py.gz >zstd_gz.zst
cat README.md.gz test-zstd-speed.py.zst >gz_zstd.gz
- $(PRGDIR)/zstd -d README.md.gz -o README2.md
- $(PRGDIR)/zstd -d README.md.gz test-zstd-speed.py.gz
- $(PRGDIR)/zstd -d zstd_gz.zst gz_zstd.gz
+ $(PRGDIR)/zstd -df README.md.gz -o README2.md
+ $(PRGDIR)/zstd -df README.md.gz test-zstd-speed.py.gz
+ $(PRGDIR)/zstd -df zstd_gz.zst gz_zstd.gz
$(DIFF) -q zstd_gz gz_zstd
echo Hello World ZSTD | $(PRGDIR)/zstd -c - >hello.zst
echo Hello World GZIP | gzip -c - >hello.gz
@@ -298,6 +292,7 @@ test-gzstd: gzstd
cat hello.zst hello.gz hello.txt >hello_zst_gz_txt.gz
$(PRGDIR)/zstd -dcf hello.*
$(PRGDIR)/zstd -dcf - <hello_zst_gz_txt.gz
+ $(RM) *.gz *.zst README2.md gz_zstd zstd_gz hello.txt
test-fullbench: fullbench datagen
$(QEMU_SYS) ./fullbench -i1
@@ -321,8 +316,9 @@ test-zbuff32: zbufftest32
test-zstream: zstreamtest
$(QEMU_SYS) ./zstreamtest $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
- $(QEMU_SYS) ./zstreamtest --mt $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
- $(QEMU_SYS) ./zstreamtest --newapi $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
+ $(QEMU_SYS) ./zstreamtest --mt -t1 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
+ $(QEMU_SYS) ./zstreamtest --newapi -t1 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
+ $(QEMU_SYS) ./zstreamtest --opaqueapi -t1 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
test-zstream32: zstreamtest32
$(QEMU_SYS) ./zstreamtest32 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
@@ -378,4 +374,37 @@ test-decodecorpus-cli: decodecorpus
test-pool: poolTests
$(QEMU_SYS) ./poolTests
+test-lz4: ZSTD = LD_LIBRARY_PATH=/usr/local/lib $(PRGDIR)/zstd
+test-lz4: ZSTD_LZ4 = LD_LIBRARY_PATH=/usr/local/lib ./lz4
+test-lz4: ZSTD_UNLZ4 = LD_LIBRARY_PATH=/usr/local/lib ./unlz4
+test-lz4: zstd decodecorpus
+ ln -s $(PRGDIR)/zstd lz4
+ ln -s $(PRGDIR)/zstd unlz4
+
+ ./decodecorpus -ptmp
+ # lz4 -> zstd
+ lz4 < tmp | \
+ $(ZSTD) -d | \
+ cmp - tmp
+ lz4 < tmp | \
+ $(ZSTD_UNLZ4) | \
+ cmp - tmp
+ # zstd -> lz4
+ $(ZSTD) --format=lz4 < tmp | \
+ lz4 -d | \
+ cmp - tmp
+ $(ZSTD_LZ4) < tmp | \
+ lz4 -d | \
+ cmp - tmp
+ # zstd -> zstd
+ $(ZSTD) --format=lz4 < tmp | \
+ $(ZSTD) -d | \
+ cmp - tmp
+ # zstd -> zstd
+ $(ZSTD) < tmp | \
+ $(ZSTD) -d | \
+ cmp - tmp
+
+ rm tmp lz4 unlz4
+
endif
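To exercise the new `test-lz4` target, zstd has to be built with lz4 support first; a sketch, assuming the programs/ Makefile exposes an `HAVE_LZ4` switch analogous to the `HAVE_ZLIB=1` used for `gzstd` above, and that liblz4 is installed:

```
make -C programs zstd HAVE_LZ4=1
make -C tests test-lz4
```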
diff --git a/tests/datagencli.c b/tests/datagencli.c
index bf9601f20976..4814974e27f5 100644
--- a/tests/datagencli.c
+++ b/tests/datagencli.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/tests/decodecorpus.c b/tests/decodecorpus.c
index 23166bd67f0a..e697e519cfc1 100644
--- a/tests/decodecorpus.c
+++ b/tests/decodecorpus.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2017-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#include <limits.h>
@@ -44,7 +45,7 @@
**************************************/
#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-static U32 g_displayLevel = 0;
+static U32 g_displayLevel = 2;
#define DISPLAYUPDATE(...) \
do { \
@@ -173,20 +174,19 @@ const char *BLOCK_TYPES[] = {"raw", "rle", "compressed"};
#define MAX_DECOMPRESSED_SIZE (1ULL << MAX_DECOMPRESSED_SIZE_LOG)
#define MAX_WINDOW_LOG 22 /* Recommended support is 8MB, so limit to 4MB + mantissa */
-#define MAX_BLOCK_SIZE (128ULL * 1024)
#define MIN_SEQ_LEN (3)
-#define MAX_NB_SEQ ((MAX_BLOCK_SIZE + MIN_SEQ_LEN - 1) / MIN_SEQ_LEN)
+#define MAX_NB_SEQ ((ZSTD_BLOCKSIZE_MAX + MIN_SEQ_LEN - 1) / MIN_SEQ_LEN)
BYTE CONTENT_BUFFER[MAX_DECOMPRESSED_SIZE];
BYTE FRAME_BUFFER[MAX_DECOMPRESSED_SIZE * 2];
-BYTE LITERAL_BUFFER[MAX_BLOCK_SIZE];
+BYTE LITERAL_BUFFER[ZSTD_BLOCKSIZE_MAX];
seqDef SEQUENCE_BUFFER[MAX_NB_SEQ];
-BYTE SEQUENCE_LITERAL_BUFFER[MAX_BLOCK_SIZE]; /* storeSeq expects a place to copy literals to */
-BYTE SEQUENCE_LLCODE[MAX_BLOCK_SIZE];
-BYTE SEQUENCE_MLCODE[MAX_BLOCK_SIZE];
-BYTE SEQUENCE_OFCODE[MAX_BLOCK_SIZE];
+BYTE SEQUENCE_LITERAL_BUFFER[ZSTD_BLOCKSIZE_MAX]; /* storeSeq expects a place to copy literals to */
+BYTE SEQUENCE_LLCODE[ZSTD_BLOCKSIZE_MAX];
+BYTE SEQUENCE_MLCODE[ZSTD_BLOCKSIZE_MAX];
+BYTE SEQUENCE_OFCODE[ZSTD_BLOCKSIZE_MAX];
unsigned WKSP[1024];
@@ -237,6 +237,18 @@ typedef struct {
size_t dictContentSize;
BYTE* dictContent;
} dictInfo;
+
+typedef enum {
+ gt_frame = 0, /* generate frames */
+ gt_block, /* generate compressed blocks without block/frame headers */
+} genType_e;
+
+/*-*******************************************************
+* Global variables (set from command line)
+*********************************************************/
+U32 g_maxDecompressedSizeLog = MAX_DECOMPRESSED_SIZE_LOG; /* <= 20 */
+U32 g_maxBlockSize = ZSTD_BLOCKSIZE_MAX; /* <= 128 KB */
+
/*-*******************************************************
* Generator Functions
*********************************************************/
@@ -273,12 +285,12 @@ static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
{
/* Generate random content size */
size_t highBit;
- if (RAND(seed) & 7) {
+ if (RAND(seed) & 7 && g_maxDecompressedSizeLog > 7) {
/* do content of at least 128 bytes */
- highBit = 1ULL << RAND_range(seed, 7, MAX_DECOMPRESSED_SIZE_LOG);
+ highBit = 1ULL << RAND_range(seed, 7, g_maxDecompressedSizeLog);
} else if (RAND(seed) & 3) {
/* do small content */
- highBit = 1ULL << RAND_range(seed, 0, 7);
+ highBit = 1ULL << RAND_range(seed, 0, MIN(7, 1U << g_maxDecompressedSizeLog));
} else {
/* 0 size frame */
highBit = 0;
@@ -342,10 +354,10 @@ static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
}
}
- DISPLAYLEVEL(2, " frame content size:\t%u\n", (U32)fh.contentSize);
- DISPLAYLEVEL(2, " frame window size:\t%u\n", fh.windowSize);
- DISPLAYLEVEL(2, " content size flag:\t%d\n", contentSizeFlag);
- DISPLAYLEVEL(2, " single segment flag:\t%d\n", singleSegment);
+ DISPLAYLEVEL(3, " frame content size:\t%u\n", (U32)fh.contentSize);
+ DISPLAYLEVEL(3, " frame window size:\t%u\n", fh.windowSize);
+ DISPLAYLEVEL(3, " content size flag:\t%d\n", contentSizeFlag);
+ DISPLAYLEVEL(3, " single segment flag:\t%d\n", singleSegment);
frame->data = op + pos;
frame->header = fh;
@@ -358,7 +370,7 @@ static size_t writeLiteralsBlockSimple(U32* seed, frame_t* frame, size_t content
int const type = RAND(seed) % 2;
int const sizeFormatDesc = RAND(seed) % 8;
size_t litSize;
- size_t maxLitSize = MIN(contentSize, MAX_BLOCK_SIZE);
+ size_t maxLitSize = MIN(contentSize, g_maxBlockSize);
if (sizeFormatDesc == 0) {
/* Size_FormatDesc = ?0 */
@@ -452,7 +464,7 @@ static size_t writeHufHeader(U32* seed, HUF_CElt* hufTable, void* dst, size_t ds
return op - ostart;
}
-/* Write a Huffman coded literals block and return the litearls size */
+/* Write a Huffman coded literals block and return the literals size */
static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t contentSize)
{
BYTE* origop = (BYTE*)frame->data;
@@ -463,7 +475,7 @@ static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t con
size_t litSize;
size_t hufHeaderSize = 0;
size_t compressedSize = 0;
- size_t maxLitSize = MIN(contentSize-3, MAX_BLOCK_SIZE);
+ size_t maxLitSize = MIN(contentSize-3, g_maxBlockSize);
symbolEncodingType_e hType;
@@ -867,7 +879,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
frame->stats.offsetSymbolSet, 28)) {
Offtype = set_repeat;
} else if (!(RAND(seed) & 3)) {
- FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+ FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, DefaultMaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
Offtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
@@ -1020,9 +1032,9 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
BYTE *const header = (BYTE*)frame->data;
BYTE *op = header + 3;
- DISPLAYLEVEL(3, " block:\n");
- DISPLAYLEVEL(3, " block content size: %u\n", (U32)contentSize);
- DISPLAYLEVEL(3, " last block: %s\n", lastBlock ? "yes" : "no");
+ DISPLAYLEVEL(4, " block:\n");
+ DISPLAYLEVEL(4, " block content size: %u\n", (U32)contentSize);
+ DISPLAYLEVEL(4, " last block: %s\n", lastBlock ? "yes" : "no");
if (blockTypeDesc == 0) {
/* Raw data frame */
@@ -1052,7 +1064,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
frame->data = op;
compressedSize = writeCompressedBlock(seed, frame, contentSize, info);
- if (compressedSize > contentSize) {
+ if (compressedSize >= contentSize) { /* compressed block must be strictly smaller than uncompressed one */
blockType = 0;
memcpy(op, frame->src, contentSize);
@@ -1068,8 +1080,8 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
}
frame->src = (BYTE*)frame->src + contentSize;
- DISPLAYLEVEL(3, " block type: %s\n", BLOCK_TYPES[blockType]);
- DISPLAYLEVEL(3, " block size field: %u\n", (U32)blockSize);
+ DISPLAYLEVEL(4, " block type: %s\n", BLOCK_TYPES[blockType]);
+ DISPLAYLEVEL(4, " block size field: %u\n", (U32)blockSize);
header[0] = (BYTE) ((lastBlock | (blockType << 1) | (blockSize << 3)) & 0xff);
MEM_writeLE16(header + 1, (U16) (blockSize >> 5));
@@ -1080,7 +1092,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
static void writeBlocks(U32* seed, frame_t* frame, dictInfo info)
{
size_t contentLeft = frame->header.contentSize;
- size_t const maxBlockSize = MIN(MAX_BLOCK_SIZE, frame->header.windowSize);
+ size_t const maxBlockSize = MIN(g_maxBlockSize, frame->header.windowSize);
while (1) {
/* 1 in 4 chance of ending frame */
int const lastBlock = contentLeft > maxBlockSize ? 0 : !(RAND(seed) & 3);
@@ -1089,13 +1101,13 @@ static void writeBlocks(U32* seed, frame_t* frame, dictInfo info)
blockContentSize = contentLeft;
} else {
if (contentLeft > 0 && (RAND(seed) & 7)) {
- /* some variable size blocks */
+ /* some variable size block */
blockContentSize = RAND(seed) % (MIN(maxBlockSize, contentLeft)+1);
} else if (contentLeft > maxBlockSize && (RAND(seed) & 1)) {
- /* some full size blocks */
+ /* some full size block */
blockContentSize = maxBlockSize;
} else {
- /* some empty blocks */
+ /* some empty block */
blockContentSize = 0;
}
}
@@ -1111,7 +1123,7 @@ static void writeChecksum(frame_t* frame)
{
/* write checksum so implementations can verify their output */
U64 digest = XXH64(frame->srcStart, (BYTE*)frame->src-(BYTE*)frame->srcStart, 0);
- DISPLAYLEVEL(2, " checksum: %08x\n", (U32)digest);
+ DISPLAYLEVEL(3, " checksum: %08x\n", (U32)digest);
MEM_writeLE32(frame->data, (U32)digest);
frame->data = (BYTE*)frame->data + 4;
}
@@ -1132,8 +1144,7 @@ static void outputBuffer(const void* buf, size_t size, const char* const path)
exit(1);
}
- {
- size_t fsize = size;
+ { size_t fsize = size;
size_t written = 0;
while (written < fsize) {
written += fwrite(ip + written, 1, fsize - written, out);
@@ -1164,11 +1175,64 @@ static void initFrame(frame_t* fr)
fr->stats.rep[2] = 8;
}
+/**
+ * Generates a single zstd compressed block with no block/frame header.
+ * Returns the final seed.
+ */
+static U32 generateCompressedBlock(U32 seed, frame_t* frame, dictInfo info)
+{
+ size_t blockContentSize;
+ int blockWritten = 0;
+ BYTE* op;
+ DISPLAYLEVEL(4, "block seed: %u\n", seed);
+ initFrame(frame);
+ op = (BYTE*)frame->data;
+
+ while (!blockWritten) {
+ size_t cSize;
+ /* generate window size */
+ { int const exponent = RAND(&seed) % (MAX_WINDOW_LOG - 10);
+ int const mantissa = RAND(&seed) % 8;
+ frame->header.windowSize = (1U << (exponent + 10));
+ frame->header.windowSize += (frame->header.windowSize / 8) * mantissa;
+ }
+
+ /* generate content size */
+ { size_t const maxBlockSize = MIN(g_maxBlockSize, frame->header.windowSize);
+ if (RAND(&seed) & 15) {
+ /* some full size blocks */
+ blockContentSize = maxBlockSize;
+ } else if (RAND(&seed) & 7 && g_maxBlockSize >= (1U << 7)) {
+ /* some small blocks <= 128 bytes*/
+ blockContentSize = RAND(&seed) % (1U << 7);
+ } else {
+ /* some variable size blocks */
+ blockContentSize = RAND(&seed) % maxBlockSize;
+ }
+ }
+
+ /* try generating a compressed block */
+ frame->oldStats = frame->stats;
+ frame->data = op;
+ cSize = writeCompressedBlock(&seed, frame, blockContentSize, info);
+ if (cSize >= blockContentSize) { /* compressed size must be strictly smaller than decompressed size : https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#blocks */
+ /* data doesn't compress -- try again */
+ frame->stats = frame->oldStats; /* don't update the stats */
+ DISPLAYLEVEL(5, " can't compress block : try again \n");
+ } else {
+ blockWritten = 1;
+ DISPLAYLEVEL(4, " block size: %u \n", (U32)cSize);
+ frame->src = (BYTE*)frame->src + blockContentSize;
+ }
+ }
+ return seed;
+}
+
/* Return the final seed */
static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info)
{
/* generate a complete frame */
- DISPLAYLEVEL(1, "frame seed: %u\n", seed);
+ DISPLAYLEVEL(3, "frame seed: %u\n", seed);
initFrame(fr);
writeFrameHeader(&seed, fr, info);
@@ -1182,7 +1246,8 @@ static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info)
* Dictionary Helper Functions
*********************************************************/
/* returns 0 if successful, otherwise returns 1 upon error */
-static int genRandomDict(U32 dictID, U32 seed, size_t dictSize, BYTE* fullDict){
+static int genRandomDict(U32 dictID, U32 seed, size_t dictSize, BYTE* fullDict)
+{
/* allocate space for samples */
int ret = 0;
unsigned const numSamples = 4;
@@ -1194,27 +1259,20 @@ static int genRandomDict(U32 dictID, U32 seed, size_t dictSize, BYTE* fullDict){
}
/* generate samples */
- {
- unsigned literalValue = 1;
+ { unsigned literalValue = 1;
unsigned samplesPos = 0;
size_t currSize = 1;
while (literalValue <= 4) {
sampleSizes[literalValue - 1] = currSize;
- {
- size_t k;
+ { size_t k;
for (k = 0; k < currSize; k++) {
*(samples + (samplesPos++)) = (BYTE)literalValue;
- }
- }
+ } }
literalValue++;
currSize *= 16;
- }
- }
-
+ } }
- {
- /* create variables */
- size_t dictWriteSize = 0;
+ { size_t dictWriteSize = 0;
ZDICT_params_t zdictParams;
size_t const headerSize = MAX(dictSize/4, 256);
size_t const dictContentSize = dictSize - headerSize;
@@ -1322,7 +1380,7 @@ cleanup:
return ret;
}
-static size_t testDecodeWithDict(U32 seed)
+static size_t testDecodeWithDict(U32 seed, genType_e genType)
{
/* create variables */
size_t const dictSize = RAND(&seed) % (10 << 20) + ZDICT_DICTSIZE_MIN + ZDICT_CONTENTSIZE_MIN;
@@ -1334,45 +1392,53 @@ static size_t testDecodeWithDict(U32 seed)
}
/* generate random dictionary */
- {
- int const ret = genRandomDict(dictID, seed, dictSize, fullDict);
- if (ret != 0) {
- errorDetected = ERROR(GENERIC);
- goto dictTestCleanup;
- }
+ if (genRandomDict(dictID, seed, dictSize, fullDict)) { /* return 0 on success */
+ errorDetected = ERROR(GENERIC);
+ goto dictTestCleanup;
}
- {
- frame_t fr;
+ { frame_t fr;
+ dictInfo info;
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ size_t ret;
- /* generate frame */
- {
- size_t const headerSize = MAX(dictSize/4, 256);
+ /* get dict info */
+ { size_t const headerSize = MAX(dictSize/4, 256);
size_t const dictContentSize = dictSize-headerSize;
BYTE* const dictContent = fullDict+headerSize;
- dictInfo const info = initDictInfo(1, dictContentSize, dictContent, dictID);
- seed = generateFrame(seed, &fr, info);
+ info = initDictInfo(1, dictContentSize, dictContent, dictID);
}
/* manually decompress and check difference */
- {
- ZSTD_DCtx* const dctx = ZSTD_createDCtx();
- {
- size_t const returnValue = ZSTD_decompress_usingDict(dctx, DECOMPRESSED_BUFFER, MAX_DECOMPRESSED_SIZE,
- fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart,
- fullDict, dictSize);
- if (ZSTD_isError(returnValue)) {
- errorDetected = returnValue;
- goto dictTestCleanup;
- }
- }
-
- if (memcmp(DECOMPRESSED_BUFFER, fr.srcStart, (BYTE*)fr.src - (BYTE*)fr.srcStart) != 0) {
- errorDetected = ERROR(corruption_detected);
+ if (genType == gt_frame) {
+ /* Test frame */
+ generateFrame(seed, &fr, info);
+ ret = ZSTD_decompress_usingDict(dctx, DECOMPRESSED_BUFFER, MAX_DECOMPRESSED_SIZE,
+ fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart,
+ fullDict, dictSize);
+ } else {
+ /* Test block */
+ generateCompressedBlock(seed, &fr, info);
+ ret = ZSTD_decompressBegin_usingDict(dctx, fullDict, dictSize);
+ if (ZSTD_isError(ret)) {
+ errorDetected = ret;
+ ZSTD_freeDCtx(dctx);
goto dictTestCleanup;
}
- ZSTD_freeDCtx(dctx);
+ ret = ZSTD_decompressBlock(dctx, DECOMPRESSED_BUFFER, MAX_DECOMPRESSED_SIZE,
+ fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart);
+ }
+ ZSTD_freeDCtx(dctx);
+
+ if (ZSTD_isError(ret)) {
+ errorDetected = ret;
+ goto dictTestCleanup;
+ }
+
+ if (memcmp(DECOMPRESSED_BUFFER, fr.srcStart, (BYTE*)fr.src - (BYTE*)fr.srcStart) != 0) {
+ errorDetected = ERROR(corruption_detected);
+ goto dictTestCleanup;
}
}
@@ -1381,7 +1447,87 @@ dictTestCleanup:
return errorDetected;
}
-static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS)
+static size_t testDecodeRawBlock(frame_t* fr)
+{
+ ZSTD_DCtx* dctx = ZSTD_createDCtx();
+ size_t ret = ZSTD_decompressBegin(dctx);
+ if (ZSTD_isError(ret)) return ret;
+
+ ret = ZSTD_decompressBlock(
+ dctx,
+ DECOMPRESSED_BUFFER, MAX_DECOMPRESSED_SIZE,
+ fr->dataStart, (BYTE*)fr->data - (BYTE*)fr->dataStart);
+ ZSTD_freeDCtx(dctx);
+ if (ZSTD_isError(ret)) return ret;
+
+ if (memcmp(DECOMPRESSED_BUFFER, fr->srcStart,
+ (BYTE*)fr->src - (BYTE*)fr->srcStart) != 0) {
+ return ERROR(corruption_detected);
+ }
+
+ return ret;
+}
+
+static int runBlockTest(U32* seed)
+{
+ frame_t fr;
+ U32 const seedCopy = *seed;
+ { dictInfo const info = initDictInfo(0, 0, NULL, 0);
+ *seed = generateCompressedBlock(*seed, &fr, info);
+ }
+
+ { size_t const r = testDecodeRawBlock(&fr);
+ if (ZSTD_isError(r)) {
+ DISPLAY("Error in block mode on test seed %u: %s\n", seedCopy,
+ ZSTD_getErrorName(r));
+ return 1;
+ }
+ }
+
+ { size_t const r = testDecodeWithDict(*seed, gt_block);
+ if (ZSTD_isError(r)) {
+ DISPLAY("Error in block mode with dictionary on test seed %u: %s\n",
+ seedCopy, ZSTD_getErrorName(r));
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int runFrameTest(U32* seed)
+{
+ frame_t fr;
+ U32 const seedCopy = *seed;
+ { dictInfo const info = initDictInfo(0, 0, NULL, 0);
+ *seed = generateFrame(*seed, &fr, info);
+ }
+
+ { size_t const r = testDecodeSimple(&fr);
+ if (ZSTD_isError(r)) {
+ DISPLAY("Error in simple mode on test seed %u: %s\n",
+ seedCopy, ZSTD_getErrorName(r));
+ return 1;
+ }
+ }
+ { size_t const r = testDecodeStreaming(&fr);
+ if (ZSTD_isError(r)) {
+ DISPLAY("Error in streaming mode on test seed %u: %s\n",
+ seedCopy, ZSTD_getErrorName(r));
+ return 1;
+ }
+ }
+ { size_t const r = testDecodeWithDict(*seed, gt_frame); /* avoid big dictionaries */
+ if (ZSTD_isError(r)) {
+ DISPLAY("Error in dictionary mode on test seed %u: %s\n",
+ seedCopy, ZSTD_getErrorName(r));
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS,
+ genType_e genType)
{
unsigned fnum;
@@ -1393,39 +1539,15 @@ static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS
DISPLAY("seed: %u\n", seed);
for (fnum = 0; fnum < numFiles || clockSpan(startClock) < maxClockSpan; fnum++) {
- frame_t fr;
- U32 const seedCopy = seed;
if (fnum < numFiles)
DISPLAYUPDATE("\r%u/%u ", fnum, numFiles);
else
DISPLAYUPDATE("\r%u ", fnum);
- {
- dictInfo const info = initDictInfo(0, 0, NULL, 0);
- seed = generateFrame(seed, &fr, info);
- }
-
- { size_t const r = testDecodeSimple(&fr);
- if (ZSTD_isError(r)) {
- DISPLAY("Error in simple mode on test seed %u: %s\n", seedCopy,
- ZSTD_getErrorName(r));
- return 1;
- }
- }
- { size_t const r = testDecodeStreaming(&fr);
- if (ZSTD_isError(r)) {
- DISPLAY("Error in streaming mode on test seed %u: %s\n", seedCopy,
- ZSTD_getErrorName(r));
- return 1;
- }
- }
- {
- /* don't create a dictionary that is too big */
- size_t const r = testDecodeWithDict(seed);
- if (ZSTD_isError(r)) {
- DISPLAY("Error in dictionary mode on test seed %u: %s\n", seedCopy, ZSTD_getErrorName(r));
- return 1;
- }
+ { int const ret = (genType == gt_frame) ?
+ runFrameTest(&seed) :
+ runBlockTest(&seed);
+ if (ret) return ret;
}
}
@@ -1440,17 +1562,19 @@ static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS
*********************************************************/
static int generateFile(U32 seed, const char* const path,
- const char* const origPath)
+ const char* const origPath, genType_e genType)
{
frame_t fr;
DISPLAY("seed: %u\n", seed);
- {
- dictInfo const info = initDictInfo(0, 0, NULL, 0);
- generateFrame(seed, &fr, info);
+ { dictInfo const info = initDictInfo(0, 0, NULL, 0);
+ if (genType == gt_frame) {
+ generateFrame(seed, &fr, info);
+ } else {
+ generateCompressedBlock(seed, &fr, info);
+ }
}
-
outputBuffer(fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart, path);
if (origPath) {
outputBuffer(fr.srcStart, (BYTE*)fr.src - (BYTE*)fr.srcStart, origPath);
@@ -1459,7 +1583,7 @@ static int generateFile(U32 seed, const char* const path,
}
static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
- const char* const origPath)
+ const char* const origPath, genType_e genType)
{
char outPath[MAX_PATH];
unsigned fnum;
@@ -1471,9 +1595,12 @@ static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
DISPLAYUPDATE("\r%u/%u ", fnum, numFiles);
- {
- dictInfo const info = initDictInfo(0, 0, NULL, 0);
- seed = generateFrame(seed, &fr, info);
+ { dictInfo const info = initDictInfo(0, 0, NULL, 0);
+ if (genType == gt_frame) {
+ seed = generateFrame(seed, &fr, info);
+ } else {
+ seed = generateCompressedBlock(seed, &fr, info);
+ }
}
if (snprintf(outPath, MAX_PATH, "%s/z%06u.zst", path, fnum) + 1 > MAX_PATH) {
@@ -1497,7 +1624,8 @@ static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
}
static int generateCorpusWithDict(U32 seed, unsigned numFiles, const char* const path,
- const char* const origPath, const size_t dictSize)
+ const char* const origPath, const size_t dictSize,
+ genType_e genType)
{
char outPath[MAX_PATH];
BYTE* fullDict;
@@ -1517,8 +1645,7 @@ static int generateCorpusWithDict(U32 seed, unsigned numFiles, const char* const
}
/* randomly generate the dictionary */
- {
- int const ret = genRandomDict(dictID, seed, dictSize, fullDict);
+ { int const ret = genRandomDict(dictID, seed, dictSize, fullDict);
if (ret != 0) {
errorDetected = ret;
goto dictCleanup;
@@ -1539,8 +1666,7 @@ static int generateCorpusWithDict(U32 seed, unsigned numFiles, const char* const
}
/* generate random compressed/decompressed files */
- {
- unsigned fnum;
+ { unsigned fnum;
for (fnum = 0; fnum < MAX(numFiles, 1); fnum++) {
frame_t fr;
DISPLAYUPDATE("\r%u/%u ", fnum, numFiles);
@@ -1549,7 +1675,11 @@ static int generateCorpusWithDict(U32 seed, unsigned numFiles, const char* const
size_t const dictContentSize = dictSize-headerSize;
BYTE* const dictContent = fullDict+headerSize;
dictInfo const info = initDictInfo(1, dictContentSize, dictContent, dictID);
- seed = generateFrame(seed, &fr, info);
+ if (genType == gt_frame) {
+ seed = generateFrame(seed, &fr, info);
+ } else {
+ seed = generateCompressedBlock(seed, &fr, info);
+ }
}
if (numFiles != 0) {
@@ -1626,9 +1756,13 @@ static void advancedUsage(const char* programName)
{
usage(programName);
DISPLAY( "\n");
- DISPLAY( "Advanced arguments :\n");
- DISPLAY( " --content-size : always include the content size in the frame header\n");
- DISPLAY( " --use-dict=# : include a dictionary used to decompress the corpus\n");
+ DISPLAY( "Advanced arguments :\n");
+ DISPLAY( " --content-size : always include the content size in the frame header\n");
+ DISPLAY( " --use-dict=# : include a dictionary used to decompress the corpus\n");
+ DISPLAY( " --gen-blocks : generate raw compressed blocks without block/frame headers\n");
+ DISPLAY( " --max-block-size-log=# : max block size log, must be in range [2, 17]\n");
+ DISPLAY( " --max-content-size-log=# : max content size log, must be <= 20\n");
+ DISPLAY( " (this is ignored with gen-blocks)\n");
}
/*! readU32FromChar() :
@@ -1675,6 +1809,7 @@ int main(int argc, char** argv)
const char* origPath = NULL;
int useDict = 0;
unsigned dictSize = (10 << 10); /* 10 kB default */
+ genType_e genType = gt_frame;
int argNb;
@@ -1738,6 +1873,17 @@ int main(int argc, char** argv)
} else if (longCommandWArg(&argument, "use-dict=")) {
dictSize = readU32FromChar(&argument);
useDict = 1;
+ } else if (strcmp(argument, "gen-blocks") == 0) {
+ genType = gt_block;
+ } else if (longCommandWArg(&argument, "max-block-size-log=")) {
+ U32 value = readU32FromChar(&argument);
+            if (value >= 2 && value <= 17) {  /* log value; the usage text above documents the range [2, 17] */
+ g_maxBlockSize = 1U << value;
+ }
+ } else if (longCommandWArg(&argument, "max-content-size-log=")) {
+ U32 value = readU32FromChar(&argument);
+ g_maxDecompressedSizeLog =
+ MIN(MAX_DECOMPRESSED_SIZE_LOG, value);
} else {
advancedUsage(argv[0]);
return 1;
@@ -1754,7 +1900,7 @@ int main(int argc, char** argv)
}
if (testMode) {
- return runTestMode(seed, numFiles, testDuration);
+ return runTestMode(seed, numFiles, testDuration, genType);
} else {
if (testDuration) {
DISPLAY("Error: -T requires test mode (-t)\n\n");
@@ -1770,12 +1916,12 @@ int main(int argc, char** argv)
}
if (numFiles == 0 && useDict == 0) {
- return generateFile(seed, path, origPath);
+ return generateFile(seed, path, origPath, genType);
} else if (useDict == 0){
- return generateCorpus(seed, numFiles, path, origPath);
+ return generateCorpus(seed, numFiles, path, origPath, genType);
} else {
/* should generate files with a dictionary */
- return generateCorpusWithDict(seed, numFiles, path, origPath, dictSize);
+ return generateCorpusWithDict(seed, numFiles, path, origPath, dictSize, genType);
}
}
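The new `--gen-blocks` mode drives `generateCompressedBlock()` instead of `generateFrame()`; a usage sketch of the extended CLI, using only flag spellings visible in this patch (`-p` as in the test-lz4 recipe, `-t`/`-T` per the usage and error strings, values illustrative):

```
# generate a sample with the new raw-block mode
./decodecorpus --gen-blocks --max-block-size-log=15 -ptmp

# run round-trip tests on generated blocks for a while
./decodecorpus -t -T30 --gen-blocks
```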
diff --git a/tests/fullbench.c b/tests/fullbench.c
index 78a70940f9d2..db00ce217b0e 100644
--- a/tests/fullbench.c
+++ b/tests/fullbench.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -375,6 +376,7 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
skippedSize = frameHeaderSize + ZSTD_blockHeaderSize;
memcpy(buff2, dstBuff+skippedSize, g_cSize-skippedSize);
srcSize = srcSize > 128 KB ? 128 KB : srcSize; /* speed relative to block */
+ ZSTD_decompressBegin(g_zdc);
break;
}
case 32: /* ZSTD_decodeSeqHeaders */
@@ -421,8 +423,6 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
{ U32 loopNb;
# define TIME_SEC_MICROSEC (1*1000000ULL) /* 1 second */
U64 const clockLoop = TIMELOOP_S * TIME_SEC_MICROSEC;
- UTIL_freq_t ticksPerSecond;
- UTIL_initTimer(&ticksPerSecond);
DISPLAY("%2i- %-30.30s : \r", benchNb, benchName);
for (loopNb = 1; loopNb <= g_nbIterations; loopNb++) {
UTIL_time_t clockStart;
@@ -430,13 +430,13 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
U32 nbRounds;
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
- UTIL_getTime(&clockStart);
- for (nbRounds=0; UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop; nbRounds++) {
+ UTIL_waitForNextTick();
+ clockStart = UTIL_getTime();
+ for (nbRounds=0; UTIL_clockSpanMicro(clockStart) < clockLoop; nbRounds++) {
benchResult = benchFunction(dstBuff, dstBuffSize, buff2, src, srcSize);
if (ZSTD_isError(benchResult)) { DISPLAY("ERROR ! %s() => %s !! \n", benchName, ZSTD_getErrorName(benchResult)); exit(1); }
}
- { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart);
double const averageTime = (double)clockSpanMicro / TIME_SEC_MICROSEC / nbRounds;
if (averageTime < bestTime) bestTime = averageTime;
DISPLAY("%2i- %-30.30s : %7.1f MB/s (%9u)\r", loopNb, benchName, (double)srcSize / (1 MB) / bestTime, (U32)benchResult);
diff --git a/tests/fuzz/Makefile b/tests/fuzz/Makefile
index da22ed0d0baa..d9b00fd2af79 100644
--- a/tests/fuzz/Makefile
+++ b/tests/fuzz/Makefile
@@ -1,97 +1,97 @@
-# ##########################################################################
+# ################################################################
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
-# This Makefile is validated for Linux, and macOS targets
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-# ##########################################################################
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
+# Optionally user defined flags
CFLAGS ?= -O3
CXXFLAGS ?= -O3
+CPPFLAGS ?=
+LDFLAGS ?=
+ARFLAGS ?=
+LIB_FUZZING_ENGINE ?= libregression.a
+PYTHON ?= python
+ifeq ($(shell uname), Darwin)
+ DOWNLOAD?=curl -L -o
+else
+ DOWNLOAD?=wget -O
+endif
+CORPORA_URL_PREFIX:=https://github.com/facebook/zstd/releases/download/fuzz-corpora/
ZSTDDIR = ../../lib
PRGDIR = ../../programs
FUZZ_CPPFLAGS := -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
-I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) \
- -DZSTD_DEBUG=1 -DMEM_FORCE_MEMORY_ACCESS=0 \
- -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION $(CPPFLAGS)
-FUZZ_CFLAGS := -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
+ $(CPPFLAGS)
+FUZZ_EXTRA_FLAGS := -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wformat-security \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-Wredundant-decls \
- -g -fno-omit-frame-pointer $(CFLAGS)
-FUZZ_CXXFLAGS := -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
- -Wstrict-aliasing=1 -Wswitch-enum \
- -Wdeclaration-after-statement -Wstrict-prototypes -Wundef \
- -Wformat-security -Wvla -Wformat=2 -Winit-self -Wfloat-equal \
- -Wwrite-strings -Wredundant-decls \
- -g -fno-omit-frame-pointer -std=c++11 $(CXXFLAGS)
+ -g -fno-omit-frame-pointer
+FUZZ_CFLAGS := $(FUZZ_EXTRA_FLAGS) $(CFLAGS)
+FUZZ_CXXFLAGS := $(FUZZ_EXTRA_FLAGS) -std=c++11 $(CXXFLAGS)
FUZZ_LDFLAGS := $(LDFLAGS)
FUZZ_ARFLAGS := $(ARFLAGS)
FUZZ_TARGET_FLAGS = $(FUZZ_CPPFLAGS) $(FUZZ_CXXFLAGS) $(FUZZ_LDFLAGS)
-FUZZ_HEADERS := fuzz_helpers.h fuzz.h
+FUZZ_HEADERS := fuzz_helpers.h fuzz.h zstd_helpers.h
+FUZZ_SRC := zstd_helpers.c
-ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c
-ZSTDCOMP_FILES := $(ZSTDDIR)/compress/*.c
-ZSTDDECOMP_FILES := $(ZSTDDIR)/decompress/*.c
-ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES)
+ZSTDCOMMON_SRC := $(ZSTDDIR)/common/*.c
+ZSTDCOMP_SRC := $(ZSTDDIR)/compress/*.c
+ZSTDDECOMP_SRC := $(ZSTDDIR)/decompress/*.c
+FUZZ_SRC := \
+ $(FUZZ_SRC) \
+ $(ZSTDDECOMP_SRC) \
+ $(ZSTDCOMMON_SRC) \
+ $(ZSTDCOMP_SRC)
-ZSTD_OBJ := $(patsubst %.c,%.o, $(wildcard $(ZSTD_FILES)))
+FUZZ_OBJ := $(patsubst %.c,%.o, $(wildcard $(FUZZ_SRC)))
-LIBFUZZER ?= -lFuzzer
-.PHONY: default all clean
+.PHONY: default all clean cleanall
default: all
-all: round_trip simple_decompress
+FUZZ_TARGETS := \
+ simple_round_trip \
+ stream_round_trip \
+ block_round_trip \
+ simple_decompress \
+ stream_decompress \
+ block_decompress
+
+all: $(FUZZ_TARGETS)
%.o: %.c
$(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $^ -c -o $@
-simple_round_trip: $(FUZZ_HEADERS) $(ZSTD_OBJ) simple_round_trip.o
- $(CXX) $(FUZZ_TARGET_FLAGS) $(ZSTD_OBJ) simple_round_trip.o $(LIBFUZZER) -o $@
-
-stream_round_trip: $(FUZZ_HEADERS) $(ZSTD_OBJ) stream_round_trip.o
- $(CXX) $(FUZZ_TARGET_FLAGS) $(ZSTD_OBJ) stream_round_trip.o $(LIBFUZZER) -o $@
-
-simple_decompress: $(FUZZ_HEADERS) $(ZSTD_OBJ) simple_decompress.o
- $(CXX) $(FUZZ_TARGET_FLAGS) $(ZSTD_OBJ) simple_decompress.o $(LIBFUZZER) -o $@
+simple_round_trip: $(FUZZ_HEADERS) $(FUZZ_OBJ) simple_round_trip.o
+ $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) simple_round_trip.o $(LIB_FUZZING_ENGINE) -o $@
-stream_decompress: $(FUZZ_HEADERS) $(ZSTD_OBJ) stream_decompress.o
- $(CXX) $(FUZZ_TARGET_FLAGS) $(ZSTD_OBJ) stream_decompress.o $(LIBFUZZER) -o $@
-
-libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h regression_driver.o
- $(AR) $(FUZZ_ARFLAGS) $@ regression_driver.o
+stream_round_trip: $(FUZZ_HEADERS) $(FUZZ_OBJ) stream_round_trip.o
+ $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) stream_round_trip.o $(LIB_FUZZING_ENGINE) -o $@
-%-regression: libregression.a
- $(RM) $*
- $(MAKE) $* LDFLAGS="$(FUZZ_LDFLAGS) -L." LIBFUZZER=-lregression
+block_round_trip: $(FUZZ_HEADERS) $(FUZZ_OBJ) block_round_trip.o
+ $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) block_round_trip.o $(LIB_FUZZING_ENGINE) -o $@
-%-regression-test: %-regression
- ./$* corpora/$*
+simple_decompress: $(FUZZ_HEADERS) $(FUZZ_OBJ) simple_decompress.o
+ $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) simple_decompress.o $(LIB_FUZZING_ENGINE) -o $@
-regression-test: \
- simple_round_trip-regression-test \
- stream_round_trip-regression-test \
- simple_decompress-regression-test \
- stream_decompress-regression-test
+stream_decompress: $(FUZZ_HEADERS) $(FUZZ_OBJ) stream_decompress.o
+ $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) stream_decompress.o $(LIB_FUZZING_ENGINE) -o $@
-%-msan: clean
- $(MAKE) $* CFLAGS="-fsanitize=memory $(FUZZ_CFLAGS)" \
- CXXFLAGS="-fsanitize=memory $(FUZZ_CXXFLAGS)"
+block_decompress: $(FUZZ_HEADERS) $(FUZZ_OBJ) block_decompress.o
+ $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) block_decompress.o $(LIB_FUZZING_ENGINE) -o $@
-UASAN_FLAGS := -fsanitize=address,undefined -fno-sanitize-recover=undefined \
- -fno-sanitize=pointer-overflow
-%-uasan: clean
- $(MAKE) $* CFLAGS="$(FUZZ_CFLAGS) $(UASAN_FLAGS)" \
- CXXFLAGS="$(FUZZ_CXXFLAGS) $(UASAN_FLAGS)"
+libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h regression_driver.o
+ $(AR) $(FUZZ_ARFLAGS) $@ regression_driver.o
# Install libfuzzer (not usable for MSAN testing)
# Provided for convenience. To use this library run make libFuzzer and
@@ -100,9 +100,28 @@ UASAN_FLAGS := -fsanitize=address,undefined -fno-sanitize-recover=undefined \
libFuzzer:
@$(RM) -rf Fuzzer
@git clone https://chromium.googlesource.com/chromium/llvm-project/llvm/lib/Fuzzer
- @./Fuzzer/build.sh
+ @cd Fuzzer && ./build.sh
+
+corpora/%_seed_corpus.zip:
+ @mkdir -p corpora
+ $(DOWNLOAD) $@ $(CORPORA_URL_PREFIX)$*_seed_corpus.zip
+
+corpora/%: corpora/%_seed_corpus.zip
+ unzip -q $^ -d $@
+
+.PHONY: corpora
+corpora: $(patsubst %,corpora/%,$(FUZZ_TARGETS))
+
+regressiontest: corpora
+ CC="$(CC)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CXXFLAGS)" LDFLAGS="$(LDFLAGS)" $(PYTHON) ./fuzz.py build all
+ $(PYTHON) ./fuzz.py regression all
clean:
@$(MAKE) -C $(ZSTDDIR) clean
- @$(RM) -f *.a *.o
- @$(RM) -f simple_round_trip stream_round_trip simple_decompress stream_decompress
+ @$(RM) *.a *.o
+ @$(RM) simple_round_trip stream_round_trip simple_decompress \
+ stream_decompress block_decompress block_round_trip
+
+cleanall:
+ @$(RM) -r Fuzzer
+ @$(RM) -r corpora
diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md
index 38a4f3d1ab02..f184be646ef6 100644
--- a/tests/fuzz/README.md
+++ b/tests/fuzz/README.md
@@ -1,34 +1,96 @@
# Fuzzing
Each fuzzing target can be built with multiple engines.
+Zstd provides a fuzz corpus for each target that can be downloaded with
+the command:
+
+```
+make corpora
+```
+
+It will download each corpus into `./corpora/TARGET`.
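+
+For example, to fetch the corpus for a single target you can use the pattern
+rule from the Makefile above directly (a sketch; it assumes the Makefile's
+`DOWNLOAD` and `CORPORA_URL_PREFIX` variables are set as usual):
+
+```
+make corpora/simple_round_trip
+```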
+
+## fuzz.py
+
+`fuzz.py` is a helper script for building and running fuzzers.
+Run `./fuzz.py -h` to list the commands, and `./fuzz.py COMMAND -h` for
+command-specific help.
+
+### Generating Data
+
+`fuzz.py` provides a utility to generate seed data for each fuzzer.
+
+```
+make -C ../tests decodecorpus
+./fuzz.py gen TARGET
+```
+
+By default it outputs 100 samples, each at most 8KB, into `corpora/TARGET-seed`;
+this can be configured with the `--number`, `--max-size-log`, and `--seed`
+flags.
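+
+For example, an illustrative invocation (the exact values are arbitrary):
+
+```
+./fuzz.py gen simple_round_trip --number 1000 --max-size-log 12 \
+    --seed corpora/simple_round_trip-seed
+```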
+
+### Build
+It respects the usual build environment variables `CC`, `CFLAGS`, etc.
+The environment variables can be overridden with the corresponding flags
+`--cc`, `--cflags`, etc.
+The specific fuzzing engine is selected with `LIB_FUZZING_ENGINE` or
+`--lib-fuzzing-engine`; the default is `libregression.a`.
+It has flags to easily set up sanitizers (`--enable-{a,ub,m}san`) and
+coverage instrumentation (`--enable-coverage`).
+It sets sane defaults, which can be overridden with flags such as `--debug`
+and `--enable-ubsan-pointer-overflow`.
+Run `./fuzz.py build -h` for help.
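+
+For example, a plain ASAN+UBSAN build of every target against the default
+`libregression.a` engine (the same command used for regression testing below):
+
+```
+CC=clang CXX=clang++ ./fuzz.py build all --enable-asan --enable-ubsan
+```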
+
+### Running Fuzzers
+
+`./fuzz.py` can run `libfuzzer`, `afl`, and `regression` tests.
+See the help of the relevant command for options.
+Flags not parsed by `fuzz.py` are passed to the fuzzing engine.
+The command used to run the fuzzer is printed for debugging.
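+
+For example, everything after the target name that `fuzz.py` does not
+recognize is forwarded to the engine, so a libFuzzer run might look like
+(flag values are illustrative):
+
+```
+./fuzz.py libfuzzer simple_decompress -max_len=8192 -runs=100000
+```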
## LibFuzzer
-You can install `libFuzzer` with `make libFuzzer`. Then you can make each target
-with `make target LDFLAGS=-L. CC=clang CXX=clang++`.
+```
+# Build libfuzzer if necessary
+make libFuzzer
+# Build the fuzz targets
+./fuzz.py build all --enable-coverage --enable-asan --enable-ubsan --lib-fuzzing-engine Fuzzer/libFuzzer.a --cc clang --cxx clang++
+# OR equivalently
+CC=clang CXX=clang++ LIB_FUZZING_ENGINE=Fuzzer/libFuzzer.a ./fuzz.py build all --enable-coverage --enable-asan --enable-ubsan
+# Run the fuzzer
+./fuzz.py libfuzzer TARGET -max_len=8192 -jobs=4
+```
+
+where `TARGET` could be `simple_decompress`, `stream_round_trip`, etc.
+
+### MSAN
+
+Fuzzing with `libFuzzer` and `MSAN` will require building a C++ standard library
+and libFuzzer with MSAN.
+`fuzz.py` respects the environment variables / flags `MSAN_EXTRA_CPPFLAGS`,
+`MSAN_EXTRA_CFLAGS`, `MSAN_EXTRA_CXXFLAGS`, `MSAN_EXTRA_LDFLAGS` to easily pass
+the extra parameters only for MSAN.
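+
+A sketch of such a build; the include/library paths for the MSAN-instrumented
+libc++ and the path to the MSAN build of libFuzzer are placeholders for your
+own setup:
+
+```
+MSAN_EXTRA_CXXFLAGS="-stdlib=libc++ -I/path/to/msan-libcxx/include" \
+MSAN_EXTRA_LDFLAGS="-L/path/to/msan-libcxx/lib -lc++abi" \
+CC=clang CXX=clang++ LIB_FUZZING_ENGINE=/path/to/libFuzzer-msan.a \
+./fuzz.py build all --enable-msan
+```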
## AFL
-The regression driver also serves as a binary for `afl-fuzz`. You can make each
-target with one of these commands:
+The default `LIB_FUZZING_ENGINE` is `libregression.a`, which produces a binary
+that AFL can use.
```
-make target-regression CC=afl-clang CXX=afl-clang++
-AFL_MSAN=1 make target-regression-msan CC=afl-clang CXX=afl-clang++
-AFL_ASAN=1 make target-regression-uasan CC=afl-clang CXX=afl-clang++
+# Build the fuzz targets
+CC=afl-clang CXX=afl-clang++ ./fuzz.py build all --enable-asan --enable-ubsan
+# Run the fuzzer without a memory limit because of ASAN
+./fuzz.py afl TARGET -m none
```
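+
+Since the regression driver simply treats its arguments as files (or
+directories) of inputs, the instrumented binary can also be run by hand on a
+downloaded corpus, e.g. (assuming the corpora were fetched with `make corpora`):
+
+```
+./simple_round_trip corpora/simple_round_trip
+```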
-Then run as `./target @@`.
-
## Regression Testing
-Each fuzz target has a corpus checked into the repo under `fuzz/corpora/`.
-You can run regression tests on the corpora to ensure that inputs which
-previously exposed bugs still pass. You can make these targets to run the
-regression tests with different sanitizers.
+The regression test supports the `all` target to run all the fuzzers in one
+command.
```
-make regression-test
-make regression-test-msan
-make regression-test-uasan
+CC=clang CXX=clang++ ./fuzz.py build all --enable-asan --enable-ubsan
+./fuzz.py regression all
+CC=clang CXX=clang++ ./fuzz.py build all --enable-msan
+./fuzz.py regression all
```
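+
+The Makefile's `regressiontest` target (added above) wraps the same
+build-and-run steps and downloads the corpora first, so a rough equivalent
+(without the sanitizer flags unless you add them to `CFLAGS`) is:
+
+```
+make regressiontest CC=clang CXX=clang++
+```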
diff --git a/tests/fuzz/block_decompress.c b/tests/fuzz/block_decompress.c
new file mode 100644
index 000000000000..3cccc32f4e52
--- /dev/null
+++ b/tests/fuzz/block_decompress.c
@@ -0,0 +1,51 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+/**
+ * This fuzz target attempts to decompress the fuzzed data with the block
+ * decompression function to ensure the decompressor never crashes.
+ */
+
+#define ZSTD_STATIC_LINKING_ONLY
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "fuzz_helpers.h"
+#include "zstd.h"
+
+static ZSTD_DCtx *dctx = NULL;
+static void* rBuf = NULL;
+static size_t bufSize = 0;
+
+int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
+{
+ size_t const neededBufSize = ZSTD_BLOCKSIZE_MAX;
+
+ FUZZ_seed(&src, &size);
+
+ /* Allocate all buffers and contexts if not already allocated */
+ if (neededBufSize > bufSize) {
+ free(rBuf);
+ rBuf = malloc(neededBufSize);
+ bufSize = neededBufSize;
+ FUZZ_ASSERT(rBuf);
+ }
+ if (!dctx) {
+ dctx = ZSTD_createDCtx();
+ FUZZ_ASSERT(dctx);
+ }
+ ZSTD_decompressBegin(dctx);
+ ZSTD_decompressBlock(dctx, rBuf, neededBufSize, src, size);
+
+#ifndef STATEFUL_FUZZING
+ ZSTD_freeDCtx(dctx); dctx = NULL;
+#endif
+ return 0;
+}
diff --git a/tests/fuzz/block_round_trip.c b/tests/fuzz/block_round_trip.c
new file mode 100644
index 000000000000..64ca5fc40f9c
--- /dev/null
+++ b/tests/fuzz/block_round_trip.c
@@ -0,0 +1,92 @@
+/**
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+/**
+ * This fuzz target performs a zstd round-trip test (compress & decompress),
+ * compares the result with the original, and calls abort() on corruption.
+ */
+
+#define ZSTD_STATIC_LINKING_ONLY
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "fuzz_helpers.h"
+#include "zstd.h"
+
+static const int kMaxClevel = 19;
+
+static ZSTD_CCtx *cctx = NULL;
+static ZSTD_DCtx *dctx = NULL;
+static void* cBuf = NULL;
+static void* rBuf = NULL;
+static size_t bufSize = 0;
+static uint32_t seed;
+
+static size_t roundTripTest(void *result, size_t resultCapacity,
+ void *compressed, size_t compressedCapacity,
+ const void *src, size_t srcSize)
+{
+ int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
+ ZSTD_parameters const params = ZSTD_getParams(cLevel, srcSize, 0);
+ size_t ret = ZSTD_compressBegin_advanced(cctx, NULL, 0, params, srcSize);
+ FUZZ_ZASSERT(ret);
+
+ ret = ZSTD_compressBlock(cctx, compressed, compressedCapacity, src, srcSize);
+ FUZZ_ZASSERT(ret);
+ if (ret == 0) {
+ FUZZ_ASSERT(resultCapacity >= srcSize);
+ memcpy(result, src, srcSize);
+ return srcSize;
+ }
+ ZSTD_decompressBegin(dctx);
+ return ZSTD_decompressBlock(dctx, result, resultCapacity, compressed, ret);
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
+{
+ size_t neededBufSize;
+
+ seed = FUZZ_seed(&src, &size);
+ neededBufSize = size;
+ if (size > ZSTD_BLOCKSIZE_MAX)
+ return 0;
+
+ /* Allocate all buffers and contexts if not already allocated */
+ if (neededBufSize > bufSize || !cBuf || !rBuf) {
+ free(cBuf);
+ free(rBuf);
+ cBuf = malloc(neededBufSize);
+ rBuf = malloc(neededBufSize);
+ bufSize = neededBufSize;
+ FUZZ_ASSERT(cBuf && rBuf);
+ }
+ if (!cctx) {
+ cctx = ZSTD_createCCtx();
+ FUZZ_ASSERT(cctx);
+ }
+ if (!dctx) {
+ dctx = ZSTD_createDCtx();
+ FUZZ_ASSERT(dctx);
+ }
+
+ {
+ size_t const result =
+ roundTripTest(rBuf, neededBufSize, cBuf, neededBufSize, src, size);
+ FUZZ_ZASSERT(result);
+ FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
+ FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
+ }
+#ifndef STATEFUL_FUZZING
+ ZSTD_freeCCtx(cctx); cctx = NULL;
+ ZSTD_freeDCtx(dctx); dctx = NULL;
+#endif
+ return 0;
+}
diff --git a/tests/fuzz/default.options b/tests/fuzz/default.options
new file mode 100644
index 000000000000..8ea8588375d7
--- /dev/null
+++ b/tests/fuzz/default.options
@@ -0,0 +1,2 @@
+[libfuzzer]
+max_len = 8192
diff --git a/tests/fuzz/fuzz.h b/tests/fuzz/fuzz.h
index 5b71aba89b43..a64845473c2b 100644
--- a/tests/fuzz/fuzz.h
+++ b/tests/fuzz/fuzz.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
@@ -12,15 +12,17 @@
* Fuzz targets have some common parameters passed as macros during compilation.
* Check the documentation for each individual fuzzer for more parameters.
*
- * @param STATEFULL_FUZZING:
+ * @param STATEFUL_FUZZING:
* Define this to reuse state between fuzzer runs. This can be useful to
* test code paths which are only executed when contexts are reused.
* WARNING: Makes reproducing crashes much harder.
* Default: Not defined.
* @param FUZZ_RNG_SEED_SIZE:
* The number of bytes of the source to look at when constructing a seed
- * for the deterministic RNG.
- * Default: 128.
+ * for the deterministic RNG. These bytes are discarded before passing
+ * the data to zstd functions. Every fuzzer initializes the RNG exactly
+ * once before doing anything else, even if it is unused.
+ * Default: 4.
* @param ZSTD_DEBUG:
* This is a parameter for the zstd library. Defining `ZSTD_DEBUG=1`
* enables assert() statements in the zstd library. Higher levels enable
@@ -41,12 +43,20 @@
#define FUZZ_H
#ifndef FUZZ_RNG_SEED_SIZE
-# define FUZZ_RNG_SEED_SIZE 128
+# define FUZZ_RNG_SEED_SIZE 4
#endif
#include <stddef.h>
#include <stdint.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size);
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/tests/fuzz/fuzz.py b/tests/fuzz/fuzz.py
new file mode 100755
index 000000000000..b591e4f6734e
--- /dev/null
+++ b/tests/fuzz/fuzz.py
@@ -0,0 +1,818 @@
+#!/usr/bin/env python
+
+# ################################################################
+# Copyright (c) 2016-present, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ##########################################################################
+
+import argparse
+import contextlib
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+
+def abs_join(a, *p):
+ return os.path.abspath(os.path.join(a, *p))
+
+
+# Constants
+FUZZ_DIR = os.path.abspath(os.path.dirname(__file__))
+CORPORA_DIR = abs_join(FUZZ_DIR, 'corpora')
+TARGETS = [
+ 'simple_round_trip',
+ 'stream_round_trip',
+ 'block_round_trip',
+ 'simple_decompress',
+ 'stream_decompress',
+ 'block_decompress',
+]
+ALL_TARGETS = TARGETS + ['all']
+FUZZ_RNG_SEED_SIZE = 4
+
+# Standard environment variables
+CC = os.environ.get('CC', 'cc')
+CXX = os.environ.get('CXX', 'c++')
+CPPFLAGS = os.environ.get('CPPFLAGS', '')
+CFLAGS = os.environ.get('CFLAGS', '-O3')
+CXXFLAGS = os.environ.get('CXXFLAGS', CFLAGS)
+LDFLAGS = os.environ.get('LDFLAGS', '')
+MFLAGS = os.environ.get('MFLAGS', '-j')
+
+# Fuzzing environment variables
+LIB_FUZZING_ENGINE = os.environ.get('LIB_FUZZING_ENGINE', 'libregression.a')
+AFL_FUZZ = os.environ.get('AFL_FUZZ', 'afl-fuzz')
+DECODECORPUS = os.environ.get('DECODECORPUS',
+ abs_join(FUZZ_DIR, '..', 'decodecorpus'))
+
+# Sanitizer environment variables
+MSAN_EXTRA_CPPFLAGS = os.environ.get('MSAN_EXTRA_CPPFLAGS', '')
+MSAN_EXTRA_CFLAGS = os.environ.get('MSAN_EXTRA_CFLAGS', '')
+MSAN_EXTRA_CXXFLAGS = os.environ.get('MSAN_EXTRA_CXXFLAGS', '')
+MSAN_EXTRA_LDFLAGS = os.environ.get('MSAN_EXTRA_LDFLAGS', '')
+
+
+def create(r):
+ d = os.path.abspath(r)
+ if not os.path.isdir(d):
+ os.mkdir(d)
+ return d
+
+
+def check(r):
+ d = os.path.abspath(r)
+ if not os.path.isdir(d):
+ return None
+ return d
+
+
+@contextlib.contextmanager
+def tmpdir():
+ dirpath = tempfile.mkdtemp()
+ try:
+ yield dirpath
+ finally:
+ shutil.rmtree(dirpath, ignore_errors=True)
+
+
+def parse_targets(in_targets):
+ targets = set()
+ for target in in_targets:
+ if not target:
+ continue
+ if target == 'all':
+ targets = targets.union(TARGETS)
+ elif target in TARGETS:
+ targets.add(target)
+ else:
+ raise RuntimeError('{} is not a valid target'.format(target))
+ return list(targets)
+
+
+def targets_parser(args, description):
+ parser = argparse.ArgumentParser(prog=args.pop(0), description=description)
+ parser.add_argument(
+ 'TARGET',
+ nargs='*',
+ type=str,
+ help='Fuzz target(s) to build {{{}}}'.format(', '.join(ALL_TARGETS)))
+ args, extra = parser.parse_known_args(args)
+ args.extra = extra
+
+ args.TARGET = parse_targets(args.TARGET)
+
+ return args
+
+
+def parse_env_flags(args, flags):
+ """
+ Look for flags set by environment variables.
+ """
+ san_flags = ','.join(re.findall('-fsanitize=((?:[a-z]+,?)+)', flags))
+ nosan_flags = ','.join(re.findall('-fno-sanitize=((?:[a-z]+,?)+)', flags))
+
+ def set_sanitizer(sanitizer, default, san, nosan):
+ if sanitizer in san and sanitizer in nosan:
+ raise RuntimeError('-fno-sanitize={s} and -fsanitize={s} passed'.
+ format(s=sanitizer))
+ if sanitizer in san:
+ return True
+ if sanitizer in nosan:
+ return False
+ return default
+
+ san = set(san_flags.split(','))
+ nosan = set(nosan_flags.split(','))
+
+ args.asan = set_sanitizer('address', args.asan, san, nosan)
+ args.msan = set_sanitizer('memory', args.msan, san, nosan)
+ args.ubsan = set_sanitizer('undefined', args.ubsan, san, nosan)
+
+ args.sanitize = args.asan or args.msan or args.ubsan
+
+ return args
+
+
+def compiler_version(cc, cxx):
+ """
+ Determines the compiler and version.
+ Only works for clang and gcc.
+ """
+ cc_version_bytes = subprocess.check_output([cc, "--version"])
+ cxx_version_bytes = subprocess.check_output([cxx, "--version"])
+ if cc_version_bytes.startswith(b'clang'):
+ assert(cxx_version_bytes.startswith(b'clang'))
+ compiler = 'clang'
+ if cc_version_bytes.startswith(b'gcc'):
+ assert(cxx_version_bytes.startswith(b'g++'))
+ compiler = 'gcc'
+ version_regex = b'([0-9]+)\.([0-9]+)\.([0-9]+)'
+ version_match = re.search(version_regex, cc_version_bytes)
+ version = tuple(int(version_match.group(i)) for i in range(1, 4))
+ return compiler, version
+
+
+def overflow_ubsan_flags(cc, cxx):
+ compiler, version = compiler_version(cc, cxx)
+ if compiler == 'gcc':
+ return ['-fno-sanitize=signed-integer-overflow']
+ if compiler == 'clang' and version >= (5, 0, 0):
+ return ['-fno-sanitize=pointer-overflow']
+ return []
+
+
+def build_parser(args):
+ description = """
+ Cleans the repository and builds a fuzz target (or all).
+ Many flags default to environment variables (default says $X='y').
+ Options that aren't enabling features default to the correct values for
+ zstd.
+ Enable sanitizers with --enable-*san.
+ For regression testing just build.
+ For libFuzzer set LIB_FUZZING_ENGINE and pass --enable-coverage.
+ For AFL set CC and CXX to AFL's compilers and set
+ LIB_FUZZING_ENGINE='libregression.a'.
+ """
+ parser = argparse.ArgumentParser(prog=args.pop(0), description=description)
+ parser.add_argument(
+ '--lib-fuzzing-engine',
+ dest='lib_fuzzing_engine',
+ type=str,
+ default=LIB_FUZZING_ENGINE,
+ help=('The fuzzing engine to use e.g. /path/to/libFuzzer.a '
+ "(default: $LIB_FUZZING_ENGINE='{})".format(LIB_FUZZING_ENGINE)))
+ parser.add_argument(
+ '--enable-coverage',
+ dest='coverage',
+ action='store_true',
+ help='Enable coverage instrumentation (-fsanitize-coverage)')
+ parser.add_argument(
+ '--enable-asan', dest='asan', action='store_true', help='Enable ASAN')
+ parser.add_argument(
+ '--enable-ubsan',
+ dest='ubsan',
+ action='store_true',
+ help='Enable UBSAN')
+ parser.add_argument(
+ '--enable-ubsan-pointer-overflow',
+ dest='ubsan_pointer_overflow',
+ action='store_true',
+ help='Enable UBSAN pointer overflow check (known failure)')
+ parser.add_argument(
+ '--enable-msan', dest='msan', action='store_true', help='Enable MSAN')
+ parser.add_argument(
+ '--enable-msan-track-origins', dest='msan_track_origins',
+ action='store_true', help='Enable MSAN origin tracking')
+ parser.add_argument(
+ '--msan-extra-cppflags',
+ dest='msan_extra_cppflags',
+ type=str,
+ default=MSAN_EXTRA_CPPFLAGS,
+ help="Extra CPPFLAGS for MSAN (default: $MSAN_EXTRA_CPPFLAGS='{}')".
+ format(MSAN_EXTRA_CPPFLAGS))
+ parser.add_argument(
+ '--msan-extra-cflags',
+ dest='msan_extra_cflags',
+ type=str,
+ default=MSAN_EXTRA_CFLAGS,
+ help="Extra CFLAGS for MSAN (default: $MSAN_EXTRA_CFLAGS='{}')".format(
+ MSAN_EXTRA_CFLAGS))
+ parser.add_argument(
+ '--msan-extra-cxxflags',
+ dest='msan_extra_cxxflags',
+ type=str,
+ default=MSAN_EXTRA_CXXFLAGS,
+ help="Extra CXXFLAGS for MSAN (default: $MSAN_EXTRA_CXXFLAGS='{}')".
+ format(MSAN_EXTRA_CXXFLAGS))
+ parser.add_argument(
+ '--msan-extra-ldflags',
+ dest='msan_extra_ldflags',
+ type=str,
+ default=MSAN_EXTRA_LDFLAGS,
+ help="Extra LDFLAGS for MSAN (default: $MSAN_EXTRA_LDFLAGS='{}')".
+ format(MSAN_EXTRA_LDFLAGS))
+ parser.add_argument(
+ '--enable-sanitize-recover',
+ dest='sanitize_recover',
+ action='store_true',
+ help='Non-fatal sanitizer errors where possible')
+ parser.add_argument(
+ '--debug',
+ dest='debug',
+ type=int,
+ default=1,
+ help='Set ZSTD_DEBUG (default: 1)')
+ parser.add_argument(
+ '--force-memory-access',
+ dest='memory_access',
+ type=int,
+ default=0,
+ help='Set MEM_FORCE_MEMORY_ACCESS (default: 0)')
+ parser.add_argument(
+ '--fuzz-rng-seed-size',
+ dest='fuzz_rng_seed_size',
+ type=int,
+ default=4,
+ help='Set FUZZ_RNG_SEED_SIZE (default: 4)')
+ parser.add_argument(
+ '--disable-fuzzing-mode',
+ dest='fuzzing_mode',
+ action='store_false',
+ help='Do not define FUZZING_BUILD_MORE_UNSAFE_FOR_PRODUCTION')
+ parser.add_argument(
+ '--enable-stateful-fuzzing',
+ dest='stateful_fuzzing',
+ action='store_true',
+ help='Reuse contexts between runs (makes reproduction impossible)')
+ parser.add_argument(
+ '--cc',
+ dest='cc',
+ type=str,
+ default=CC,
+ help="CC (default: $CC='{}')".format(CC))
+ parser.add_argument(
+ '--cxx',
+ dest='cxx',
+ type=str,
+ default=CXX,
+ help="CXX (default: $CXX='{}')".format(CXX))
+ parser.add_argument(
+ '--cppflags',
+ dest='cppflags',
+ type=str,
+ default=CPPFLAGS,
+ help="CPPFLAGS (default: $CPPFLAGS='{}')".format(CPPFLAGS))
+ parser.add_argument(
+ '--cflags',
+ dest='cflags',
+ type=str,
+ default=CFLAGS,
+ help="CFLAGS (default: $CFLAGS='{}')".format(CFLAGS))
+ parser.add_argument(
+ '--cxxflags',
+ dest='cxxflags',
+ type=str,
+ default=CXXFLAGS,
+ help="CXXFLAGS (default: $CXXFLAGS='{}')".format(CXXFLAGS))
+ parser.add_argument(
+ '--ldflags',
+ dest='ldflags',
+ type=str,
+ default=LDFLAGS,
+ help="LDFLAGS (default: $LDFLAGS='{}')".format(LDFLAGS))
+ parser.add_argument(
+ '--mflags',
+ dest='mflags',
+ type=str,
+ default=MFLAGS,
+ help="Extra Make flags (default: $MFLAGS='{}')".format(MFLAGS))
+ parser.add_argument(
+ 'TARGET',
+ nargs='*',
+ type=str,
+ help='Fuzz target(s) to build {{{}}}'.format(', '.join(ALL_TARGETS))
+ )
+ args = parser.parse_args(args)
+ args = parse_env_flags(args, ' '.join(
+ [args.cppflags, args.cflags, args.cxxflags, args.ldflags]))
+
+ # Check option sanity
+ if args.msan and (args.asan or args.ubsan):
+ raise RuntimeError('MSAN may not be used with any other sanitizers')
+ if args.msan_track_origins and not args.msan:
+ raise RuntimeError('--enable-msan-track-origins requires MSAN')
+ if args.ubsan_pointer_overflow and not args.ubsan:
+ raise RuntimeError('--enable-ubsan-pointer-overflow requires UBSAN')
+ if args.sanitize_recover and not args.sanitize:
+ raise RuntimeError('--enable-sanitize-recover but no sanitizers used')
+
+ return args
+
+
+def build(args):
+ try:
+ args = build_parser(args)
+ except Exception as e:
+ print(e)
+ return 1
+ # The compilation flags we are setting
+ targets = args.TARGET
+ cc = args.cc
+ cxx = args.cxx
+ cppflags = [args.cppflags]
+ cflags = [args.cflags]
+ ldflags = [args.ldflags]
+ cxxflags = [args.cxxflags]
+ mflags = [args.mflags] if args.mflags else []
+ # Flags to be added to both cflags and cxxflags
+ common_flags = []
+
+ cppflags += [
+ '-DZSTD_DEBUG={}'.format(args.debug),
+ '-DMEM_FORCE_MEMORY_ACCESS={}'.format(args.memory_access),
+ '-DFUZZ_RNG_SEED_SIZE={}'.format(args.fuzz_rng_seed_size),
+ ]
+
+ mflags += ['LIB_FUZZING_ENGINE={}'.format(args.lib_fuzzing_engine)]
+
+ # Set flags for options
+ if args.coverage:
+ common_flags += [
+ '-fsanitize-coverage=trace-pc-guard,indirect-calls,trace-cmp'
+ ]
+
+ if args.sanitize_recover:
+ recover_flags = ['-fsanitize-recover=all']
+ else:
+ recover_flags = ['-fno-sanitize-recover=all']
+ if args.sanitize:
+ common_flags += recover_flags
+
+ if args.msan:
+ msan_flags = ['-fsanitize=memory']
+ if args.msan_track_origins:
+ msan_flags += ['-fsanitize-memory-track-origins']
+ common_flags += msan_flags
+ # Append extra MSAN flags (it might require special setup)
+ cppflags += [args.msan_extra_cppflags]
+ cflags += [args.msan_extra_cflags]
+ cxxflags += [args.msan_extra_cxxflags]
+ ldflags += [args.msan_extra_ldflags]
+
+ if args.asan:
+ common_flags += ['-fsanitize=address']
+
+ if args.ubsan:
+ ubsan_flags = ['-fsanitize=undefined']
+ if not args.ubsan_pointer_overflow:
+ ubsan_flags += overflow_ubsan_flags(cc, cxx)
+ common_flags += ubsan_flags
+
+ if args.stateful_fuzzing:
+ cppflags += ['-DSTATEFUL_FUZZING']
+
+ if args.fuzzing_mode:
+ cppflags += ['-DFUZZING_BUILD_MORE_UNSAFE_FOR_PRODUCTION']
+
+ if args.lib_fuzzing_engine == 'libregression.a':
+ targets = ['libregression.a'] + targets
+
+ # Append the common flags
+ cflags += common_flags
+ cxxflags += common_flags
+
+ # Prepare the flags for Make
+ cc_str = "CC={}".format(cc)
+ cxx_str = "CXX={}".format(cxx)
+ cppflags_str = "CPPFLAGS={}".format(' '.join(cppflags))
+ cflags_str = "CFLAGS={}".format(' '.join(cflags))
+ cxxflags_str = "CXXFLAGS={}".format(' '.join(cxxflags))
+ ldflags_str = "LDFLAGS={}".format(' '.join(ldflags))
+
+ # Print the flags
+ print('MFLAGS={}'.format(' '.join(mflags)))
+ print(cc_str)
+ print(cxx_str)
+ print(cppflags_str)
+ print(cflags_str)
+ print(cxxflags_str)
+ print(ldflags_str)
+
+ # Clean and build
+ clean_cmd = ['make', 'clean'] + mflags
+ print(' '.join(clean_cmd))
+ subprocess.check_call(clean_cmd)
+ build_cmd = [
+ 'make',
+ cc_str,
+ cxx_str,
+ cppflags_str,
+ cflags_str,
+ cxxflags_str,
+ ldflags_str,
+ ] + mflags + targets
+ print(' '.join(build_cmd))
+ subprocess.check_call(build_cmd)
+ return 0
+
+
+def libfuzzer_parser(args):
+ description = """
+ Runs a libfuzzer binary.
+ Passes all extra arguments to libfuzzer.
+ The fuzzer should have been built with LIB_FUZZING_ENGINE pointing to
+ libFuzzer.a.
+ Generates output in the CORPORA directory, puts crashes in the ARTIFACT
+ directory, and takes extra input from the SEED directory.
+ To merge AFL's output, pass AFL's output directory as the SEED and pass
+ '-merge=1'.
+ """
+ parser = argparse.ArgumentParser(prog=args.pop(0), description=description)
+ parser.add_argument(
+ '--corpora',
+ type=str,
+ help='Override the default corpora dir (default: {})'.format(
+ abs_join(CORPORA_DIR, 'TARGET')))
+ parser.add_argument(
+ '--artifact',
+ type=str,
+ help='Override the default artifact dir (default: {})'.format(
+ abs_join(CORPORA_DIR, 'TARGET-crash')))
+ parser.add_argument(
+ '--seed',
+ type=str,
+ help='Override the default seed dir (default: {})'.format(
+ abs_join(CORPORA_DIR, 'TARGET-seed')))
+ parser.add_argument(
+ 'TARGET',
+ type=str,
+ help='Fuzz target(s) to build {{{}}}'.format(', '.join(TARGETS)))
+ args, extra = parser.parse_known_args(args)
+ args.extra = extra
+
+ if args.TARGET and args.TARGET not in TARGETS:
+ raise RuntimeError('{} is not a valid target'.format(args.TARGET))
+
+ return args
+
+
+def libfuzzer(target, corpora=None, artifact=None, seed=None, extra_args=None):
+ if corpora is None:
+ corpora = abs_join(CORPORA_DIR, target)
+ if artifact is None:
+ artifact = abs_join(CORPORA_DIR, '{}-crash'.format(target))
+ if seed is None:
+ seed = abs_join(CORPORA_DIR, '{}-seed'.format(target))
+ if extra_args is None:
+ extra_args = []
+
+ target = abs_join(FUZZ_DIR, target)
+
+ corpora = [create(corpora)]
+ artifact = create(artifact)
+ seed = check(seed)
+
+ corpora += [artifact]
+ if seed is not None:
+ corpora += [seed]
+
+ cmd = [target, '-artifact_prefix={}/'.format(artifact)]
+ cmd += corpora + extra_args
+ print(' '.join(cmd))
+ subprocess.check_call(cmd)
+
+
+def libfuzzer_cmd(args):
+ try:
+ args = libfuzzer_parser(args)
+ except Exception as e:
+ print(e)
+ return 1
+ libfuzzer(args.TARGET, args.corpora, args.artifact, args.seed, args.extra)
+ return 0
+
+
+def afl_parser(args):
+ description = """
+ Runs an afl-fuzz job.
+ Passes all extra arguments to afl-fuzz.
+ The fuzzer should have been built with CC/CXX set to the AFL compilers,
+ and with LIB_FUZZING_ENGINE='libregression.a'.
+ Takes input from CORPORA and writes output to OUTPUT.
+ Uses AFL_FUZZ as the binary (set from flag or environment variable).
+ """
+ parser = argparse.ArgumentParser(prog=args.pop(0), description=description)
+ parser.add_argument(
+ '--corpora',
+ type=str,
+ help='Override the default corpora dir (default: {})'.format(
+ abs_join(CORPORA_DIR, 'TARGET')))
+ parser.add_argument(
+ '--output',
+ type=str,
+ help='Override the default AFL output dir (default: {})'.format(
+ abs_join(CORPORA_DIR, 'TARGET-afl')))
+ parser.add_argument(
+ '--afl-fuzz',
+ type=str,
+ default=AFL_FUZZ,
+ help='AFL_FUZZ (default: $AFL_FUZZ={})'.format(AFL_FUZZ))
+ parser.add_argument(
+ 'TARGET',
+ type=str,
+ help='Fuzz target(s) to build {{{}}}'.format(', '.join(TARGETS)))
+ args, extra = parser.parse_known_args(args)
+ args.extra = extra
+
+ if args.TARGET and args.TARGET not in TARGETS:
+ raise RuntimeError('{} is not a valid target'.format(args.TARGET))
+
+ if not args.corpora:
+ args.corpora = abs_join(CORPORA_DIR, args.TARGET)
+ if not args.output:
+ args.output = abs_join(CORPORA_DIR, '{}-afl'.format(args.TARGET))
+
+ return args
+
+
+def afl(args):
+ try:
+ args = afl_parser(args)
+ except Exception as e:
+ print(e)
+ return 1
+ target = abs_join(FUZZ_DIR, args.TARGET)
+
+ corpora = create(args.corpora)
+ output = create(args.output)
+
+ cmd = [args.afl_fuzz, '-i', corpora, '-o', output] + args.extra
+ cmd += [target, '@@']
+ print(' '.join(cmd))
+ subprocess.call(cmd)
+ return 0
+
+
+def regression(args):
+ try:
+ description = """
+ Runs one or more regression tests.
+ The fuzzer should have been built with
+ LIB_FUZZING_ENGINE='libregression.a'.
+ Takes input from CORPORA.
+ """
+ args = targets_parser(args, description)
+ except Exception as e:
+ print(e)
+ return 1
+ for target in args.TARGET:
+ corpora = create(abs_join(CORPORA_DIR, target))
+ target = abs_join(FUZZ_DIR, target)
+ cmd = [target, corpora]
+ print(' '.join(cmd))
+ subprocess.check_call(cmd)
+ return 0
+
+
+def gen_parser(args):
+ description = """
+ Generate a seed corpus appropriate for TARGET with data generated by
+ decodecorpus.
+ The fuzz inputs are prepended with a seed before the zstd data, so the
+ output of decodecorpus shouldn't be used directly.
+ Generates NUMBER samples prepended with FUZZ_RNG_SEED_SIZE random bytes and
+ puts the output in SEED.
+ DECODECORPUS is the decodecorpus binary, and must already be built.
+ """
+ parser = argparse.ArgumentParser(prog=args.pop(0), description=description)
+ parser.add_argument(
+ '--number',
+ '-n',
+ type=int,
+ default=100,
+ help='Number of samples to generate')
+ parser.add_argument(
+ '--max-size-log',
+ type=int,
+ default=13,
+ help='Maximum sample size to generate')
+ parser.add_argument(
+ '--seed',
+ type=str,
+ help='Override the default seed dir (default: {})'.format(
+ abs_join(CORPORA_DIR, 'TARGET-seed')))
+ parser.add_argument(
+ '--decodecorpus',
+ type=str,
+ default=DECODECORPUS,
+ help="decodecorpus binary (default: $DECODECORPUS='{}')".format(
+ DECODECORPUS))
+ parser.add_argument(
+ '--fuzz-rng-seed-size',
+ type=int,
+ default=4,
+ help="FUZZ_RNG_SEED_SIZE used to generate the samples (must match)"
+ )
+ parser.add_argument(
+ 'TARGET',
+ type=str,
+ help='Fuzz target(s) to build {{{}}}'.format(', '.join(TARGETS)))
+ args, extra = parser.parse_known_args(args)
+ args.extra = extra
+
+ if args.TARGET and args.TARGET not in TARGETS:
+ raise RuntimeError('{} is not a valid target'.format(args.TARGET))
+
+ if not args.seed:
+ args.seed = abs_join(CORPORA_DIR, '{}-seed'.format(args.TARGET))
+
+ if not os.path.isfile(args.decodecorpus):
+ raise RuntimeError("{} is not a file; run 'make -C {} decodecorpus'".
+ format(args.decodecorpus, abs_join(FUZZ_DIR, '..')))
+
+ return args
+
+
+def gen(args):
+ try:
+ args = gen_parser(args)
+ except Exception as e:
+ print(e)
+ return 1
+
+ seed = create(args.seed)
+ with tmpdir() as compressed:
+ with tmpdir() as decompressed:
+ cmd = [
+ args.decodecorpus,
+ '-n{}'.format(args.number),
+ '-p{}/'.format(compressed),
+ '-o{}'.format(decompressed),
+ ]
+
+ if 'block_' in args.TARGET:
+ cmd += [
+ '--gen-blocks',
+ '--max-block-size-log={}'.format(args.max_size_log)
+ ]
+ else:
+ cmd += ['--max-content-size-log={}'.format(args.max_size_log)]
+
+ print(' '.join(cmd))
+ subprocess.check_call(cmd)
+
+ if '_round_trip' in args.TARGET:
+ print('using decompressed data in {}'.format(decompressed))
+ samples = decompressed
+ elif '_decompress' in args.TARGET:
+ print('using compressed data in {}'.format(compressed))
+ samples = compressed
+
+ # Copy the samples over and prepend the RNG seeds
+ for name in os.listdir(samples):
+ samplename = abs_join(samples, name)
+ outname = abs_join(seed, name)
+ rng_seed = os.urandom(args.fuzz_rng_seed_size)
+ with open(samplename, 'rb') as sample:
+ with open(outname, 'wb') as out:
+ out.write(rng_seed)
+ CHUNK_SIZE = 131072
+ chunk = sample.read(CHUNK_SIZE)
+ while len(chunk) > 0:
+ out.write(chunk)
+ chunk = sample.read(CHUNK_SIZE)
+ return 0
+
+
+def minimize(args):
+ try:
+ description = """
+ Runs a libfuzzer fuzzer with -merge=1 to build a minimal corpus in
+ TARGET_seed_corpus. All extra args are passed to libfuzzer.
+ """
+ args = targets_parser(args, description)
+ except Exception as e:
+ print(e)
+ return 1
+
+ for target in args.TARGET:
+ # Merge the corpus + anything else into the seed_corpus
+ corpus = abs_join(CORPORA_DIR, target)
+ seed_corpus = abs_join(CORPORA_DIR, "{}_seed_corpus".format(target))
+ extra_args = [corpus, "-merge=1"] + args.extra
+ libfuzzer(target, corpora=seed_corpus, extra_args=extra_args)
+ seeds = set(os.listdir(seed_corpus))
+ # Copy all crashes directly into the seed_corpus if not already present
+ crashes = abs_join(CORPORA_DIR, '{}-crash'.format(target))
+ for crash in os.listdir(crashes):
+ if crash not in seeds:
+ shutil.copy(abs_join(crashes, crash), seed_corpus)
+ seeds.add(crash)
+
+
+def zip_cmd(args):
+ try:
+ description = """
+ Zips up the seed corpus.
+ """
+ args = targets_parser(args, description)
+ except Exception as e:
+ print(e)
+ return 1
+
+ for target in args.TARGET:
+ # Zip the seed_corpus
+ seed_corpus = abs_join(CORPORA_DIR, "{}_seed_corpus".format(target))
+ seeds = [abs_join(seed_corpus, f) for f in os.listdir(seed_corpus)]
+ zip_file = "{}.zip".format(seed_corpus)
+ cmd = ["zip", "-q", "-j", "-9", zip_file]
+ print(' '.join(cmd + [abs_join(seed_corpus, '*')]))
+ subprocess.check_call(cmd + seeds)
+
+
+def list_cmd(args):
+ print("\n".join(TARGETS))
+
+
+def short_help(args):
+ name = args[0]
+ print("Usage: {} [OPTIONS] COMMAND [ARGS]...\n".format(name))
+
+
+def help(args):
+ short_help(args)
+ print("\tfuzzing helpers (select a command and pass -h for help)\n")
+ print("Options:")
+ print("\t-h, --help\tPrint this message")
+ print("")
+ print("Commands:")
+ print("\tbuild\t\tBuild a fuzzer")
+ print("\tlibfuzzer\tRun a libFuzzer fuzzer")
+ print("\tafl\t\tRun an AFL fuzzer")
+ print("\tregression\tRun a regression test")
+ print("\tgen\t\tGenerate a seed corpus for a fuzzer")
+ print("\tminimize\tMinimize the test corpora")
+ print("\tzip\t\tZip the minimized corpora up")
+ print("\tlist\t\tList the available targets")
+
+
+def main():
+ args = sys.argv
+ if len(args) < 2:
+ help(args)
+ return 1
+ if args[1] == '-h' or args[1] == '--help' or args[1] == '-H':
+ help(args)
+ return 1
+ command = args.pop(1)
+ args[0] = "{} {}".format(args[0], command)
+ if command == "build":
+ return build(args)
+ if command == "libfuzzer":
+ return libfuzzer_cmd(args)
+ if command == "regression":
+ return regression(args)
+ if command == "afl":
+ return afl(args)
+ if command == "gen":
+ return gen(args)
+ if command == "minimize":
+ return minimize(args)
+ if command == "zip":
+ return zip_cmd(args)
+ if command == "list":
+ return list_cmd(args)
+ short_help(args)
+ print("Error: No such command {} (pass -h for help)".format(command))
+ return 1
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tests/fuzz/fuzz_helpers.h b/tests/fuzz/fuzz_helpers.h
index 5f07fa4de935..468c39fb42d4 100644
--- a/tests/fuzz/fuzz_helpers.h
+++ b/tests/fuzz/fuzz_helpers.h
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
@@ -16,8 +16,14 @@
#include "fuzz.h"
#include "xxhash.h"
+#include "zstd.h"
#include <stdint.h>
#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -34,6 +40,8 @@
__LINE__, FUZZ_QUOTE(cond), (msg)), \
abort()))
#define FUZZ_ASSERT(cond) FUZZ_ASSERT_MSG((cond), "");
+#define FUZZ_ZASSERT(code) \
+ FUZZ_ASSERT_MSG(!ZSTD_isError(code), ZSTD_getErrorName(code))
#if defined(__GNUC__)
#define FUZZ_STATIC static __inline __attribute__((unused))
@@ -48,23 +56,37 @@
/**
 * Deterministically constructs a seed based on the fuzz input.
- * Only looks at the first FUZZ_RNG_SEED_SIZE bytes of the input.
+ * Consumes up to the first FUZZ_RNG_SEED_SIZE bytes of the input.
*/
-FUZZ_STATIC uint32_t FUZZ_seed(const uint8_t *src, size_t size) {
- size_t const toHash = MIN(FUZZ_RNG_SEED_SIZE, size);
- return XXH32(src, toHash, 0);
+FUZZ_STATIC uint32_t FUZZ_seed(uint8_t const **src, size_t* size) {
+ uint8_t const *data = *src;
+ size_t const toHash = MIN(FUZZ_RNG_SEED_SIZE, *size);
+ *size -= toHash;
+ *src += toHash;
+ return XXH32(data, toHash, 0);
}
#define FUZZ_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
+
FUZZ_STATIC uint32_t FUZZ_rand(uint32_t *state) {
- static const uint32_t prime1 = 2654435761U;
- static const uint32_t prime2 = 2246822519U;
- uint32_t rand32 = *state;
- rand32 *= prime1;
- rand32 += prime2;
- rand32 = FUZZ_rotl32(rand32, 13);
- *state = rand32;
- return rand32 >> 5;
+ static const uint32_t prime1 = 2654435761U;
+ static const uint32_t prime2 = 2246822519U;
+ uint32_t rand32 = *state;
+ rand32 *= prime1;
+ rand32 += prime2;
+ rand32 = FUZZ_rotl32(rand32, 13);
+ *state = rand32;
+ return rand32 >> 5;
}
+/* Returns a random number in the range [min, max]. */
+FUZZ_STATIC uint32_t FUZZ_rand32(uint32_t *state, uint32_t min, uint32_t max) {
+ uint32_t random = FUZZ_rand(state);
+ return min + (random % (max - min + 1));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/tests/fuzz/regression_driver.c b/tests/fuzz/regression_driver.c
index eee5f0a2a2b2..2b714d29e9dd 100644
--- a/tests/fuzz/regression_driver.c
+++ b/tests/fuzz/regression_driver.c
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
#include "fuzz.h"
@@ -29,9 +29,11 @@ int main(int argc, char const **argv) {
#ifdef UTIL_HAS_CREATEFILELIST
files = UTIL_createFileList(files, numFiles, &fileNamesBuf, &numFiles,
kFollowLinks);
- FUZZ_ASSERT(files);
+ if (!files)
+ numFiles = 0;
#endif
-
+ if (numFiles == 0)
+ fprintf(stderr, "WARNING: No files passed to %s\n", argv[0]);
for (i = 0; i < numFiles; ++i) {
char const *fileName = files[i];
size_t const fileSize = UTIL_getFileSize(fileName);
@@ -39,7 +41,7 @@ int main(int argc, char const **argv) {
FILE *file;
/* Check that it is a regular file, and that the fileSize is valid */
- FUZZ_ASSERT_MSG(UTIL_isRegFile(fileName), fileName);
+ FUZZ_ASSERT_MSG(UTIL_isRegularFile(fileName), fileName);
FUZZ_ASSERT_MSG(fileSize <= kMaxFileSize, fileName);
/* Ensure we have a large enough buffer allocated */
if (fileSize > bufferSize) {
diff --git a/tests/fuzz/simple_decompress.c b/tests/fuzz/simple_decompress.c
index c22ad7c5301c..bba272c62256 100644
--- a/tests/fuzz/simple_decompress.c
+++ b/tests/fuzz/simple_decompress.c
@@ -1,10 +1,10 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+/*
+ * Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
@@ -24,7 +24,10 @@ static size_t bufSize = 0;
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
- size_t const neededBufSize = MAX(20 * size, (size_t)256 << 10);
+ size_t neededBufSize;
+
+ FUZZ_seed(&src, &size);
+ neededBufSize = MAX(20 * size, (size_t)256 << 10);
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
@@ -39,7 +42,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
}
ZSTD_decompressDCtx(dctx, rBuf, neededBufSize, src, size);
-#ifndef STATEFULL_FUZZING
+#ifndef STATEFUL_FUZZING
ZSTD_freeDCtx(dctx); dctx = NULL;
#endif
return 0;
diff --git a/tests/fuzz/simple_round_trip.c b/tests/fuzz/simple_round_trip.c
index 703ea582630a..0921106dea8e 100644
--- a/tests/fuzz/simple_round_trip.c
+++ b/tests/fuzz/simple_round_trip.c
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
@@ -12,12 +12,14 @@
* compares the result with the original, and calls abort() on corruption.
*/
+#define ZSTD_STATIC_LINKING_ONLY
+
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "fuzz_helpers.h"
-#include "zstd.h"
+#include "zstd_helpers.h"
static const int kMaxClevel = 19;
@@ -32,21 +34,33 @@ static size_t roundTripTest(void *result, size_t resultCapacity,
void *compressed, size_t compressedCapacity,
const void *src, size_t srcSize)
{
- int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
- size_t const cSize = ZSTD_compressCCtx(cctx, compressed, compressedCapacity,
- src, srcSize, cLevel);
- if (ZSTD_isError(cSize)) {
- fprintf(stderr, "Compression error: %s\n", ZSTD_getErrorName(cSize));
- return cSize;
- }
- return ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize);
+ size_t cSize;
+ if (FUZZ_rand(&seed) & 1) {
+ ZSTD_inBuffer in = {src, srcSize, 0};
+ ZSTD_outBuffer out = {compressed, compressedCapacity, 0};
+ size_t err;
+
+ ZSTD_CCtx_reset(cctx);
+ FUZZ_setRandomParameters(cctx, srcSize, &seed);
+ err = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ FUZZ_ZASSERT(err);
+ FUZZ_ASSERT(err == 0);
+ cSize = out.pos;
+ } else {
+ int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
+ cSize = ZSTD_compressCCtx(
+ cctx, compressed, compressedCapacity, src, srcSize, cLevel);
+ }
+ FUZZ_ZASSERT(cSize);
+ return ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize);
}
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
- size_t const neededBufSize = ZSTD_compressBound(size);
+ size_t neededBufSize;
- seed = FUZZ_seed(src, size);
+ seed = FUZZ_seed(&src, &size);
+ neededBufSize = ZSTD_compressBound(size);
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
@@ -69,11 +83,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const result =
roundTripTest(rBuf, neededBufSize, cBuf, neededBufSize, src, size);
- FUZZ_ASSERT_MSG(!ZSTD_isError(result), ZSTD_getErrorName(result));
+ FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
-#ifndef STATEFULL_FUZZING
+#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;
#endif
diff --git a/tests/fuzz/stream_decompress.c b/tests/fuzz/stream_decompress.c
index 778a426dec7c..7ad571221dff 100644
--- a/tests/fuzz/stream_decompress.c
+++ b/tests/fuzz/stream_decompress.c
@@ -1,10 +1,10 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+/*
+ * Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
@@ -20,7 +20,7 @@
#include "fuzz_helpers.h"
#include "zstd.h"
-static size_t const kBufSize = ZSTD_BLOCKSIZE_ABSOLUTEMAX;
+static size_t const kBufSize = ZSTD_BLOCKSIZE_MAX;
static ZSTD_DStream *dstream = NULL;
static void* buf = NULL;
@@ -51,7 +51,7 @@ static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
- seed = FUZZ_seed(src, size);
+ seed = FUZZ_seed(&src, &size);
/* Allocate all buffers and contexts if not already allocated */
if (!buf) {
@@ -78,7 +78,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
}
error:
-#ifndef STATEFULL_FUZZING
+#ifndef STATEFUL_FUZZING
ZSTD_freeDStream(dstream); dstream = NULL;
#endif
return 0;
diff --git a/tests/fuzz/stream_round_trip.c b/tests/fuzz/stream_round_trip.c
index 17c7dfdd29a7..72d70495f836 100644
--- a/tests/fuzz/stream_round_trip.c
+++ b/tests/fuzz/stream_round_trip.c
@@ -1,10 +1,10 @@
-/**
+/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/
/**
@@ -12,16 +12,16 @@
* compares the result with the original, and calls abort() on corruption.
*/
+#define ZSTD_STATIC_LINKING_ONLY
+
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "fuzz_helpers.h"
-#include "zstd.h"
+#include "zstd_helpers.h"
-static const int kMaxClevel = 19;
-
-static ZSTD_CStream *cstream = NULL;
+ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint8_t* cBuf = NULL;
static uint8_t* rBuf = NULL;
@@ -30,93 +30,102 @@ static uint32_t seed;
static ZSTD_outBuffer makeOutBuffer(uint8_t *dst, size_t capacity)
{
- ZSTD_outBuffer buffer = { dst, 0, 0 };
+ ZSTD_outBuffer buffer = { dst, 0, 0 };
- FUZZ_ASSERT(capacity > 0);
- buffer.size = (FUZZ_rand(&seed) % capacity) + 1;
- FUZZ_ASSERT(buffer.size <= capacity);
+ FUZZ_ASSERT(capacity > 0);
+ buffer.size = (FUZZ_rand(&seed) % capacity) + 1;
+ FUZZ_ASSERT(buffer.size <= capacity);
- return buffer;
+ return buffer;
}
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
{
- ZSTD_inBuffer buffer = { *src, 0, 0 };
+ ZSTD_inBuffer buffer = { *src, 0, 0 };
- FUZZ_ASSERT(*size > 0);
- buffer.size = (FUZZ_rand(&seed) % *size) + 1;
- FUZZ_ASSERT(buffer.size <= *size);
- *src += buffer.size;
- *size -= buffer.size;
+ FUZZ_ASSERT(*size > 0);
+ buffer.size = (FUZZ_rand(&seed) % *size) + 1;
+ FUZZ_ASSERT(buffer.size <= *size);
+ *src += buffer.size;
+ *size -= buffer.size;
- return buffer;
+ return buffer;
}
static size_t compress(uint8_t *dst, size_t capacity,
const uint8_t *src, size_t srcSize)
{
- int cLevel = FUZZ_rand(&seed) % kMaxClevel;
size_t dstSize = 0;
- FUZZ_ASSERT(!ZSTD_isError(ZSTD_initCStream(cstream, cLevel)));
+ ZSTD_CCtx_reset(cctx);
+ FUZZ_setRandomParameters(cctx, srcSize, &seed);
while (srcSize > 0) {
ZSTD_inBuffer in = makeInBuffer(&src, &srcSize);
/* Mode controls the action. If mode == -1 we pick a new mode */
int mode = -1;
while (in.pos < in.size) {
- ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
- /* Previous action finished, pick a new mode. */
- if (mode == -1) mode = FUZZ_rand(&seed) % 10;
- switch (mode) {
- case 0: /* fall-though */
- case 1: /* fall-though */
- case 2: {
- size_t const ret = ZSTD_flushStream(cstream, &out);
- FUZZ_ASSERT_MSG(!ZSTD_isError(ret), ZSTD_getErrorName(ret));
- if (ret == 0) mode = -1;
- break;
- }
- case 3: {
- size_t ret = ZSTD_endStream(cstream, &out);
- FUZZ_ASSERT_MSG(!ZSTD_isError(ret), ZSTD_getErrorName(ret));
- /* Reset the compressor when the frame is finished */
- if (ret == 0) {
- cLevel = FUZZ_rand(&seed) % kMaxClevel;
- ret = ZSTD_initCStream(cstream, cLevel);
- FUZZ_ASSERT(!ZSTD_isError(ret));
+ ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
+ /* Previous action finished, pick a new mode. */
+ if (mode == -1) mode = FUZZ_rand(&seed) % 10;
+ switch (mode) {
+ case 0: /* fall-through */
+ case 1: /* fall-through */
+ case 2: {
+ size_t const ret =
+ ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_flush);
+ FUZZ_ZASSERT(ret);
+ if (ret == 0)
+ mode = -1;
+ break;
+ }
+ case 3: {
+ size_t ret =
+ ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ FUZZ_ZASSERT(ret);
+ /* Reset the compressor when the frame is finished */
+ if (ret == 0) {
+ ZSTD_CCtx_reset(cctx);
+ if ((FUZZ_rand(&seed) & 7) == 0) {
+ size_t const remaining = in.size - in.pos;
+ FUZZ_setRandomParameters(cctx, remaining, &seed);
+ }
+ mode = -1;
+ }
+ break;
+ }
+ default: {
+ size_t const ret =
+ ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_continue);
+ FUZZ_ZASSERT(ret);
mode = -1;
}
- break;
- }
- default: {
- size_t const ret = ZSTD_compressStream(cstream, &out, &in);
- FUZZ_ASSERT_MSG(!ZSTD_isError(ret), ZSTD_getErrorName(ret));
- mode = -1;
}
- }
- dst += out.pos;
- dstSize += out.pos;
- capacity -= out.pos;
+ dst += out.pos;
+ dstSize += out.pos;
+ capacity -= out.pos;
}
}
for (;;) {
+ ZSTD_inBuffer in = {NULL, 0, 0};
ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
- size_t const ret = ZSTD_endStream(cstream, &out);
- FUZZ_ASSERT_MSG(!ZSTD_isError(ret), ZSTD_getErrorName(ret));
+ size_t const ret = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ FUZZ_ZASSERT(ret);
dst += out.pos;
dstSize += out.pos;
capacity -= out.pos;
- if (ret == 0) break;
+ if (ret == 0)
+ break;
}
return dstSize;
}
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
- size_t const neededBufSize = ZSTD_compressBound(size) * 2;
+ size_t neededBufSize;
- seed = FUZZ_seed(src, size);
+ seed = FUZZ_seed(&src, &size);
+ neededBufSize = ZSTD_compressBound(size) * 2;
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
@@ -127,9 +136,9 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
bufSize = neededBufSize;
FUZZ_ASSERT(cBuf && rBuf);
}
- if (!cstream) {
- cstream = ZSTD_createCStream();
- FUZZ_ASSERT(cstream);
+ if (!cctx) {
+ cctx = ZSTD_createCCtx();
+ FUZZ_ASSERT(cctx);
}
if (!dctx) {
dctx = ZSTD_createDCtx();
@@ -140,13 +149,13 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
size_t const cSize = compress(cBuf, neededBufSize, src, size);
size_t const rSize =
ZSTD_decompressDCtx(dctx, rBuf, neededBufSize, cBuf, cSize);
- FUZZ_ASSERT_MSG(!ZSTD_isError(rSize), ZSTD_getErrorName(rSize));
+ FUZZ_ZASSERT(rSize);
FUZZ_ASSERT_MSG(rSize == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
-#ifndef STATEFULL_FUZZING
- ZSTD_freeCStream(cstream); cstream = NULL;
+#ifndef STATEFUL_FUZZING
+ ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;
#endif
return 0;
diff --git a/tests/fuzz/zstd_helpers.c b/tests/fuzz/zstd_helpers.c
new file mode 100644
index 000000000000..602898479bc2
--- /dev/null
+++ b/tests/fuzz/zstd_helpers.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#define ZSTD_STATIC_LINKING_ONLY
+
+#include "zstd_helpers.h"
+#include "fuzz_helpers.h"
+#include "zstd.h"
+
+static void set(ZSTD_CCtx *cctx, ZSTD_cParameter param, unsigned value)
+{
+ FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, param, value));
+}
+
+static void setRand(ZSTD_CCtx *cctx, ZSTD_cParameter param, unsigned min,
+ unsigned max, uint32_t *state) {
+ unsigned const value = FUZZ_rand32(state, min, max);
+ set(cctx, param, value);
+}
+
+ZSTD_compressionParameters FUZZ_randomCParams(size_t srcSize, uint32_t *state)
+{
+ /* Select compression parameters */
+ ZSTD_compressionParameters cParams;
+ cParams.windowLog = FUZZ_rand32(state, ZSTD_WINDOWLOG_MIN, 15);
+ cParams.hashLog = FUZZ_rand32(state, ZSTD_HASHLOG_MIN, 15);
+ cParams.chainLog = FUZZ_rand32(state, ZSTD_CHAINLOG_MIN, 16);
+ cParams.searchLog = FUZZ_rand32(state, ZSTD_SEARCHLOG_MIN, 9);
+ cParams.searchLength = FUZZ_rand32(state, ZSTD_SEARCHLENGTH_MIN,
+ ZSTD_SEARCHLENGTH_MAX);
+ cParams.targetLength = FUZZ_rand32(state, ZSTD_TARGETLENGTH_MIN,
+ ZSTD_TARGETLENGTH_MAX);
+ cParams.strategy = FUZZ_rand32(state, ZSTD_fast, ZSTD_btultra);
+ return ZSTD_adjustCParams(cParams, srcSize, 0);
+}
+
+ZSTD_frameParameters FUZZ_randomFParams(uint32_t *state)
+{
+ /* Select frame parameters */
+ ZSTD_frameParameters fParams;
+ fParams.contentSizeFlag = FUZZ_rand32(state, 0, 1);
+ fParams.checksumFlag = FUZZ_rand32(state, 0, 1);
+ fParams.noDictIDFlag = FUZZ_rand32(state, 0, 1);
+ return fParams;
+}
+
+ZSTD_parameters FUZZ_randomParams(size_t srcSize, uint32_t *state)
+{
+ ZSTD_parameters params;
+ params.cParams = FUZZ_randomCParams(srcSize, state);
+ params.fParams = FUZZ_randomFParams(state);
+ return params;
+}
+
+void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, uint32_t *state)
+{
+ ZSTD_compressionParameters cParams = FUZZ_randomCParams(srcSize, state);
+ set(cctx, ZSTD_p_windowLog, cParams.windowLog);
+ set(cctx, ZSTD_p_hashLog, cParams.hashLog);
+ set(cctx, ZSTD_p_chainLog, cParams.chainLog);
+ set(cctx, ZSTD_p_searchLog, cParams.searchLog);
+ set(cctx, ZSTD_p_minMatch, cParams.searchLength);
+ set(cctx, ZSTD_p_targetLength, cParams.targetLength);
+ set(cctx, ZSTD_p_compressionStrategy, cParams.strategy);
+ /* Select frame parameters */
+ setRand(cctx, ZSTD_p_contentSizeFlag, 0, 1, state);
+ setRand(cctx, ZSTD_p_checksumFlag, 0, 1, state);
+ setRand(cctx, ZSTD_p_dictIDFlag, 0, 1, state);
+ /* Select long distance matching parameters */
+ setRand(cctx, ZSTD_p_enableLongDistanceMatching, 0, 1, state);
+ setRand(cctx, ZSTD_p_ldmHashLog, ZSTD_HASHLOG_MIN, 16, state);
+ setRand(cctx, ZSTD_p_ldmMinMatch, ZSTD_LDM_MINMATCH_MIN,
+ ZSTD_LDM_MINMATCH_MAX, state);
+ setRand(cctx, ZSTD_p_ldmBucketSizeLog, 0, ZSTD_LDM_BUCKETSIZELOG_MAX,
+ state);
+ setRand(cctx, ZSTD_p_ldmHashEveryLog, 0,
+ ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, state);
+}
diff --git a/tests/fuzz/zstd_helpers.h b/tests/fuzz/zstd_helpers.h
new file mode 100644
index 000000000000..3856bebecf7e
--- /dev/null
+++ b/tests/fuzz/zstd_helpers.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+/**
+ * Helper functions for fuzzing.
+ */
+
+#ifndef ZSTD_HELPERS_H
+#define ZSTD_HELPERS_H
+
+#include "zstd.h"
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, uint32_t *state);
+
+ZSTD_compressionParameters FUZZ_randomCParams(size_t srcSize, uint32_t *state);
+ZSTD_frameParameters FUZZ_randomFParams(uint32_t *state);
+ZSTD_parameters FUZZ_randomParams(size_t srcSize, uint32_t *state);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZSTD_HELPERS_H */
diff --git a/tests/fuzzer.c b/tests/fuzzer.c
index 0c13a6e488ad..76df77af9015 100644
--- a/tests/fuzzer.c
+++ b/tests/fuzzer.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -509,8 +510,7 @@ static int basicUnitTests(U32 seed, double compressibility)
/* only use the first half so we don't push against size limit of compressedBuffer */
size_t const segSize = (CNBuffSize / 2) / segs;
for (i = 0; i < segs; i++) {
- CHECK_V(r,
- ZSTD_compress(
+ CHECK_V(r, ZSTD_compress(
(BYTE *)compressedBuffer + off, CNBuffSize - off,
(BYTE *)CNBuffer + segSize * i,
segSize, 5));
@@ -598,10 +598,10 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(4, "test%3i : decompress with static DDict : ", testNb++);
- { size_t const ddictBufferSize = ZSTD_estimateDDictSize(dictSize, 0);
+ { size_t const ddictBufferSize = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
void* ddictBuffer = malloc(ddictBufferSize);
if (ddictBuffer == NULL) goto _output_error;
- { ZSTD_DDict* const ddict = ZSTD_initStaticDDict(ddictBuffer, ddictBufferSize, CNBuffer, dictSize, 0);
+ { ZSTD_DDict* const ddict = ZSTD_initStaticDDict(ddictBuffer, ddictBufferSize, CNBuffer, dictSize, ZSTD_dlm_byCopy);
size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict);
if (r != CNBuffSize - dictSize) goto _output_error;
}
@@ -687,14 +687,14 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : estimate CDict size : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
- size_t const estimatedSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, 1 /*byReference*/);
+ size_t const estimatedSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byRef);
DISPLAYLEVEL(4, "OK : %u \n", (U32)estimatedSize);
}
DISPLAYLEVEL(4, "test%3i : compress with CDict ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize,
- 1 /* byReference */, ZSTD_dm_auto,
+ ZSTD_dlm_byRef, ZSTD_dm_auto,
cParams, ZSTD_defaultCMem);
DISPLAYLEVEL(4, "(size : %u) : ", (U32)ZSTD_sizeof_CDict(cdict));
cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, compressedBufferSize,
@@ -720,12 +720,12 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : compress with static CDict : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
- size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, 0);
+ size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
void* const cdictBuffer = malloc(cdictSize);
if (cdictBuffer==NULL) goto _output_error;
{ ZSTD_CDict* const cdict = ZSTD_initStaticCDict(cdictBuffer, cdictSize,
dictBuffer, dictSize,
- 0 /* by Reference */, ZSTD_dm_auto,
+ ZSTD_dlm_byCopy, ZSTD_dm_auto,
cParams);
if (cdict == NULL) {
DISPLAY("ZSTD_initStaticCDict failed ");
@@ -745,7 +745,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : ZSTD_compress_usingCDict_advanced, no contentSize, no dictID : ", testNb++);
{ ZSTD_frameParameters const fParams = { 0 /* frameSize */, 1 /* checksum */, 1 /* noDictID*/ };
ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
- ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1 /*byRef*/, ZSTD_dm_auto, cParams, ZSTD_defaultCMem);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dm_auto, cParams, ZSTD_defaultCMem);
cSize = ZSTD_compress_usingCDict_advanced(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, cdict, fParams);
ZSTD_freeCDict(cdict);
@@ -796,7 +796,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : Building cdict w/ ZSTD_dm_fullDict on a good dictionary : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
- ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1 /*byRef*/, ZSTD_dm_fullDict, cParams, ZSTD_defaultCMem);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dm_fullDict, cParams, ZSTD_defaultCMem);
if (cdict==NULL) goto _output_error;
ZSTD_freeCDict(cdict);
}
@@ -804,12 +804,32 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : Building cdict w/ ZSTD_dm_fullDict on a rawContent (must fail) : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
- ZSTD_CDict* const cdict = ZSTD_createCDict_advanced((const char*)dictBuffer+1, dictSize-1, 1 /*byRef*/, ZSTD_dm_fullDict, cParams, ZSTD_defaultCMem);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced((const char*)dictBuffer+1, dictSize-1, ZSTD_dlm_byRef, ZSTD_dm_fullDict, cParams, ZSTD_defaultCMem);
if (cdict!=NULL) goto _output_error;
ZSTD_freeCDict(cdict);
}
DISPLAYLEVEL(4, "OK \n");
+ DISPLAYLEVEL(4, "test%3i : Loading rawContent starting with dict header w/ ZSTD_dm_auto should fail : ", testNb++);
+ {
+ size_t ret;
+ MEM_writeLE32((char*)dictBuffer+2, ZSTD_MAGIC_DICTIONARY);
+ ret = ZSTD_CCtx_loadDictionary_advanced(
+ cctx, (const char*)dictBuffer+2, dictSize-2, ZSTD_dlm_byRef, ZSTD_dm_auto);
+ if (!ZSTD_isError(ret)) goto _output_error;
+ }
+ DISPLAYLEVEL(4, "OK \n");
+
+ DISPLAYLEVEL(4, "test%3i : Loading rawContent starting with dict header w/ ZSTD_dm_rawContent should pass : ", testNb++);
+ {
+ size_t ret;
+ MEM_writeLE32((char*)dictBuffer+2, ZSTD_MAGIC_DICTIONARY);
+ ret = ZSTD_CCtx_loadDictionary_advanced(
+ cctx, (const char*)dictBuffer+2, dictSize-2, ZSTD_dlm_byRef, ZSTD_dm_rawContent);
+ if (ZSTD_isError(ret)) goto _output_error;
+ }
+ DISPLAYLEVEL(4, "OK \n");
+
ZSTD_freeCCtx(cctx);
free(dictBuffer);
free(samplesSizes);
@@ -897,6 +917,43 @@ static int basicUnitTests(U32 seed, double compressibility)
ZSTD_freeCCtx(cctx);
}
+ /* custom formats tests */
+ { ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ size_t const inputSize = CNBuffSize / 2; /* won't cause problems with a small dict size */
+
+ /* basic block compression */
+ DISPLAYLEVEL(4, "test%3i : magic-less format test : ", testNb++);
+ CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_p_format, ZSTD_f_zstd1_magicless) );
+ { ZSTD_inBuffer in = { CNBuffer, inputSize, 0 };
+ ZSTD_outBuffer out = { compressedBuffer, ZSTD_compressBound(inputSize), 0 };
+ size_t const result = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ if (result != 0) goto _output_error;
+ if (in.pos != in.size) goto _output_error;
+ cSize = out.pos;
+ }
+ DISPLAYLEVEL(4, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)cSize);
+
+ DISPLAYLEVEL(4, "test%3i : decompress normally (should fail) : ", testNb++);
+ { size_t const decodeResult = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize);
+ if (ZSTD_getErrorCode(decodeResult) != ZSTD_error_prefix_unknown) goto _output_error;
+ DISPLAYLEVEL(4, "OK : %s \n", ZSTD_getErrorName(decodeResult));
+ }
+
+ DISPLAYLEVEL(4, "test%3i : decompress with magic-less instruction : ", testNb++);
+ ZSTD_DCtx_reset(dctx);
+ CHECK( ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless) );
+ { ZSTD_inBuffer in = { compressedBuffer, cSize, 0 };
+ ZSTD_outBuffer out = { decodedBuffer, CNBuffSize, 0 };
+ size_t const result = ZSTD_decompress_generic(dctx, &out, &in);
+ if (result != 0) goto _output_error;
+ if (in.pos != in.size) goto _output_error;
+ if (out.pos != inputSize) goto _output_error;
+ DISPLAYLEVEL(4, "OK : regenerated %u bytes \n", (U32)out.pos);
+ }
+
+ ZSTD_freeCCtx(cctx);
+ }
+
/* block API tests */
{ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
static const size_t dictSize = 65 KB;
@@ -940,8 +997,8 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "OK \n");
ZSTD_freeCCtx(cctx);
- ZSTD_freeDCtx(dctx);
}
+ ZSTD_freeDCtx(dctx);
/* long rle test */
{ size_t sampleSize = 0;
@@ -1334,7 +1391,6 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
}
CHECK_Z( ZSTD_copyCCtx(ctx, refCtx, 0) );
}
- ZSTD_setCCtxParameter(ctx, ZSTD_p_forceWindow, FUZ_rand(&lseed) & 1);
{ U32 const nbChunks = (FUZ_rand(&lseed) & 127) + 2;
U32 n;
@@ -1364,6 +1420,16 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
}
/* streaming decompression test */
+ /* ensure the estimated memory requirement is sufficient (should always be true) */
+ { ZSTD_frameHeader zfh;
+ CHECK( ZSTD_getFrameHeader(&zfh, cBuffer, ZSTD_frameHeaderSize_max),
+ "ZSTD_getFrameHeader(): error retrieving frame information");
+ { size_t const roundBuffSize = ZSTD_decodingBufferSize_min(zfh.windowSize, zfh.frameContentSize);
+ CHECK_Z(roundBuffSize);
+ CHECK((roundBuffSize > totalTestSize) && (zfh.frameContentSize!=ZSTD_CONTENTSIZE_UNKNOWN),
+ "ZSTD_decodingBufferSize_min() requires more memory (%u) than necessary (%u)",
+ (U32)roundBuffSize, (U32)totalTestSize );
+ } }
if (dictSize<8) dictSize=0, dict=NULL; /* disable dictionary */
CHECK_Z( ZSTD_decompressBegin_usingDict(dctx, dict, dictSize) );
totalCSize = 0;
diff --git a/tests/gzip/Makefile b/tests/gzip/Makefile
index b02fb693f329..40a0ba97d2b3 100644
--- a/tests/gzip/Makefile
+++ b/tests/gzip/Makefile
@@ -1,10 +1,10 @@
# ################################################################
-# Copyright (c) 2017-present, Yann Collet, Facebook, Inc.
+# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
# ################################################################
PRGDIR = ../../programs
@@ -12,7 +12,7 @@ VOID = /dev/null
export PATH := .:$(PATH)
.PHONY: all
-#all: test-gzip-env
+#all: test-gzip-env
all: test-helin-segv test-hufts test-keep test-list test-memcpy-abuse test-mixed
all: test-null-suffix-clobber test-stdin test-trailing-nul test-unpack-invalid
all: test-zdiff test-zgrep-context test-zgrep-f test-zgrep-signal test-znew-k test-z-suffix
diff --git a/tests/invalidDictionaries.c b/tests/invalidDictionaries.c
index 83fe439d4c43..b23db036f489 100644
--- a/tests/invalidDictionaries.c
+++ b/tests/invalidDictionaries.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#include <stddef.h>
diff --git a/tests/legacy.c b/tests/legacy.c
index 962b2c9c3c9d..46a8206c4422 100644
--- a/tests/legacy.c
+++ b/tests/legacy.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/*
diff --git a/tests/longmatch.c b/tests/longmatch.c
index ef79337f56d5..ed3861571d9b 100644
--- a/tests/longmatch.c
+++ b/tests/longmatch.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2017-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/tests/namespaceTest.c b/tests/namespaceTest.c
index 6f6c74fd63d9..5b7095f67c80 100644
--- a/tests/namespaceTest.c
+++ b/tests/namespaceTest.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/tests/paramgrill.c b/tests/paramgrill.c
index ed13e1dacca8..317ec461ca91 100644
--- a/tests/paramgrill.c
+++ b/tests/paramgrill.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -390,8 +391,8 @@ static int BMK_seed(winnerInfo_t* winners, const ZSTD_compressionParameters para
double W_DMemUsed_note = W_ratioNote * ( 40 + 9*cLevel) - log((double)W_DMemUsed);
double O_DMemUsed_note = O_ratioNote * ( 40 + 9*cLevel) - log((double)O_DMemUsed);
- size_t W_CMemUsed = (1 << params.windowLog) + ZSTD_estimateCCtxSize_advanced(params);
- size_t O_CMemUsed = (1 << winners[cLevel].params.windowLog) + ZSTD_estimateCCtxSize_advanced(winners[cLevel].params);
+ size_t W_CMemUsed = (1 << params.windowLog) + ZSTD_estimateCCtxSize_usingCParams(params);
+ size_t O_CMemUsed = (1 << winners[cLevel].params.windowLog) + ZSTD_estimateCCtxSize_usingCParams(winners[cLevel].params);
double W_CMemUsed_note = W_ratioNote * ( 50 + 13*cLevel) - log((double)W_CMemUsed);
double O_CMemUsed_note = O_ratioNote * ( 50 + 13*cLevel) - log((double)O_CMemUsed);
diff --git a/tests/playTests.sh b/tests/playTests.sh
index bc8584e7a9f0..bc021648cfcb 100755
--- a/tests/playTests.sh
+++ b/tests/playTests.sh
@@ -13,11 +13,16 @@ roundTripTest() {
cLevel="$2"
proba=""
fi
+ if [ -n "$4" ]; then
+ dLevel="$4"
+ else
+ dLevel="$cLevel"
+ fi
rm -f tmp1 tmp2
- $ECHO "roundTripTest: ./datagen $1 $proba | $ZSTD -v$cLevel | $ZSTD -d"
+ $ECHO "roundTripTest: ./datagen $1 $proba | $ZSTD -v$cLevel | $ZSTD -d$dLevel"
./datagen $1 $proba | $MD5SUM > tmp1
- ./datagen $1 $proba | $ZSTD --ultra -v$cLevel | $ZSTD -d | $MD5SUM > tmp2
+ ./datagen $1 $proba | $ZSTD --ultra -v$cLevel | $ZSTD -d$dLevel | $MD5SUM > tmp2
$DIFF -q tmp1 tmp2
}
@@ -29,12 +34,17 @@ fileRoundTripTest() {
local_c="$2"
local_p=""
fi
+ if [ -n "$4" ]; then
+ local_d="$4"
+ else
+ local_d="$local_c"
+ fi
rm -f tmp.zstd tmp.md5.1 tmp.md5.2
- $ECHO "fileRoundTripTest: ./datagen $1 $local_p > tmp && $ZSTD -v$local_c -c tmp | $ZSTD -d"
+ $ECHO "fileRoundTripTest: ./datagen $1 $local_p > tmp && $ZSTD -v$local_c -c tmp | $ZSTD -d$local_d"
./datagen $1 $local_p > tmp
- cat tmp | $MD5SUM > tmp.md5.1
- $ZSTD --ultra -v$local_c -c tmp | $ZSTD -d | $MD5SUM > tmp.md5.2
+ < tmp $MD5SUM > tmp.md5.1
+ $ZSTD --ultra -v$local_c -c tmp | $ZSTD -d$local_d | $MD5SUM > tmp.md5.2
$DIFF -q tmp.md5.1 tmp.md5.2
}
@@ -45,7 +55,6 @@ then
fi
isWindows=false
-ECHO="echo -e"
INTOVOID="/dev/null"
case "$OS" in
Windows*)
@@ -66,6 +75,11 @@ case "$UNAME" in
SunOS) DIFF="gdiff" ;;
esac
+ECHO="echo -e"
+case "$UNAME" in
+ Darwin) ECHO="echo" ;;
+esac
+
$ECHO "\nStarting playTests.sh isWindows=$isWindows ZSTD='$ZSTD'"
[ -n "$ZSTD" ] || die "ZSTD variable must be defined!"
@@ -161,6 +175,8 @@ roundTripTest -g512K
roundTripTest -g512K " --zstd=slen=3,tlen=48,strat=6"
roundTripTest -g512K " --zstd=strat=6,wlog=23,clog=23,hlog=22,slog=6"
roundTripTest -g512K " --zstd=windowLog=23,chainLog=23,hashLog=22,searchLog=6,searchLength=3,targetLength=48,strategy=6"
+roundTripTest -g512K " --long --zstd=ldmHashLog=20,ldmSearchLength=64,ldmBucketSizeLog=1,ldmHashEveryLog=7"
+roundTripTest -g512K " --long --zstd=ldmhlog=20,ldmslen=64,ldmblog=1,ldmhevery=7"
roundTripTest -g512K 19
@@ -291,8 +307,10 @@ $ECHO "- Create dictionary with wrong dictID parameter order (must fail)"
$ZSTD --train *.c ../programs/*.c --dictID -o 1 tmpDict1 && die "wrong order : --dictID must be followed by argument "
$ECHO "- Create dictionary with size limit"
$ZSTD --train *.c ../programs/*.c -o tmpDict2 --maxdict=4K -v
+$ECHO "- Create dictionary with small size limit"
+$ZSTD --train *.c ../programs/*.c -o tmpDict3 --maxdict=1K -v
$ECHO "- Create dictionary with wrong parameter order (must fail)"
-$ZSTD --train *.c ../programs/*.c -o tmpDict2 --maxdict -v 4K && die "wrong order : --maxdict must be followed by argument "
+$ZSTD --train *.c ../programs/*.c -o tmpDict3 --maxdict -v 4K && die "wrong order : --maxdict must be followed by argument "
$ECHO "- Compress without dictID"
$ZSTD -f tmp -D tmpDict1 --no-dictID
$ZSTD -d tmp.zst -D tmpDict -fo result
@@ -551,6 +569,15 @@ roundTripTest -g516K 19 # btopt
fileRoundTripTest -g500K
+$ECHO "\n**** zstd long distance matching round-trip tests **** "
+roundTripTest -g0 "2 --long"
+roundTripTest -g1000K "1 --long"
+roundTripTest -g517K "6 --long"
+roundTripTest -g516K "16 --long"
+roundTripTest -g518K "19 --long"
+fileRoundTripTest -g5M "3 --long"
+
+
if [ -n "$hasMT" ]
then
$ECHO "\n**** zstdmt round-trip tests **** "
@@ -558,6 +585,9 @@ then
roundTripTest -g8M "3 -T2"
roundTripTest -g8000K "2 --threads=2"
fileRoundTripTest -g4M "19 -T2 -B1M"
+
+ $ECHO "\n**** zstdmt long distance matching round-trip tests **** "
+ roundTripTest -g8M "3 --long -T2"
else
$ECHO "\n**** no multithreading, skipping zstdmt tests **** "
fi
@@ -568,7 +598,6 @@ $ECHO "\n**** zstd --list/-l single frame tests ****"
./datagen > tmp1
./datagen > tmp2
./datagen > tmp3
-./datagen > tmp4
$ZSTD tmp*
$ZSTD -l *.zst
$ZSTD -lv *.zst
@@ -577,9 +606,7 @@ $ZSTD --list -v *.zst
$ECHO "\n**** zstd --list/-l multiple frame tests ****"
cat tmp1.zst tmp2.zst > tmp12.zst
-cat tmp3.zst tmp4.zst > tmp34.zst
-cat tmp12.zst tmp34.zst > tmp1234.zst
-cat tmp12.zst tmp4.zst > tmp124.zst
+cat tmp12.zst tmp3.zst > tmp123.zst
$ZSTD -l *.zst
$ZSTD -lv *.zst
$ZSTD --list *.zst
@@ -589,7 +616,7 @@ $ECHO "\n**** zstd --list/-l error detection tests ****"
! $ZSTD -l tmp1 tmp1.zst
! $ZSTD --list tmp*
! $ZSTD -lv tmp1*
-! $ZSTD --list -v tmp2 tmp23.zst
+! $ZSTD --list -v tmp2 tmp12.zst
$ECHO "\n**** zstd --list/-l test with null files ****"
./datagen -g0 > tmp5
@@ -612,47 +639,70 @@ $ZSTD -lv tmp1.zst
rm tmp*
+$ECHO "\n**** zstd long distance matching tests **** "
+roundTripTest -g0 " --long"
+roundTripTest -g9M "2 --long"
+# Test parameter parsing
+roundTripTest -g1M -P50 "1 --long=29" " --memory=512MB"
+roundTripTest -g1M -P50 "1 --long=29 --zstd=wlog=28" " --memory=256MB"
+roundTripTest -g1M -P50 "1 --long=29" " --long=28 --memory=512MB"
+roundTripTest -g1M -P50 "1 --long=29" " --zstd=wlog=28 --memory=512MB"
+
+
if [ "$1" != "--test-large-data" ]; then
$ECHO "Skipping large data tests"
exit 0
fi
+$ECHO "\n**** large files tests **** "
+
roundTripTest -g270000000 1
-roundTripTest -g270000000 2
-roundTripTest -g270000000 3
+roundTripTest -g250000000 2
+roundTripTest -g230000000 3
roundTripTest -g140000000 -P60 4
-roundTripTest -g140000000 -P60 5
-roundTripTest -g140000000 -P60 6
+roundTripTest -g130000000 -P62 5
+roundTripTest -g120000000 -P65 6
roundTripTest -g70000000 -P70 7
-roundTripTest -g70000000 -P70 8
-roundTripTest -g70000000 -P70 9
+roundTripTest -g60000000 -P71 8
+roundTripTest -g50000000 -P73 9
roundTripTest -g35000000 -P75 10
-roundTripTest -g35000000 -P75 11
-roundTripTest -g35000000 -P75 12
+roundTripTest -g30000000 -P76 11
+roundTripTest -g25000000 -P78 12
roundTripTest -g18000013 -P80 13
roundTripTest -g18000014 -P80 14
-roundTripTest -g18000015 -P80 15
-roundTripTest -g18000016 -P80 16
-roundTripTest -g18000017 -P80 17
+roundTripTest -g18000015 -P81 15
+roundTripTest -g18000016 -P84 16
+roundTripTest -g18000017 -P88 17
roundTripTest -g18000018 -P94 18
-roundTripTest -g18000019 -P94 19
+roundTripTest -g18000019 -P96 19
-roundTripTest -g68000020 -P99 20
-roundTripTest -g6000000000 -P99 1
+roundTripTest -g5000000000 -P99 1
fileRoundTripTest -g4193M -P99 1
+
+$ECHO "\n**** zstd long, long distance matching round-trip tests **** "
+roundTripTest -g270000000 "1 --long"
+roundTripTest -g130000000 -P60 "5 --long"
+roundTripTest -g35000000 -P70 "8 --long"
+roundTripTest -g18000001 -P80 "18 --long"
+# Test large window logs
+roundTripTest -g700M -P50 "1 --long=29"
+roundTripTest -g600M -P50 "1 --long --zstd=wlog=29,clog=28"
+
+
if [ -n "$hasMT" ]
then
$ECHO "\n**** zstdmt long round-trip tests **** "
- roundTripTest -g99000000 -P99 "20 -T2"
- roundTripTest -g6000000000 -P99 "1 -T2"
- roundTripTest -g1500000000 -P97 "1 -T999"
- fileRoundTripTest -g4195M -P98 " -T0"
+ roundTripTest -g80000000 -P99 "19 -T2" " "
+ roundTripTest -g5000000000 -P99 "1 -T2" " "
+ roundTripTest -g500000000 -P97 "1 -T999" " "
+ fileRoundTripTest -g4103M -P98 " -T0" " "
+ roundTripTest -g400000000 -P97 "1 --long=24 -T2" " "
else
$ECHO "\n**** no multithreading, skipping zstdmt tests **** "
fi
diff --git a/tests/poolTests.c b/tests/poolTests.c
index f3d5c382ae57..00ee830154c9 100644
--- a/tests/poolTests.c
+++ b/tests/poolTests.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/tests/roundTripCrash.c b/tests/roundTripCrash.c
index 0b478f6cab3e..180fa9b6f6dc 100644
--- a/tests/roundTripCrash.c
+++ b/tests/roundTripCrash.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/*
@@ -20,9 +21,12 @@
#include <stddef.h> /* size_t */
#include <stdlib.h> /* malloc, free, exit */
#include <stdio.h> /* fprintf */
+#include <string.h> /* strcmp */
#include <sys/types.h> /* stat */
#include <sys/stat.h> /* stat */
#include "xxhash.h"
+
+#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
/*===========================================
@@ -30,6 +34,24 @@
*==========================================*/
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+static void crash(int errorCode){
+ /* abort if AFL/libfuzzer, exit otherwise */
+ #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* could also use __AFL_COMPILER */
+ abort();
+ #else
+ exit(errorCode);
+ #endif
+}
+
+#define CHECK_Z(f) { \
+ size_t const err = f; \
+ if (ZSTD_isError(err)) { \
+ fprintf(stderr, \
+ "Error=> %s: %s", \
+ #f, ZSTD_getErrorName(err)); \
+ crash(1); \
+} }
+
/** roundTripTest() :
* Compresses `srcBuff` into `compressedBuff`,
* then decompresses `compressedBuff` into `resultBuff`.
@@ -54,6 +76,38 @@ static size_t roundTripTest(void* resultBuff, size_t resultBuffCapacity,
return ZSTD_decompress(resultBuff, resultBuffCapacity, compressedBuff, cSize);
}
+/** cctxParamRoundTripTest() :
+ * Same as roundTripTest(), except it allows experimenting with ZSTD_CCtx_params. */
+static size_t cctxParamRoundTripTest(void* resultBuff, size_t resultBuffCapacity,
+ void* compressedBuff, size_t compressedBuffCapacity,
+ const void* srcBuff, size_t srcBuffSize)
+{
+ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ ZSTD_CCtx_params* const cctxParams = ZSTD_createCCtxParams();
+ ZSTD_inBuffer inBuffer = { srcBuff, srcBuffSize, 0 };
+ ZSTD_outBuffer outBuffer = {compressedBuff, compressedBuffCapacity, 0 };
+
+ static const int maxClevel = 19;
+ size_t const hashLength = MIN(128, srcBuffSize);
+ unsigned const h32 = XXH32(srcBuff, hashLength, 0);
+ int const cLevel = h32 % maxClevel;
+
+ /* Set parameters */
+ CHECK_Z( ZSTD_CCtxParam_setParameter(cctxParams, ZSTD_p_compressionLevel, cLevel) );
+ CHECK_Z( ZSTD_CCtxParam_setParameter(cctxParams, ZSTD_p_nbThreads, 2) );
+ CHECK_Z( ZSTD_CCtxParam_setParameter(cctxParams, ZSTD_p_overlapSizeLog, 5) );
+
+
+ /* Apply parameters */
+ CHECK_Z( ZSTD_CCtx_setParametersUsingCCtxParams(cctx, cctxParams) );
+
+ CHECK_Z (ZSTD_compress_generic(cctx, &outBuffer, &inBuffer, ZSTD_e_end) );
+
+ ZSTD_freeCCtxParams(cctxParams);
+ ZSTD_freeCCtx(cctx);
+
+ return ZSTD_decompress(resultBuff, resultBuffCapacity, compressedBuff, outBuffer.pos);
+}
static size_t checkBuffers(const void* buff1, const void* buff2, size_t buffSize)
{
@@ -68,16 +122,7 @@ static size_t checkBuffers(const void* buff1, const void* buff2, size_t buffSize
return pos;
}
-static void crash(int errorCode){
- /* abort if AFL/libfuzzer, exit otherwise */
- #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* could also use __AFL_COMPILER */
- abort();
- #else
- exit(errorCode);
- #endif
-}
-
-static void roundTripCheck(const void* srcBuff, size_t srcBuffSize)
+static void roundTripCheck(const void* srcBuff, size_t srcBuffSize, int testCCtxParams)
{
size_t const cBuffSize = ZSTD_compressBound(srcBuffSize);
void* cBuff = malloc(cBuffSize);
@@ -88,7 +133,9 @@ static void roundTripCheck(const void* srcBuff, size_t srcBuffSize)
exit (1);
}
- { size_t const result = roundTripTest(rBuff, cBuffSize, cBuff, cBuffSize, srcBuff, srcBuffSize);
+ { size_t const result = testCCtxParams ?
+ cctxParamRoundTripTest(rBuff, cBuffSize, cBuff, cBuffSize, srcBuff, srcBuffSize)
+ : roundTripTest(rBuff, cBuffSize, cBuff, cBuffSize, srcBuff, srcBuffSize);
if (ZSTD_isError(result)) {
fprintf(stderr, "roundTripTest error : %s \n", ZSTD_getErrorName(result));
crash(1);
@@ -162,7 +209,7 @@ static void loadFile(void* buffer, const char* fileName, size_t fileSize)
}
-static void fileCheck(const char* fileName)
+static void fileCheck(const char* fileName, int testCCtxParams)
{
size_t const fileSize = getFileSize(fileName);
void* buffer = malloc(fileSize);
@@ -171,16 +218,24 @@ static void fileCheck(const char* fileName)
exit(4);
}
loadFile(buffer, fileName, fileSize);
- roundTripCheck(buffer, fileSize);
+ roundTripCheck(buffer, fileSize, testCCtxParams);
free (buffer);
}
int main(int argCount, const char** argv) {
+ int argNb = 1;
+ int testCCtxParams = 0;
if (argCount < 2) {
fprintf(stderr, "Error : no argument : need input file \n");
exit(9);
}
- fileCheck(argv[1]);
+
+ if (!strcmp(argv[argNb], "--cctxParams")) {
+ testCCtxParams = 1;
+ argNb++;
+ }
+
+ fileCheck(argv[argNb], testCCtxParams);
fprintf(stderr, "no pb detected\n");
return 0;
}
diff --git a/tests/symbols.c b/tests/symbols.c
index f08542dbf669..c0bed2e5d96e 100644
--- a/tests/symbols.c
+++ b/tests/symbols.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/tests/test-zstd-speed.py b/tests/test-zstd-speed.py
index 56108a5cae4c..1096d5e4e224 100755
--- a/tests/test-zstd-speed.py
+++ b/tests/test-zstd-speed.py
@@ -1,13 +1,13 @@
#! /usr/bin/env python3
-#
+# ################################################################
# Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
# All rights reserved.
#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ##########################################################################
# Limitations:
# - doesn't support filenames with spaces
diff --git a/tests/test-zstd-versions.py b/tests/test-zstd-versions.py
index a5a713009a93..f2deac1f28da 100755
--- a/tests/test-zstd-versions.py
+++ b/tests/test-zstd-versions.py
@@ -1,14 +1,14 @@
#!/usr/bin/env python3
"""Test zstd interoperability between versions"""
-#
+# ################################################################
# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
# All rights reserved.
#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# ################################################################
import filecmp
import glob
diff --git a/tests/zbufftest.c b/tests/zbufftest.c
index fe08fdab5bed..e64c886a5e75 100644
--- a/tests/zbufftest.c
+++ b/tests/zbufftest.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/tests/zstreamtest.c b/tests/zstreamtest.c
index dd044342ea80..8b4c836940a6 100644
--- a/tests/zstreamtest.c
+++ b/tests/zstreamtest.c
@@ -5,6 +5,7 @@
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -95,6 +96,15 @@ unsigned int FUZ_rand(unsigned int* seedPtr)
return rand32 >> 5;
}
+#define CHECK_Z(f) { \
+ size_t const err = f; \
+ if (ZSTD_isError(err)) { \
+ DISPLAY("Error => %s : %s ", \
+ #f, ZSTD_getErrorName(err)); \
+ DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \
+ goto _output_error; \
+} }
+
/*======================================================
* Basic Unit tests
@@ -149,6 +159,8 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
U32 testNb = 1;
ZSTD_CStream* zc = ZSTD_createCStream_advanced(customMem);
ZSTD_DStream* zd = ZSTD_createDStream_advanced(customMem);
+ ZSTDMT_CCtx* mtctx = ZSTDMT_createCCtx(2);
+
ZSTD_inBuffer inBuff, inBuff2;
ZSTD_outBuffer outBuff;
buffer_t dictionary = g_nullBuffer;
@@ -163,8 +175,8 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
RDG_genBuffer(CNBuffer, CNBufferSize, compressibility, 0., seed);
/* Create dictionary */
- MEM_STATIC_ASSERT(COMPRESSIBLE_NOISE_LENGTH >= 4 MB);
- dictionary = FUZ_createDictionary(CNBuffer, 4 MB, 4 KB, 40 KB);
+ DISPLAYLEVEL(3, "creating dictionary for unit tests \n");
+ dictionary = FUZ_createDictionary(CNBuffer, CNBufferSize / 2, 8 KB, 40 KB);
if (!dictionary.start) {
DISPLAY("Error creating dictionary, aborting \n");
goto _output_error;
@@ -178,16 +190,14 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
/* Basic compression test */
DISPLAYLEVEL(3, "test%3i : compress %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH);
- { size_t const r = ZSTD_initCStream_usingDict(zc, CNBuffer, dictSize, 1);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_initCStream_usingDict(zc, CNBuffer, dictSize, 1) );
outBuff.dst = (char*)(compressedBuffer)+cSize;
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */
@@ -197,11 +207,11 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
/* context size functions */
DISPLAYLEVEL(3, "test%3i : estimate CStream size : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBufferSize, dictSize);
- size_t const s = ZSTD_estimateCStreamSize_advanced(cParams)
- /* uses ZSTD_initCStream_usingDict() */
- + ZSTD_estimateCDictSize_advanced(dictSize, cParams, 0);
- if (ZSTD_isError(s)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
+ size_t const cstreamSize = ZSTD_estimateCStreamSize_usingCParams(cParams);
+ size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); /* uses ZSTD_initCStream_usingDict() */
+ if (ZSTD_isError(cstreamSize)) goto _output_error;
+ if (ZSTD_isError(cdictSize)) goto _output_error;
+ DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)(cstreamSize + cdictSize));
}
DISPLAYLEVEL(3, "test%3i : check actual CStream size : ", testNb++);
@@ -222,8 +232,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
/* skippable frame test */
DISPLAYLEVEL(3, "test%3i : decompress skippable frame : ", testNb++);
- if (ZSTD_isError( ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize) ))
- goto _output_error;
+ CHECK_Z( ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize) );
inBuff.src = compressedBuffer;
inBuff.size = cSize;
inBuff.pos = 0;
@@ -241,8 +250,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
inBuff2 = inBuff;
DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH);
ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
- { size_t const r = ZSTD_setDStreamParameter(zd, DStream_p_maxWindowSize, 1000000000); /* large limit */
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_setDStreamParameter(zd, DStream_p_maxWindowSize, 1000000000) ); /* large limit */
{ size_t const remaining = ZSTD_decompressStream(zd, &outBuff, &inBuff);
if (remaining != 0) goto _output_error; } /* should reach end of frame == 0; otherwise, some data left, or an error */
if (outBuff.pos != CNBufferSize) goto _output_error; /* should regenerate the same amount */
@@ -275,7 +283,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
DISPLAYLEVEL(5, " (windowSize : %u) ", (U32)fhi.windowSize);
{ size_t const s = ZSTD_estimateDStreamSize(fhi.windowSize)
/* uses ZSTD_initDStream_usingDict() */
- + ZSTD_estimateDDictSize(dictSize, 0);
+ + ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
if (ZSTD_isError(s)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
} }
@@ -332,8 +340,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */
@@ -350,8 +357,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (ZSTD_getErrorCode(r) != ZSTD_error_srcSize_wrong) goto _output_error; /* must fail : wrong srcSize */
@@ -373,8 +379,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
outBuff.size = ZSTD_compressBound(inSize);
outBuff.pos = 0;
DISPLAYLEVEL(5, "compress1 ");
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
DISPLAYLEVEL(5, "end1 ");
{ size_t const r = ZSTD_endStream(zc, &outBuff);
@@ -391,8 +396,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
outBuff.size = ZSTD_compressBound(inSize);
outBuff.pos = 0;
DISPLAYLEVEL(5, "compress2 ");
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
DISPLAYLEVEL(5, "end2 ");
{ size_t const r = ZSTD_endStream(zc, &outBuff);
@@ -412,8 +416,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */
@@ -462,8 +465,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
/* Memory restriction */
DISPLAYLEVEL(3, "test%3i : maxWindowSize < frame requirement : ", testNb++);
ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
- { size_t const r = ZSTD_setDStreamParameter(zd, DStream_p_maxWindowSize, 1000); /* too small limit */
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_setDStreamParameter(zd, DStream_p_maxWindowSize, 1000) ); /* too small limit */
inBuff.src = compressedBuffer;
inBuff.size = cSize;
inBuff.pos = 0;
@@ -477,7 +479,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
DISPLAYLEVEL(3, "test%3i : ZSTD_initCStream_usingCDict_advanced with masked dictID : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBufferSize, dictionary.filled);
ZSTD_frameParameters const fParams = { 1 /* contentSize */, 1 /* checksum */, 1 /* noDictID */};
- ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictionary.start, dictionary.filled, 1 /* byReference */, ZSTD_dm_auto, cParams, customMem);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictionary.start, dictionary.filled, ZSTD_dlm_byRef, ZSTD_dm_auto, cParams, customMem);
size_t const initError = ZSTD_initCStream_usingCDict_advanced(zc, cdict, fParams, CNBufferSize);
if (ZSTD_isError(initError)) goto _output_error;
cSize = 0;
@@ -487,8 +489,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */
@@ -510,16 +511,14 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
}
DISPLAYLEVEL(3, "test%3i : compress with ZSTD_CCtx_refPrefix : ", testNb++);
- { size_t const refErr = ZSTD_CCtx_refPrefix(zc, dictionary.start, dictionary.filled);
- if (ZSTD_isError(refErr)) goto _output_error; }
+ CHECK_Z( ZSTD_CCtx_refPrefix(zc, dictionary.start, dictionary.filled) );
outBuff.dst = compressedBuffer;
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compress_generic(zc, &outBuff, &inBuff, ZSTD_e_end);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compress_generic(zc, &outBuff, &inBuff, ZSTD_e_end) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
cSize = outBuff.pos;
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
@@ -546,23 +545,20 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
inBuff.src = CNBuffer;
inBuff.size = CNBufferSize;
inBuff.pos = 0;
- { size_t const r = ZSTD_compress_generic(zc, &outBuff, &inBuff, ZSTD_e_end);
- if (ZSTD_isError(r)) goto _output_error; }
+ CHECK_Z( ZSTD_compress_generic(zc, &outBuff, &inBuff, ZSTD_e_end) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
cSize = outBuff.pos;
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
DISPLAYLEVEL(3, "test%3i : decompress without dictionary (should work): ", testNb++);
- { size_t const r = ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize);
- if (ZSTD_isError(r)) goto _output_error; /* must fail : dictionary not used */
- DISPLAYLEVEL(3, "OK \n");
- }
+ CHECK_Z( ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize) );
+ DISPLAYLEVEL(3, "OK \n");
/* Empty srcSize */
DISPLAYLEVEL(3, "test%3i : ZSTD_initCStream_advanced with pledgedSrcSize=0 and dict : ", testNb++);
{ ZSTD_parameters params = ZSTD_getParams(5, 0, 0);
params.fParams.contentSizeFlag = 1;
- ZSTD_initCStream_advanced(zc, dictionary.start, dictionary.filled, params, 0);
+ CHECK_Z( ZSTD_initCStream_advanced(zc, dictionary.start, dictionary.filled, params, 0) );
} /* cstream advanced shall write content size = 0 */
inBuff.src = CNBuffer;
inBuff.size = 0;
@@ -570,7 +566,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
outBuff.dst = compressedBuffer;
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
- if (ZSTD_isError(ZSTD_compressStream(zc, &outBuff, &inBuff))) goto _output_error;
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (ZSTD_endStream(zc, &outBuff) != 0) goto _output_error;
cSize = outBuff.pos;
if (ZSTD_findDecompressedSize(compressedBuffer, cSize) != 0) goto _output_error;
@@ -579,7 +575,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
DISPLAYLEVEL(3, "test%3i : pledgedSrcSize == 0 behaves properly : ", testNb++);
{ ZSTD_parameters params = ZSTD_getParams(5, 0, 0);
params.fParams.contentSizeFlag = 1;
- ZSTD_initCStream_advanced(zc, NULL, 0, params, 0);
+ CHECK_Z( ZSTD_initCStream_advanced(zc, NULL, 0, params, 0) );
} /* cstream advanced shall write content size = 0 */
inBuff.src = CNBuffer;
inBuff.size = 0;
@@ -587,7 +583,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
outBuff.dst = compressedBuffer;
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
- if (ZSTD_isError(ZSTD_compressStream(zc, &outBuff, &inBuff))) goto _output_error;
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (ZSTD_endStream(zc, &outBuff) != 0) goto _output_error;
cSize = outBuff.pos;
if (ZSTD_findDecompressedSize(compressedBuffer, cSize) != 0) goto _output_error;
@@ -599,12 +595,30 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
outBuff.dst = compressedBuffer;
outBuff.size = compressedBufferSize;
outBuff.pos = 0;
- if (ZSTD_isError(ZSTD_compressStream(zc, &outBuff, &inBuff))) goto _output_error;
+ CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (ZSTD_endStream(zc, &outBuff) != 0) goto _output_error;
cSize = outBuff.pos;
if (ZSTD_findDecompressedSize(compressedBuffer, cSize) != ZSTD_CONTENTSIZE_UNKNOWN) goto _output_error;
DISPLAYLEVEL(3, "OK \n");
+ /* Basic multithreading compression test */
+ DISPLAYLEVEL(3, "test%3i : compress %u bytes with multiple threads : ", testNb++, COMPRESSIBLE_NOISE_LENGTH);
+ { ZSTD_parameters const params = ZSTD_getParams(1, 0, 0);
+ CHECK_Z( ZSTDMT_initCStream_advanced(mtctx, CNBuffer, dictSize, params, CNBufferSize) );
+ }
+ outBuff.dst = (char*)(compressedBuffer);
+ outBuff.size = compressedBufferSize;
+ outBuff.pos = 0;
+ inBuff.src = CNBuffer;
+ inBuff.size = CNBufferSize;
+ inBuff.pos = 0;
+ CHECK_Z( ZSTDMT_compressStream_generic(mtctx, &outBuff, &inBuff, ZSTD_e_end) );
+ if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
+ { size_t const r = ZSTDMT_endStream(mtctx, &outBuff);
+ if (r != 0) goto _output_error; } /* error, or some data not flushed */
+ DISPLAYLEVEL(3, "OK \n");
+
+
/* Overlen overwriting window data bug */
DISPLAYLEVEL(3, "test%3i : wildcopy doesn't overwrite potential match data : ", testNb++);
{ /* This test has a window size of 1024 bytes and consists of 3 blocks:
@@ -619,9 +633,10 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
"\x28\xB5\x2F\xFD\x04\x00\x4C\x00\x00\x10\x61\x61\x01\x00\x00\x2A"
"\x80\x05\x44\x00\x00\x08\x62\x01\x00\x00\x2A\x20\x04\x5D\x00\x00"
"\x00\x03\x40\x00\x00\x64\x60\x27\xB0\xE0\x0C\x67\x62\xCE\xE0";
- ZSTD_DStream* zds = ZSTD_createDStream();
+ ZSTD_DStream* const zds = ZSTD_createDStream();
+ if (zds==NULL) goto _output_error;
- ZSTD_initDStream(zds);
+ CHECK_Z( ZSTD_initDStream(zds) );
inBuff.src = testCase;
inBuff.size = 47;
inBuff.pos = 0;
@@ -630,9 +645,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
outBuff.pos = 0;
while (inBuff.pos < inBuff.size) {
- size_t const r = ZSTD_decompressStream(zds, &outBuff, &inBuff);
- /* Bug will cause checksum to fail */
- if (ZSTD_isError(r)) goto _output_error;
+ CHECK_Z( ZSTD_decompressStream(zds, &outBuff, &inBuff) );
}
ZSTD_freeDStream(zds);
@@ -643,6 +656,7 @@ _end:
FUZ_freeDictionary(dictionary);
ZSTD_freeCStream(zc);
ZSTD_freeDStream(zd);
+ ZSTDMT_freeCCtx(mtctx);
free(CNBuffer);
free(compressedBuffer);
free(decodedBuffer);
@@ -687,6 +701,13 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+/* Return value in range minVal <= v <= maxVal */
+static U32 FUZ_randomClampedLength(U32* seed, U32 minVal, U32 maxVal)
+{
+ U32 const mod = maxVal < minVal ? 1 : (maxVal + 1) - minVal;
+ return (U32)((FUZ_rand(seed) % mod) + minVal);
+}
+
#define CHECK(cond, ...) { \
if (cond) { \
DISPLAY("Error => "); \
@@ -695,35 +716,26 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
goto _output_error; \
} }
-#define CHECK_Z(f) { \
- size_t const err = f; \
- if (ZSTD_isError(err)) { \
- DISPLAY("Error => %s : %s ", \
- #f, ZSTD_getErrorName(err)); \
- DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \
- goto _output_error; \
-} }
-
static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests)
{
U32 const maxSrcLog = bigTests ? 24 : 22;
static const U32 maxSampleLog = 19;
size_t const srcBufferSize = (size_t)1<<maxSrcLog;
BYTE* cNoiseBuffer[5];
- size_t const copyBufferSize= srcBufferSize + (1<<maxSampleLog);
+ size_t const copyBufferSize = srcBufferSize + (1<<maxSampleLog);
BYTE* const copyBuffer = (BYTE*)malloc (copyBufferSize);
- size_t const cBufferSize = ZSTD_compressBound(srcBufferSize);
+ size_t const cBufferSize = ZSTD_compressBound(srcBufferSize);
BYTE* const cBuffer = (BYTE*)malloc (cBufferSize);
size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0;
U32 testNb = 0;
U32 coreSeed = seed;
- ZSTD_CStream* zc = ZSTD_createCStream(); /* will be reset sometimes */
- ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */
+ ZSTD_CStream* zc = ZSTD_createCStream(); /* will be re-created sometimes */
+ ZSTD_DStream* zd = ZSTD_createDStream(); /* will be re-created sometimes */
ZSTD_DStream* const zd_noise = ZSTD_createDStream();
clock_t const startClock = clock();
- const BYTE* dict=NULL; /* can keep same dict on 2 consecutive tests */
+ const BYTE* dict = NULL; /* can keep same dict on 2 consecutive tests */
size_t dictSize = 0;
U32 oldTestLog = 0;
U32 const cLevelMax = bigTests ? (U32)ZSTD_maxCLevel() : g_cLevelMax_smallTests;
@@ -801,8 +813,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
if ((FUZ_rand(&lseed)&1) /* at beginning, to keep same nb of rand */
&& oldTestLog /* at least one test happened */ && resetAllowed) {
maxTestSize = FUZ_randomLength(&lseed, oldTestLog+2);
- if (maxTestSize >= srcBufferSize)
- maxTestSize = srcBufferSize-1;
+ maxTestSize = MIN(maxTestSize, srcBufferSize-16);
{ U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? 0 : maxTestSize;
CHECK_Z( ZSTD_resetCStream(zc, pledgedSrcSize) );
}
@@ -884,12 +895,18 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
size_t const dstBuffSize = MIN(dstBufferSize - totalGenSize, randomDstSize);
inBuff.size = inBuff.pos + readCSrcSize;
- outBuff.size = inBuff.pos + dstBuffSize;
+ outBuff.size = outBuff.pos + dstBuffSize;
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
- CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
+ if (ZSTD_getErrorCode(decompressionResult) == ZSTD_error_checksum_wrong) {
+ DISPLAY("checksum error : \n");
+ findDiff(copyBuffer, dstBuffer, totalTestSize);
+ }
+ CHECK( ZSTD_isError(decompressionResult), "decompression error : %s",
+ ZSTD_getErrorName(decompressionResult) );
}
CHECK (decompressionResult != 0, "frame not fully decoded");
- CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size")
+ CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)",
+ (U32)outBuff.pos, (U32)totalTestSize);
CHECK (inBuff.pos != cSize, "compressed data should be fully read")
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
@@ -962,15 +979,16 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
U32 result = 0;
U32 testNb = 0;
U32 coreSeed = seed;
- ZSTDMT_CCtx* zc = ZSTDMT_createCCtx(2); /* will be reset sometimes */
+ U32 nbThreads = 2;
+ ZSTDMT_CCtx* zc = ZSTDMT_createCCtx(nbThreads); /* will be reset sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */
ZSTD_DStream* const zd_noise = ZSTD_createDStream();
clock_t const startClock = clock();
const BYTE* dict=NULL; /* can keep same dict on 2 consecutive tests */
size_t dictSize = 0;
U32 oldTestLog = 0;
- U32 const cLevelMax = bigTests ? (U32)ZSTD_maxCLevel() : g_cLevelMax_smallTests;
- U32 const nbThreadsMax = bigTests ? 5 : 2;
+ int const cLevelMax = bigTests ? (U32)ZSTD_maxCLevel()-1 : g_cLevelMax_smallTests;
+ U32 const nbThreadsMax = bigTests ? 4 : 2;
/* allocations */
cNoiseBuffer[0] = (BYTE*)malloc (srcBufferSize);
@@ -1006,16 +1024,16 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
size_t maxTestSize;
/* init */
- if (nbTests >= testNb) { DISPLAYUPDATE(2, "\r%6u/%6u ", testNb, nbTests); }
- else { DISPLAYUPDATE(2, "\r%6u ", testNb); }
+ if (testNb < nbTests) {
+ DISPLAYUPDATE(2, "\r%6u/%6u ", testNb, nbTests);
+ } else { DISPLAYUPDATE(2, "\r%6u ", testNb); }
FUZ_rand(&coreSeed);
lseed = coreSeed ^ prime32;
/* states full reset (deliberately not synchronized) */
/* some issues can only happen when reusing states */
if ((FUZ_rand(&lseed) & 0xFF) == 131) {
- U32 const nbThreadsCandidate = (FUZ_rand(&lseed) % 6) + 1;
- U32 const nbThreads = MIN(nbThreadsCandidate, nbThreadsMax);
+ nbThreads = (FUZ_rand(&lseed) % nbThreadsMax) + 1;
DISPLAYLEVEL(5, "Creating new context with %u threads \n", nbThreads);
ZSTDMT_freeCCtx(zc);
zc = ZSTDMT_createCCtx(nbThreads);
@@ -1055,11 +1073,12 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
} else {
U32 const testLog = FUZ_rand(&lseed) % maxSrcLog;
U32 const dictLog = FUZ_rand(&lseed) % maxSrcLog;
- U32 const cLevelCandidate = (FUZ_rand(&lseed) %
- (ZSTD_maxCLevel() -
- (MAX(testLog, dictLog) / 3))) +
- 1;
- U32 const cLevel = MIN(cLevelCandidate, cLevelMax);
+ int const cLevelCandidate = ( FUZ_rand(&lseed)
+ % (ZSTD_maxCLevel() - (MAX(testLog, dictLog) / 2)) )
+ + 1;
+ int const cLevelThreadAdjusted = cLevelCandidate - (nbThreads * 2) + 2; /* reduce cLevel when using multiple threads, to limit memory consumption */
+ int const cLevelMin = MAX(cLevelThreadAdjusted, 1); /* no negative cLevel yet */
+ int const cLevel = MIN(cLevelMin, cLevelMax);
maxTestSize = FUZ_rLogLength(&lseed, testLog);
oldTestLog = testLog;
/* random dictionary selection */
@@ -1142,7 +1161,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
size_t const dstBuffSize = MIN(dstBufferSize - totalGenSize, randomDstSize);
inBuff.size = inBuff.pos + readCSrcSize;
- outBuff.size = inBuff.pos + dstBuffSize;
+ outBuff.size = outBuff.pos + dstBuffSize;
DISPLAYLEVEL(5, "ZSTD_decompressStream input %u bytes \n", (U32)readCSrcSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
@@ -1204,9 +1223,21 @@ _output_error:
goto _cleanup;
}
+/** If useOpaqueAPI, sets param in cctxParams.
+ * Otherwise, sets the param in zc. */
+static size_t setCCtxParameter(ZSTD_CCtx* zc, ZSTD_CCtx_params* cctxParams,
+ ZSTD_cParameter param, unsigned value,
+ U32 useOpaqueAPI)
+{
+ if (useOpaqueAPI) {
+ return ZSTD_CCtxParam_setParameter(cctxParams, param, value);
+ } else {
+ return ZSTD_CCtx_setParameter(zc, param, value);
+ }
+}
/* Tests for ZSTD_compress_generic() API */
-static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests)
+static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests, U32 const useOpaqueAPI)
{
U32 const maxSrcLog = bigTests ? 24 : 22;
static const U32 maxSampleLog = 19;
@@ -1228,8 +1259,10 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
const BYTE* dict = NULL; /* can keep same dict on 2 consecutive tests */
size_t dictSize = 0;
U32 oldTestLog = 0;
- U32 const cLevelMax = bigTests ? (U32)ZSTD_maxCLevel() : g_cLevelMax_smallTests;
- U32 const nbThreadsMax = bigTests ? 5 : 1;
+ U32 windowLogMalus = 0; /* can survive between 2 loops */
+ U32 const cLevelMax = bigTests ? (U32)ZSTD_maxCLevel()-1 : g_cLevelMax_smallTests;
+ U32 const nbThreadsMax = bigTests ? 4 : 2;
+ ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams();
/* allocations */
cNoiseBuffer[0] = (BYTE*)malloc (srcBufferSize);
@@ -1269,6 +1302,7 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
else { DISPLAYUPDATE(2, "\r%6u ", testNb); }
FUZ_rand(&coreSeed);
lseed = coreSeed ^ prime32;
+ DISPLAYLEVEL(5, " *** Test %u *** \n", testNb);
/* states full reset (deliberately not synchronized) */
/* some issues can only happen when reusing states */
@@ -1308,17 +1342,19 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
maxTestSize = FUZ_randomLength(&lseed, oldTestLog+2);
if (maxTestSize >= srcBufferSize) maxTestSize = srcBufferSize-1;
{ int const compressionLevel = (FUZ_rand(&lseed) % 5) + 1;
- CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_compressionLevel, compressionLevel) );
+ CHECK_Z (setCCtxParameter(zc, cctxParams, ZSTD_p_compressionLevel, compressionLevel, useOpaqueAPI) );
}
} else {
U32 const testLog = FUZ_rand(&lseed) % maxSrcLog;
U32 const dictLog = FUZ_rand(&lseed) % maxSrcLog;
U32 const cLevelCandidate = (FUZ_rand(&lseed) %
(ZSTD_maxCLevel() -
- (MAX(testLog, dictLog) / 3))) +
+ (MAX(testLog, dictLog) / 2))) +
1;
U32 const cLevel = MIN(cLevelCandidate, cLevelMax);
+ DISPLAYLEVEL(5, "t%u: cLevel : %u \n", testNb, cLevel);
maxTestSize = FUZ_rLogLength(&lseed, testLog);
+ DISPLAYLEVEL(5, "t%u: maxTestSize : %u \n", testNb, (U32)maxTestSize);
oldTestLog = testLog;
/* random dictionary selection */
dictSize = ((FUZ_rand(&lseed)&63)==1) ? FUZ_rLogLength(&lseed, dictLog) : 0;
@@ -1328,51 +1364,90 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
}
{ U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? ZSTD_CONTENTSIZE_UNKNOWN : maxTestSize;
ZSTD_compressionParameters cParams = ZSTD_getCParams(cLevel, pledgedSrcSize, dictSize);
+ static const U32 windowLogMax = 24;
/* mess with compression parameters */
cParams.windowLog += (FUZ_rand(&lseed) & 3) - 1;
+ cParams.windowLog = MIN(windowLogMax, cParams.windowLog);
cParams.hashLog += (FUZ_rand(&lseed) & 3) - 1;
cParams.chainLog += (FUZ_rand(&lseed) & 3) - 1;
cParams.searchLog += (FUZ_rand(&lseed) & 3) - 1;
cParams.searchLength += (FUZ_rand(&lseed) & 3) - 1;
- cParams.targetLength = (U32)(cParams.targetLength * (0.5 + ((double)(FUZ_rand(&lseed) & 127) / 128)));
+ cParams.targetLength = (U32)((cParams.targetLength + 1 ) * (0.5 + ((double)(FUZ_rand(&lseed) & 127) / 128)));
cParams = ZSTD_adjustCParams(cParams, 0, 0);
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_windowLog, cParams.windowLog) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_hashLog, cParams.hashLog) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_chainLog, cParams.chainLog) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_searchLog, cParams.searchLog) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_minMatch, cParams.searchLength) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_targetLength, cParams.targetLength) );
-
- /* unconditionally set, to be sync with decoder */
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_refDictContent, FUZ_rand(&lseed) & 1) );
if (FUZ_rand(&lseed) & 1) {
- CHECK_Z( ZSTD_CCtx_loadDictionary(zc, dict, dictSize) );
- if (dict && dictSize) {
- /* test that compression parameters are rejected (correctly) after loading a non-NULL dictionary */
- size_t const setError = ZSTD_CCtx_setParameter(zc, ZSTD_p_windowLog, cParams.windowLog-1) ;
- CHECK(!ZSTD_isError(setError), "ZSTD_CCtx_setParameter should have failed");
- } } else {
- CHECK_Z( ZSTD_CCtx_refPrefix(zc, dict, dictSize) );
+ CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_windowLog, cParams.windowLog, useOpaqueAPI) );
+ assert(cParams.windowLog >= ZSTD_WINDOWLOG_MIN); /* guaranteed by ZSTD_adjustCParams() */
+ windowLogMalus = (cParams.windowLog - ZSTD_WINDOWLOG_MIN) / 5;
+ DISPLAYLEVEL(5, "t%u: windowLog : %u \n", testNb, cParams.windowLog);
+ }
+ if (FUZ_rand(&lseed) & 1) {
+ CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_hashLog, cParams.hashLog, useOpaqueAPI) );
+ DISPLAYLEVEL(5, "t%u: hashLog : %u \n", testNb, cParams.hashLog);
+ }
+ if (FUZ_rand(&lseed) & 1) {
+ CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_chainLog, cParams.chainLog, useOpaqueAPI) );
+ DISPLAYLEVEL(5, "t%u: chainLog : %u \n", testNb, cParams.chainLog);
}
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_searchLog, cParams.searchLog, useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_minMatch, cParams.searchLength, useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_targetLength, cParams.targetLength, useOpaqueAPI) );
+
+ /* mess with long distance matching parameters */
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_enableLongDistanceMatching, FUZ_rand(&lseed) & 63, useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 3) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_ldmHashLog, FUZ_randomClampedLength(&lseed, ZSTD_HASHLOG_MIN, 23), useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 3) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_ldmMinMatch, FUZ_randomClampedLength(&lseed, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX), useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 3) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_ldmBucketSizeLog, FUZ_randomClampedLength(&lseed, 0, ZSTD_LDM_BUCKETSIZELOG_MAX), useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 3) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_ldmHashEveryLog, FUZ_randomClampedLength(&lseed, 0, ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN), useOpaqueAPI) );
/* mess with frame parameters */
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_checksumFlag, FUZ_rand(&lseed) & 1) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_dictIDFlag, FUZ_rand(&lseed) & 1) );
- if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_contentSizeFlag, FUZ_rand(&lseed) & 1) );
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_checksumFlag, FUZ_rand(&lseed) & 1, useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_dictIDFlag, FUZ_rand(&lseed) & 1, useOpaqueAPI) );
+ if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_contentSizeFlag, FUZ_rand(&lseed) & 1, useOpaqueAPI) );
if (FUZ_rand(&lseed) & 1) CHECK_Z( ZSTD_CCtx_setPledgedSrcSize(zc, pledgedSrcSize) );
- DISPLAYLEVEL(5, "pledgedSrcSize : %u \n", (U32)pledgedSrcSize);
+ DISPLAYLEVEL(5, "t%u: pledgedSrcSize : %u \n", testNb, (U32)pledgedSrcSize);
/* multi-threading parameters */
{ U32 const nbThreadsCandidate = (FUZ_rand(&lseed) & 4) + 1;
- U32 const nbThreads = MIN(nbThreadsCandidate, nbThreadsMax);
- CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_nbThreads, nbThreads) );
+ U32 const nbThreadsAdjusted = (windowLogMalus < nbThreadsCandidate) ? nbThreadsCandidate - windowLogMalus : 1;
+ U32 const nbThreads = MIN(nbThreadsAdjusted, nbThreadsMax);
+ CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_nbThreads, nbThreads, useOpaqueAPI) );
+ DISPLAYLEVEL(5, "t%u: nbThreads : %u \n", testNb, nbThreads);
if (nbThreads > 1) {
U32 const jobLog = FUZ_rand(&lseed) % (testLog+1);
- CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_overlapSizeLog, FUZ_rand(&lseed) % 10) );
- CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_p_jobSize, (U32)FUZ_rLogLength(&lseed, jobLog)) );
- } } } }
+ CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_overlapSizeLog, FUZ_rand(&lseed) % 10, useOpaqueAPI) );
+ CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_p_jobSize, (U32)FUZ_rLogLength(&lseed, jobLog), useOpaqueAPI) );
+ }
+ }
+
+ if (FUZ_rand(&lseed) & 1) CHECK_Z (setCCtxParameter(zc, cctxParams, ZSTD_p_forceMaxWindow, FUZ_rand(&lseed) & 1, useOpaqueAPI) );
+
+ /* Apply parameters */
+ if (useOpaqueAPI) {
+ CHECK_Z (ZSTD_CCtx_setParametersUsingCCtxParams(zc, cctxParams) );
+ }
+
+ if (FUZ_rand(&lseed) & 1) {
+ if (FUZ_rand(&lseed) & 1) {
+ CHECK_Z( ZSTD_CCtx_loadDictionary(zc, dict, dictSize) );
+ } else {
+ CHECK_Z( ZSTD_CCtx_loadDictionary_byReference(zc, dict, dictSize) );
+ }
+ if (dict && dictSize) {
+ /* test that compression parameters are rejected (correctly) after loading a non-NULL dictionary */
+ if (useOpaqueAPI) {
+ size_t const setError = ZSTD_CCtx_setParametersUsingCCtxParams(zc, cctxParams);
+ CHECK(!ZSTD_isError(setError), "ZSTD_CCtx_setParametersUsingCCtxParams should have failed");
+ } else {
+ size_t const setError = ZSTD_CCtx_setParameter(zc, ZSTD_p_windowLog, cParams.windowLog-1);
+ CHECK(!ZSTD_isError(setError), "ZSTD_CCtx_setParameter should have failed");
+ }
+ }
+ } else {
+ CHECK_Z( ZSTD_CCtx_refPrefix(zc, dict, dictSize) );
+ }
+ } }
/* multi-segments compression test */
XXH64_reset(&xxhState, 0);
@@ -1389,7 +1464,7 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
outBuff.size = outBuff.pos + dstBuffSize;
CHECK_Z( ZSTD_compress_generic(zc, &outBuff, &inBuff, flush) );
- DISPLAYLEVEL(5, "compress consumed %u bytes (total : %u) \n",
+ DISPLAYLEVEL(6, "compress consumed %u bytes (total : %u) \n",
(U32)inBuff.pos, (U32)(totalTestSize + inBuff.pos));
XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos);
@@ -1404,11 +1479,11 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog+1);
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
outBuff.size = outBuff.pos + adjustedDstSize;
- DISPLAYLEVEL(5, "End-flush into dst buffer of size %u \n", (U32)adjustedDstSize);
+ DISPLAYLEVEL(6, "End-flush into dst buffer of size %u \n", (U32)adjustedDstSize);
remainingToFlush = ZSTD_compress_generic(zc, &outBuff, &inBuff, ZSTD_e_end);
- CHECK(ZSTD_isError(remainingToFlush),
- "ZSTD_compress_generic w/ ZSTD_e_end error : %s",
- ZSTD_getErrorName(remainingToFlush) );
+ CHECK( ZSTD_isError(remainingToFlush),
+ "ZSTD_compress_generic w/ ZSTD_e_end error : %s",
+ ZSTD_getErrorName(remainingToFlush) );
} }
crcOrig = XXH64_digest(&xxhState);
cSize = outBuff.pos;
@@ -1431,12 +1506,12 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
size_t const dstBuffSize = MIN(dstBufferSize - totalGenSize, randomDstSize);
inBuff.size = inBuff.pos + readCSrcSize;
- outBuff.size = inBuff.pos + dstBuffSize;
- DISPLAYLEVEL(5, "ZSTD_decompressStream input %u bytes (pos:%u/%u)\n",
+ outBuff.size = outBuff.pos + dstBuffSize;
+ DISPLAYLEVEL(6, "ZSTD_decompressStream input %u bytes (pos:%u/%u)\n",
(U32)readCSrcSize, (U32)inBuff.pos, (U32)cSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
- DISPLAYLEVEL(5, "inBuff.pos = %u \n", (U32)readCSrcSize);
+ DISPLAYLEVEL(6, "inBuff.pos = %u \n", (U32)readCSrcSize);
}
CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (U32)outBuff.pos, (U32)totalTestSize);
CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (U32)inBuff.pos, (U32)cSize);
@@ -1479,6 +1554,7 @@ _cleanup:
ZSTD_freeCCtx(zc);
ZSTD_freeDStream(zd);
ZSTD_freeDStream(zd_noise);
+ ZSTD_freeCCtxParams(cctxParams);
free(cNoiseBuffer[0]);
free(cNoiseBuffer[1]);
free(cNoiseBuffer[2]);
@@ -1494,7 +1570,6 @@ _output_error:
goto _cleanup;
}
-
/*-*******************************************************
* Command line
*********************************************************/
@@ -1530,6 +1605,7 @@ int main(int argc, const char** argv)
e_api selected_api = simple_api;
const char* const programName = argv[0];
ZSTD_customMem const customNULL = ZSTD_defaultCMem;
+ U32 useOpaqueAPI = 0;
/* Check command line */
for(argNb=1; argNb<argc; argNb++) {
@@ -1539,8 +1615,9 @@ int main(int argc, const char** argv)
/* Parsing commands. Aggregated commands are allowed */
if (argument[0]=='-') {
- if (!strcmp(argument, "--mt")) { selected_api=mt_api; continue; }
- if (!strcmp(argument, "--newapi")) { selected_api=advanced_api; continue; }
+ if (!strcmp(argument, "--mt")) { selected_api=mt_api; testNb += !testNb; continue; }
+ if (!strcmp(argument, "--newapi")) { selected_api=advanced_api; testNb += !testNb; continue; }
+ if (!strcmp(argument, "--opaqueapi")) { selected_api=advanced_api; testNb += !testNb; useOpaqueAPI = 1; continue; }
if (!strcmp(argument, "--no-big-tests")) { bigTests=0; continue; }
argument++;
@@ -1654,7 +1731,7 @@ int main(int argc, const char** argv)
result = fuzzerTests_MT(seed, nbTests, testNb, ((double)proba) / 100, bigTests);
break;
case advanced_api :
- result = fuzzerTests_newAPI(seed, nbTests, testNb, ((double)proba) / 100, bigTests);
+ result = fuzzerTests_newAPI(seed, nbTests, testNb, ((double)proba) / 100, bigTests, useOpaqueAPI);
break;
default :
assert(0); /* impossible */
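
[Editorial note, not part of the import] The fuzzer changes above switch the test from calling ZSTD_CCtx_setParameter() directly to a setCCtxParameter() helper that can route through either the per-parameter API or an opaque ZSTD_CCtx_params object. The sketch below is a minimal, hedged illustration of the per-parameter ("new API") pattern the test exercises, using only symbols visible in the hunks (ZSTD_CCtx_setParameter, ZSTD_p_compressionLevel, ZSTD_p_checksumFlag, ZSTD_compress_generic, ZSTD_e_end); in 1.3.2 this API is still experimental, hence ZSTD_STATIC_LINKING_ONLY. It is a usage sketch, not code from this import.

    /* sketch: one-shot compression through the 1.3.2 advanced API */
    #define ZSTD_STATIC_LINKING_ONLY   /* advanced API is experimental in 1.3.2 */
    #include <zstd.h>

    static size_t compress_with_new_api(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t remaining;

        /* parameters are set one at a time on the context */
        if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 3))) goto fail;
        if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1))) goto fail;

        /* ZSTD_e_end flushes and writes the frame epilogue; 0 means fully flushed */
        remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining) || remaining != 0) goto fail;

        ZSTD_freeCCtx(cctx);
        return out.pos;          /* compressed size */
    fail:
        ZSTD_freeCCtx(cctx);
        return 0;                /* 0 signals failure in this sketch */
    }
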
diff --git a/zlibWrapper/Makefile b/zlibWrapper/Makefile
index 4e8fb4a329b6..c1896f8b80a3 100644
--- a/zlibWrapper/Makefile
+++ b/zlibWrapper/Makefile
@@ -34,7 +34,7 @@ EXT =
endif
-all: clean fitblk example zwrapbench minigzip
+all: fitblk example zwrapbench minigzip
test: example fitblk example_zstd fitblk_zstd zwrapbench minigzip minigzip_zstd
./example
diff --git a/zlibWrapper/examples/zwrapbench.c b/zlibWrapper/examples/zwrapbench.c
index 050c9db63218..25a23f885737 100644
--- a/zlibWrapper/examples/zwrapbench.c
+++ b/zlibWrapper/examples/zwrapbench.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -36,7 +36,6 @@
#endif
-
/*-************************************
* Constants
**************************************/
@@ -162,7 +161,6 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
ZSTD_CCtx* const ctx = ZSTD_createCCtx();
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
U32 nbBlocks;
- UTIL_freq_t ticksPerSecond;
/* checks */
if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
@@ -170,7 +168,6 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
/* init */
if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* can only display 17 characters */
- UTIL_initTimer(&ticksPerSecond);
/* Init blockTable data */
{ z_const char* srcPtr = (z_const char*)srcBuffer;
@@ -210,17 +207,17 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
size_t cSize = 0;
double ratio = 0.;
- UTIL_getTime(&coolTime);
+ coolTime = UTIL_getTime();
DISPLAYLEVEL(2, "\r%79s\r", "");
while (!cCompleted | !dCompleted) {
UTIL_time_t clockStart;
U64 clockLoop = g_nbIterations ? TIMELOOP_MICROSEC : 1;
/* overheat protection */
- if (UTIL_clockSpanMicro(coolTime, ticksPerSecond) > ACTIVEPERIOD_MICROSEC) {
+ if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
DISPLAYLEVEL(2, "\rcooling down ... \r");
UTIL_sleep(COOLPERIOD_SEC);
- UTIL_getTime(&coolTime);
+ coolTime = UTIL_getTime();
}
/* Compression */
@@ -228,15 +225,15 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
- UTIL_getTime(&clockStart);
+ UTIL_waitForNextTick();
+ clockStart = UTIL_getTime();
if (!cCompleted) { /* still some time to do compression tests */
U32 nbLoops = 0;
if (compressor == BMK_ZSTD) {
ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
ZSTD_customMem const cmem = { NULL, NULL, NULL };
- ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1 /*byRef*/, ZSTD_dm_auto, zparams.cParams, cmem);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dm_auto, zparams.cParams, cmem);
if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
do {
@@ -257,7 +254,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = rSize;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeCDict(cdict);
} else if (compressor == BMK_ZSTD_STREAM) {
ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
@@ -286,7 +283,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = outBuffer.pos;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeCStream(zbc);
} else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
z_stream def;
@@ -327,7 +324,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = def.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ret = deflateEnd(&def);
if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
} else {
@@ -360,9 +357,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = def.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
}
- { U64 const clockSpan = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
if (clockSpan < fastestC*nbLoops) fastestC = clockSpan / nbLoops;
totalCTime += clockSpan;
cCompleted = totalCTime>maxTime;
@@ -382,8 +379,8 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
if (!dCompleted) memset(resultBuffer, 0xD6, srcSize); /* warm result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
- UTIL_getTime(&clockStart);
+ UTIL_waitForNextTick();
+ clockStart = UTIL_getTime();
if (!dCompleted) {
U32 nbLoops = 0;
@@ -406,7 +403,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = regenSize;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeDDict(ddict);
} else if (compressor == BMK_ZSTD_STREAM) {
ZSTD_inBuffer inBuffer;
@@ -432,7 +429,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = outBuffer.pos;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeDStream(zbd);
} else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
z_stream inf;
@@ -468,7 +465,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = inf.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ret = inflateEnd(&inf);
if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
} else {
@@ -502,9 +499,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = inf.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
}
- { U64 const clockSpan = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
if (clockSpan < fastestD*nbLoops) fastestD = clockSpan / nbLoops;
totalDTime += clockSpan;
dCompleted = totalDTime>maxTime;
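
[Editorial note, not part of the import] The zwrapbench.c hunks above track a timer refactor in this release: UTIL_getTime() now returns a UTIL_time_t by value and UTIL_clockSpanMicro() takes only the start time, so the ticksPerSecond plumbing is dropped. The fragment below is a hedged sketch of the updated usage, restricted to calls visible in the hunks (UTIL_getTime, UTIL_clockSpanMicro); it assumes zstd's programs/util.h is on the include path, which is an assumption of the sketch, not something stated by this diff.

    /* sketch: measure how many calls to fn fit in one millisecond,
     * using the updated util.h timer interface shown above */
    #include "util.h"   /* assumed include path: UTIL_time_t, UTIL_getTime, UTIL_clockSpanMicro */

    static unsigned count_loops_for_1ms(void (*fn)(void))
    {
        UTIL_time_t const clockStart = UTIL_getTime();   /* returned by value now */
        unsigned nbLoops = 0;
        do {
            fn();
            nbLoops++;
        } while (UTIL_clockSpanMicro(clockStart) < 1000);  /* microseconds elapsed */
        return nbLoops;
    }
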
diff --git a/zlibWrapper/gzcompatibility.h b/zlibWrapper/gzcompatibility.h
index ac9020acc15b..ea8d50c8238a 100644
--- a/zlibWrapper/gzcompatibility.h
+++ b/zlibWrapper/gzcompatibility.h
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/zlibWrapper/zstd_zlibwrapper.c b/zlibWrapper/zstd_zlibwrapper.c
index 272369a2874c..c5fbdf98e19d 100644
--- a/zlibWrapper/zstd_zlibwrapper.c
+++ b/zlibWrapper/zstd_zlibwrapper.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
diff --git a/zlibWrapper/zstd_zlibwrapper.h b/zlibWrapper/zstd_zlibwrapper.h
index f8f36800928e..f828d3191b87 100644
--- a/zlibWrapper/zstd_zlibwrapper.h
+++ b/zlibWrapper/zstd_zlibwrapper.h
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_ZLIBWRAPPER_H