miniz_oxide/deflate/core.rs

//! Streaming compression functionality.

use alloc::boxed::Box;
use core::convert::TryInto;
use core::{cmp, mem};

use super::super::*;
use super::deflate_flags::*;
use super::CompressionLevel;
use crate::deflate::buffer::{
    update_hash, HashBuffers, LocalBuf, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, LZ_HASH_BITS,
    LZ_HASH_SHIFT, LZ_HASH_SIZE, OUT_BUF_SIZE,
};
use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER, MZ_ADLER32_INIT};
use crate::DataFormat;

// Currently not bubbled up outside this module, so can fill in with more
// context eventually if needed.
type Result<T, E = Error> = core::result::Result<T, E>;
struct Error {}

const MAX_PROBES_MASK: i32 = 0xFFF;

const MAX_SUPPORTED_HUFF_CODESIZE: usize = 32;

/// Length code for length values.
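/// Indexed by `length - MIN_MATCH_LEN`, so `LEN_SYM[0] == 257` is the deflate
/// code for the minimum match length of 3 (matching how `compress_lz_codes`
/// below indexes this table).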
#[rustfmt::skip]
const LEN_SYM: [u16; 256] = [
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
];

/// Number of extra bits for length values.
#[rustfmt::skip]
const LEN_EXTRA: [u8; 256] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
];

/// Distance codes for distances smaller than 512.
#[rustfmt::skip]
const SMALL_DIST_SYM: [u8; 512] = [
     0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
     8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
];

/// Number of extra bits for distances smaller than 512.
#[rustfmt::skip]
const SMALL_DIST_EXTRA: [u8; 512] = [
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
];

/// Distance codes for distances above 512.
#[rustfmt::skip]
const LARGE_DIST_SYM: [u8; 128] = [
     0,  0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
];

/// Number of extra bits for distances above 512.
#[rustfmt::skip]
const LARGE_DIST_EXTRA: [u8; 128] = [
     0,  0,  8,  8,  9,  9,  9,  9, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
];

#[rustfmt::skip]
const BITMASKS: [u32; 17] = [
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF
];

/// The maximum number of checks for matches in the hash table the compressor will make for each
/// compression level.
const NUM_PROBES: [u32; 11] = [0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500];

#[derive(Copy, Clone)]
struct SymFreq {
    key: u16,
    sym_index: u16,
}

pub mod deflate_flags {
    /// Whether to use a zlib wrapper.
    pub const TDEFL_WRITE_ZLIB_HEADER: u32 = 0x0000_1000;
    /// Should we compute the adler32 checksum.
    pub const TDEFL_COMPUTE_ADLER32: u32 = 0x0000_2000;
    /// Should we use greedy parsing (as opposed to lazy parsing, where we look
    /// ahead one or more bytes to check for better matches).
    pub const TDEFL_GREEDY_PARSING_FLAG: u32 = 0x0000_4000;
    /// Used in miniz to skip zero-initializing hash and dict. We don't do this here, so
    /// this flag is ignored.
    pub const TDEFL_NONDETERMINISTIC_PARSING_FLAG: u32 = 0x0000_8000;
    /// Only look for matches with a distance of 1.
    pub const TDEFL_RLE_MATCHES: u32 = 0x0001_0000;
    /// Only use matches that are at least 6 bytes long.
    pub const TDEFL_FILTER_MATCHES: u32 = 0x0002_0000;
    /// Force the compressor to only output static blocks. (Blocks using the default huffman codes
    /// specified in the deflate specification.)
    pub const TDEFL_FORCE_ALL_STATIC_BLOCKS: u32 = 0x0004_0000;
    /// Force the compressor to only output raw/uncompressed blocks.
    pub const TDEFL_FORCE_ALL_RAW_BLOCKS: u32 = 0x0008_0000;
}
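
// A hedged sketch of how these flags compose: the low 12 bits select the
// number of dictionary probes and the named bits above are OR'd on top. The
// usual entry point is `create_comp_flags_from_zip_params` further down; this
// test is purely illustrative.
#[cfg(test)]
mod deflate_flags_example {
    use super::deflate_flags::*;
    use super::{MAX_PROBES_MASK, NUM_PROBES};

    #[test]
    fn compose_flags() {
        let flags = NUM_PROBES[6] | TDEFL_WRITE_ZLIB_HEADER | TDEFL_GREEDY_PARSING_FLAG;
        assert_eq!(flags & (MAX_PROBES_MASK as u32), NUM_PROBES[6]);
        assert_ne!(flags & TDEFL_WRITE_ZLIB_HEADER, 0);
    }
}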

/// Strategy setting for compression.
///
/// The non-default settings offer some special-case compression variants.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum CompressionStrategy {
    /// Don't use any of the special strategies.
    Default = 0,
    /// Only use matches that are at least 5 bytes long.
    Filtered = 1,
    /// Don't look for matches, only huffman encode the literals.
    HuffmanOnly = 2,
    /// Only look for matches with a distance of 1, i.e. do run-length encoding only.
    RLE = 3,
    /// Only use static/fixed blocks. (Blocks using the default huffman codes
    /// specified in the deflate specification.)
    Fixed = 4,
}

impl From<CompressionStrategy> for i32 {
    #[inline(always)]
    fn from(value: CompressionStrategy) -> Self {
        value as i32
    }
}

/// A list of deflate flush types.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TDEFLFlush {
    /// Normal operation.
    ///
    /// Compress as much as there is space for, and then return waiting for more input.
    None = 0,

    /// Try to flush all the current data and output an empty raw block.
    Sync = 2,

    /// Same as [`Sync`][Self::Sync], but reset the dictionary so that the following data does not
    /// depend on previous data.
    Full = 3,

    /// Try to flush everything and end the deflate stream.
    ///
    /// On success this will yield a [`TDEFLStatus::Done`] return status.
    Finish = 4,
}

impl From<MZFlush> for TDEFLFlush {
    fn from(flush: MZFlush) -> Self {
        match flush {
            MZFlush::None => TDEFLFlush::None,
            MZFlush::Sync => TDEFLFlush::Sync,
            MZFlush::Full => TDEFLFlush::Full,
            MZFlush::Finish => TDEFLFlush::Finish,
            _ => TDEFLFlush::None, // TODO: ??? What to do ???
        }
    }
}

impl TDEFLFlush {
    pub const fn new(flush: i32) -> Result<Self, MZError> {
        match flush {
            0 => Ok(TDEFLFlush::None),
            2 => Ok(TDEFLFlush::Sync),
            3 => Ok(TDEFLFlush::Full),
            4 => Ok(TDEFLFlush::Finish),
            _ => Err(MZError::Param),
        }
    }
}
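
// Illustrative only: `TDEFLFlush::new` accepts the raw miniz-style flush
// values and rejects anything else (1, zlib's old "partial" flush, is not
// supported here).
#[cfg(test)]
mod tdefl_flush_example {
    use super::TDEFLFlush;

    #[test]
    fn from_raw_value() {
        assert!(matches!(TDEFLFlush::new(4), Ok(TDEFLFlush::Finish)));
        assert!(TDEFLFlush::new(1).is_err());
    }
}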

/// Return status of compression.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TDEFLStatus {
    /// Usage error.
    ///
    /// This indicates that either the [`CompressorOxide`] experienced a previous error, or the
    /// stream has already been [`TDEFLFlush::Finish`]'d.
    BadParam = -2,

    /// Error putting data into output buffer.
    ///
    /// This usually indicates a too-small buffer.
    PutBufFailed = -1,

    /// Compression succeeded normally.
    Okay = 0,

    /// Compression succeeded and the deflate stream was ended.
    ///
    /// This is the result of calling compression with [`TDEFLFlush::Finish`].
    Done = 1,
}

const MAX_HUFF_SYMBOLS: usize = 288;
/// Mask for the hash used in fast compression mode.
const LEVEL1_HASH_SIZE_MASK: u32 = 4095;
/// The number of huffman tables used by the compressor.
/// Literal/length, distance, and the code lengths used to encode the other two tables.
const MAX_HUFF_TABLES: usize = 3;
/// Literal/length codes
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// Distance codes.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// Huffman length values.
const MAX_HUFF_SYMBOLS_2: usize = 19;
/// Size of the chained hash table.
pub(crate) const LZ_DICT_SIZE: usize = 32_768;
/// Mask used when stepping through the hash chains.
const LZ_DICT_SIZE_MASK: usize = (LZ_DICT_SIZE as u32 - 1) as usize;
/// The minimum length of a match.
const MIN_MATCH_LEN: u8 = 3;
/// The maximum length of a match.
pub(crate) const MAX_MATCH_LEN: usize = 258;

const DEFAULT_FLAGS: u32 = NUM_PROBES[4] | TDEFL_WRITE_ZLIB_HEADER;

mod zlib {
    use super::{TDEFL_FORCE_ALL_RAW_BLOCKS, TDEFL_RLE_MATCHES};

    const DEFAULT_CM: u8 = 8;
    const DEFAULT_CINFO: u8 = 7 << 4;
    const _DEFAULT_FDICT: u8 = 0;
    const DEFAULT_CMF: u8 = DEFAULT_CM | DEFAULT_CINFO;
    // CMF used for RLE (technically it uses a window size of 0, but the lowest that can
    // be specified in the header corresponds to a window size of 1 << (0 + 8), aka 256).
    const MIN_CMF: u8 = DEFAULT_CM; // | 0
    /// The 16-bit value consisting of CMF and FLG must be divisible by this to be valid.
    const FCHECK_DIVISOR: u8 = 31;

    /// Generate FCHECK from CMF and FLG (without FCHECK) so that they are correct according to the
    /// specification, i.e. (CMF*256 + FLG) % 31 == 0.
    /// Returns flg with the FCHECK bits added (any existing FCHECK bits are ignored).
    fn add_fcheck(cmf: u8, flg: u8) -> u8 {
        let rem = ((usize::from(cmf) * 256) + usize::from(flg)) % usize::from(FCHECK_DIVISOR);

        // Clear existing FCHECK if any
        let flg = flg & 0b11100000;

        // Casting is safe as rem can't overflow since it is a value mod 31
        // We can simply add the value to flg as (31 - rem) will never be above 2^5
        flg + (FCHECK_DIVISOR - rem as u8)
    }
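
    // Worked example (illustration only): with CMF = 0x78 and level bits
    // 2 << 6 = 0x80, (0x78 * 256 + 0x80) % 31 == 3, so FCHECK = 31 - 3 = 28
    // and the final FLG is 0x80 | 28 = 0x9C, producing the common zlib
    // header bytes [0x78, 0x9C].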

    const fn zlib_level_from_flags(flags: u32) -> u8 {
        use super::NUM_PROBES;

        let num_probes = flags & (super::MAX_PROBES_MASK as u32);
        if (flags & super::TDEFL_GREEDY_PARSING_FLAG != 0)
            || (flags & super::TDEFL_RLE_MATCHES != 0)
        {
            if num_probes <= 1 {
                0
            } else {
                1
            }
        } else if num_probes >= NUM_PROBES[9] {
            3
        } else {
            2
        }
    }

    const fn cmf_from_flags(flags: u32) -> u8 {
        if (flags & TDEFL_RLE_MATCHES == 0) && (flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0) {
            DEFAULT_CMF
        // If we are using RLE encoding or no compression the window bits can be set as the
        // minimum.
        } else {
            MIN_CMF
        }
    }

    /// Get the zlib header for the level using the default window size and no
    /// dictionary.
    fn header_from_level(level: u8, flags: u32) -> [u8; 2] {
        let cmf = cmf_from_flags(flags);
        [cmf, add_fcheck(cmf, level << 6)]
    }

    /// Create a zlib header from the given compression flags.
    /// Only level is considered.
    pub fn header_from_flags(flags: u32) -> [u8; 2] {
        let level = zlib_level_from_flags(flags);
        header_from_level(level, flags)
    }

    #[cfg(test)]
    mod test {
        #[test]
        fn zlib() {
            use super::super::*;
            use super::*;

            let test_level = |level, expected| {
                let flags = create_comp_flags_from_zip_params(
                    level,
                    MZ_DEFAULT_WINDOW_BITS,
                    CompressionStrategy::Default as i32,
                );
                assert_eq!(zlib_level_from_flags(flags), expected);
            };

            assert_eq!(zlib_level_from_flags(DEFAULT_FLAGS), 2);
            test_level(0, 0);
            test_level(1, 0);
            test_level(2, 1);
            test_level(3, 1);
            for i in 4..=8 {
                test_level(i, 2)
            }
            test_level(9, 3);
            test_level(10, 3);
        }

        #[test]
        fn test_header() {
            let header = super::header_from_level(3, 0);
            assert_eq!(
                ((usize::from(header[0]) * 256) + usize::from(header[1])) % 31,
                0
            );
        }
    }
}

#[cfg(test)]
#[inline]
fn write_u16_le(val: u16, slice: &mut [u8], pos: usize) {
    slice[pos] = val as u8;
    slice[pos + 1] = (val >> 8) as u8;
}

// Read the two bytes starting at pos and interpret them as a u16.
#[inline]
const fn read_u16_le(slice: &[u8], pos: usize) -> u16 {
    // The compiler is smart enough to optimize this into an unaligned load.
    slice[pos] as u16 | ((slice[pos + 1] as u16) << 8)
}
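
// Small illustrative check of the helper above: the low byte comes first.
#[cfg(test)]
mod read_u16_le_example {
    #[test]
    fn little_endian() {
        assert_eq!(super::read_u16_le(&[0x34, 0x12], 0), 0x1234);
    }
}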

/// Main compression struct.
pub struct CompressorOxide {
    lz: LZOxide,
    params: ParamsOxide,
    /// Put HuffmanOxide on the heap with default trick to avoid
    /// excessive stack copies.
    huff: Box<HuffmanOxide>,
    dict: DictOxide,
}

impl CompressorOxide {
    /// Create a new `CompressorOxide` with the given flags.
    ///
    /// # Notes
    /// This function may be changed to take different parameters in the future.
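    ///
    /// # Example
    ///
    /// A sketch of typical use (an added example, assuming this module is
    /// reached as `miniz_oxide::deflate::core`):
    ///
    /// ```no_run
    /// use miniz_oxide::deflate::core::{create_comp_flags_from_zip_params, CompressorOxide};
    ///
    /// // Level 6, default window bits (zlib wrapper), default strategy.
    /// let flags = create_comp_flags_from_zip_params(6, 15, 0);
    /// let compressor = CompressorOxide::new(flags);
    /// ```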
    pub fn new(flags: u32) -> Self {
        CompressorOxide {
            lz: LZOxide::new(),
            params: ParamsOxide::new(flags),
            huff: Box::default(),
            dict: DictOxide::new(flags),
        }
    }

    /// Get the adler32 checksum of the currently encoded data.
    pub const fn adler32(&self) -> u32 {
        self.params.adler32
    }

    /// Get the return status of the previous [`compress`](fn.compress.html)
    /// call with this compressor.
    pub const fn prev_return_status(&self) -> TDEFLStatus {
        self.params.prev_return_status
    }

    /// Get the raw compressor flags.
    ///
    /// # Notes
    /// This function may be deprecated or changed in the future to use more rust-style flags.
    pub const fn flags(&self) -> i32 {
        self.params.flags as i32
    }

    /// Returns whether the compressor is wrapping the data in a zlib format or not.
    pub const fn data_format(&self) -> DataFormat {
        if (self.params.flags & TDEFL_WRITE_ZLIB_HEADER) != 0 {
            DataFormat::Zlib
        } else {
            DataFormat::Raw
        }
    }

    /// Reset the state of the compressor, keeping the same parameters.
    ///
    /// This avoids re-allocating data.
    pub fn reset(&mut self) {
        // The LZ buf and huffman tables have no settings or dynamic memory
        // that need to be saved, so we simply replace them.
        self.lz = LZOxide::new();
        self.params.reset();
        *self.huff = HuffmanOxide::default();
        self.dict.reset();
    }

    /// Set the compression level of the compressor.
    ///
    /// Using this to change level after compression has started is supported.
    /// # Notes
    /// The compression strategy will be reset to the default one when this is called.
    pub fn set_compression_level(&mut self, level: CompressionLevel) {
        let format = self.data_format();
        self.set_format_and_level(format, level as u8);
    }

    /// Set the compression level of the compressor using an integer value.
    ///
    /// Using this to change level after compression has started is supported.
    /// # Notes
    /// The compression strategy will be reset to the default one when this is called.
    pub fn set_compression_level_raw(&mut self, level: u8) {
        let format = self.data_format();
        self.set_format_and_level(format, level);
    }

    /// Update the compression settings of the compressor.
    ///
    /// Changing the `DataFormat` after compression has started will result in
    /// a corrupted stream.
    ///
    /// # Notes
    /// This function is mainly intended for setting the initial settings after e.g. creating with
    /// `default` or after calling `CompressorOxide::reset()`, and behaviour may be changed
    /// to disallow calling it after starting compression in the future.
    pub fn set_format_and_level(&mut self, data_format: DataFormat, level: u8) {
        let flags = create_comp_flags_from_zip_params(
            level.into(),
            data_format.to_window_bits(),
            CompressionStrategy::Default as i32,
        );
        self.params.update_flags(flags);
        self.dict.update_flags(flags);
    }
}

impl Default for CompressorOxide {
    /// Initialize the compressor with a level of 4, zlib wrapper and
    /// the default strategy.
    fn default() -> Self {
        CompressorOxide {
            lz: LZOxide::new(),
            params: ParamsOxide::new(DEFAULT_FLAGS),
            huff: Box::default(),
            dict: DictOxide::new(DEFAULT_FLAGS),
        }
    }
}

/// Callback function used in `compress_to_output`.
pub struct CallbackFunc<'a> {
    pub put_buf_func: &'a mut dyn FnMut(&[u8]) -> bool,
}

impl CallbackFunc<'_> {
    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        // TODO: As this could be unsafe since we can't verify the function
        // pointer, this whole function should maybe be unsafe as well.
        let call_success = (self.put_buf_func)(&params.local_buf.b[0..saved_output.pos]);

        if !call_success {
            params.prev_return_status = TDEFLStatus::PutBufFailed;
            return params.prev_return_status as i32;
        }

        params.flush_remaining as i32
    }
}

struct CallbackBuf<'a> {
    pub out_buf: &'a mut [u8],
}

impl CallbackBuf<'_> {
    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        if saved_output.local {
            let n = cmp::min(saved_output.pos, self.out_buf.len() - params.out_buf_ofs);
            (self.out_buf[params.out_buf_ofs..params.out_buf_ofs + n])
                .copy_from_slice(&params.local_buf.b[..n]);

            params.out_buf_ofs += n;
            if saved_output.pos != n {
                params.flush_ofs = n as u32;
                params.flush_remaining = (saved_output.pos - n) as u32;
            }
        } else {
            params.out_buf_ofs += saved_output.pos;
        }

        params.flush_remaining as i32
    }
}

enum CallbackOut<'a> {
    Func(CallbackFunc<'a>),
    Buf(CallbackBuf<'a>),
}

impl CallbackOut<'_> {
    fn new_output_buffer<'b>(
        &'b mut self,
        local_buf: &'b mut [u8],
        out_buf_ofs: usize,
    ) -> OutputBufferOxide<'b> {
        let is_local;
        let buf_len = OUT_BUF_SIZE - 16;
        let chosen_buffer = match *self {
            CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= OUT_BUF_SIZE => {
                is_local = false;
                &mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len]
            }
            _ => {
                is_local = true;
                &mut local_buf[..buf_len]
            }
        };

        OutputBufferOxide {
            inner: chosen_buffer,
            inner_pos: 0,
            local: is_local,
            bit_buffer: 0,
            bits_in: 0,
        }
    }
}

struct CallbackOxide<'a> {
    in_buf: Option<&'a [u8]>,
    in_buf_size: Option<&'a mut usize>,
    out_buf_size: Option<&'a mut usize>,
    out: CallbackOut<'a>,
}

impl<'a> CallbackOxide<'a> {
    fn new_callback_buf(in_buf: &'a [u8], out_buf: &'a mut [u8]) -> Self {
        CallbackOxide {
            in_buf: Some(in_buf),
            in_buf_size: None,
            out_buf_size: None,
            out: CallbackOut::Buf(CallbackBuf { out_buf }),
        }
    }

    fn new_callback_func(in_buf: &'a [u8], callback_func: CallbackFunc<'a>) -> Self {
        CallbackOxide {
            in_buf: Some(in_buf),
            in_buf_size: None,
            out_buf_size: None,
            out: CallbackOut::Func(callback_func),
        }
    }

    fn update_size(&mut self, in_size: Option<usize>, out_size: Option<usize>) {
        if let (Some(in_size), Some(size)) = (in_size, self.in_buf_size.as_mut()) {
            **size = in_size;
        }

        if let (Some(out_size), Some(size)) = (out_size, self.out_buf_size.as_mut()) {
            **size = out_size
        }
    }

    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        if saved_output.pos == 0 {
            return params.flush_remaining as i32;
        }

        self.update_size(Some(params.src_pos), None);
        match self.out {
            CallbackOut::Func(ref mut cf) => cf.flush_output(saved_output, params),
            CallbackOut::Buf(ref mut cb) => cb.flush_output(saved_output, params),
        }
    }
}

struct OutputBufferOxide<'a> {
    pub inner: &'a mut [u8],
    pub inner_pos: usize,
    pub local: bool,

    pub bit_buffer: u32,
    pub bits_in: u32,
}

impl OutputBufferOxide<'_> {
    fn put_bits(&mut self, bits: u32, len: u32) {
        // TODO: Removing this assertion worsens performance;
        // need to figure out why.
        assert!(bits <= ((1u32 << len) - 1u32));
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;

        while self.bits_in >= 8 {
            self.inner[self.inner_pos] = self.bit_buffer as u8;
            self.inner_pos += 1;
            self.bit_buffer >>= 8;
            self.bits_in -= 8;
        }
    }

    const fn save(&self) -> SavedOutputBufferOxide {
        SavedOutputBufferOxide {
            pos: self.inner_pos,
            bit_buffer: self.bit_buffer,
            bits_in: self.bits_in,
            local: self.local,
        }
    }

    fn load(&mut self, saved: SavedOutputBufferOxide) {
        self.inner_pos = saved.pos;
        self.bit_buffer = saved.bit_buffer;
        self.bits_in = saved.bits_in;
        self.local = saved.local;
    }

    fn pad_to_bytes(&mut self) {
        if self.bits_in != 0 {
            let len = 8 - self.bits_in;
            self.put_bits(0, len);
        }
    }
}
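
// Deflate packs bits least-significant-bit first. This illustrative test
// (an addition, not part of the original API surface) shows `put_bits`
// emitting a full byte once eight bits have accumulated.
#[cfg(test)]
mod put_bits_example {
    use super::OutputBufferOxide;

    #[test]
    fn lsb_first() {
        let mut buf = [0u8; 8];
        let mut out = OutputBufferOxide {
            inner: &mut buf,
            inner_pos: 0,
            local: true,
            bit_buffer: 0,
            bits_in: 0,
        };
        out.put_bits(0b101, 3); // Stays in the bit buffer.
        out.put_bits(0b11111, 5); // Now 8 bits are in; one byte is flushed.
        assert_eq!(out.inner[0], 0b1111_1101);
        assert_eq!(out.bits_in, 0);
    }
}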

struct SavedOutputBufferOxide {
    pub pos: usize,
    pub bit_buffer: u32,
    pub bits_in: u32,
    pub local: bool,
}

struct BitBuffer {
    pub bit_buffer: u64,
    pub bits_in: u32,
}

impl BitBuffer {
    fn put_fast(&mut self, bits: u64, len: u32) {
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
    }

    fn flush(&mut self, output: &mut OutputBufferOxide) -> Result<()> {
        let pos = output.inner_pos;
        {
            // isolation to please borrow checker
            let inner = &mut output.inner[pos..pos + 8];
            let bytes = u64::to_le_bytes(self.bit_buffer);
            inner.copy_from_slice(&bytes);
        }
        match output.inner_pos.checked_add((self.bits_in >> 3) as usize) {
            Some(n) if n <= output.inner.len() => output.inner_pos = n,
            _ => return Err(Error {}),
        }
        self.bit_buffer >>= self.bits_in & !7;
        self.bits_in &= 7;
        Ok(())
    }
}

/// A struct containing data about huffman codes and symbol frequencies.
///
/// NOTE: Only the literal/lengths have enough symbols to actually use
/// the full array. It's unclear why it's defined like this in miniz,
/// it could be for cache/alignment reasons.
struct HuffmanOxide {
    /// Number of occurrences of each symbol.
    pub count: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
    /// The bits of the huffman code assigned to the symbol
    pub codes: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
    /// The length of the huffman code assigned to the symbol.
    pub code_sizes: [[u8; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
}

/// Tables used for literal/lengths in `HuffmanOxide`.
const LITLEN_TABLE: usize = 0;
/// Tables for distances.
const DIST_TABLE: usize = 1;
/// Tables for the run-length encoded huffman lengths for literals/lengths/distances.
const HUFF_CODES_TABLE: usize = 2;

/// Status of RLE encoding of huffman code lengths.
struct Rle {
    pub z_count: u32,
    pub repeat_count: u16,
    pub prev_code_size: u8,
}

impl Rle {
    fn prev_code_size(
        &mut self,
        packed_code_sizes: &mut [u8],
        packed_pos: &mut usize,
        h: &mut HuffmanOxide,
    ) -> Result<()> {
        let mut write = |buf| write(buf, packed_code_sizes, packed_pos);
        let counts = &mut h.count[HUFF_CODES_TABLE];
        if self.repeat_count != 0 {
            if self.repeat_count < 3 {
                counts[self.prev_code_size as usize] =
                    counts[self.prev_code_size as usize].wrapping_add(self.repeat_count);
                let code = self.prev_code_size;
                write(&[code, code, code][..self.repeat_count as usize])?;
            } else {
                counts[16] = counts[16].wrapping_add(1);
                write(&[16, (self.repeat_count - 3) as u8][..])?;
            }
            self.repeat_count = 0;
        }

        Ok(())
    }

    fn zero_code_size(
        &mut self,
        packed_code_sizes: &mut [u8],
        packed_pos: &mut usize,
        h: &mut HuffmanOxide,
    ) -> Result<()> {
        let mut write = |buf| write(buf, packed_code_sizes, packed_pos);
        let counts = &mut h.count[HUFF_CODES_TABLE];
        if self.z_count != 0 {
            if self.z_count < 3 {
                counts[0] = counts[0].wrapping_add(self.z_count as u16);
                write(&[0, 0, 0][..self.z_count as usize])?;
            } else if self.z_count <= 10 {
                counts[17] = counts[17].wrapping_add(1);
                write(&[17, (self.z_count - 3) as u8][..])?;
            } else {
                counts[18] = counts[18].wrapping_add(1);
                write(&[18, (self.z_count - 11) as u8][..])?;
            }
            self.z_count = 0;
        }

        Ok(())
    }
}
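
// Worked example of the RLE scheme above (illustration): the code-length
// sequence [8, 8, 8, 8, 8, 0, 0, 0, 0] packs to [8, 16, 1, 17, 1] -- a
// literal 8, then code 16 ("repeat previous") with extra value 4 - 3 = 1,
// then code 17 ("short zero run") with extra value 4 - 3 = 1.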

fn write(src: &[u8], dst: &mut [u8], dst_pos: &mut usize) -> Result<()> {
    match dst.get_mut(*dst_pos..*dst_pos + src.len()) {
        Some(s) => s.copy_from_slice(src),
        None => return Err(Error {}),
    }
    *dst_pos += src.len();
    Ok(())
}

impl Default for HuffmanOxide {
    fn default() -> Self {
        HuffmanOxide {
            count: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
            codes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
            code_sizes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
        }
    }
}

impl HuffmanOxide {
    fn radix_sort_symbols<'a>(
        symbols0: &'a mut [SymFreq],
        symbols1: &'a mut [SymFreq],
    ) -> &'a mut [SymFreq] {
        let mut hist = [[0; 256]; 2];

        for freq in symbols0.iter() {
            hist[0][(freq.key & 0xFF) as usize] += 1;
            hist[1][((freq.key >> 8) & 0xFF) as usize] += 1;
        }

        let mut n_passes = 2;
        if symbols0.len() == hist[1][0] {
            n_passes -= 1;
        }

        let mut current_symbols = symbols0;
        let mut new_symbols = symbols1;

        for (pass, hist_item) in hist.iter().enumerate().take(n_passes) {
            let mut offsets = [0; 256];
            let mut offset = 0;
            for i in 0..256 {
                offsets[i] = offset;
                offset += hist_item[i];
            }

            for sym in current_symbols.iter() {
                let j = ((sym.key >> (pass * 8)) & 0xFF) as usize;
                new_symbols[offsets[j]] = *sym;
                offsets[j] += 1;
            }

            mem::swap(&mut current_symbols, &mut new_symbols);
        }

        current_symbols
    }
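
    /// Compute minimum-redundancy (Huffman) code lengths in place from the
    /// frequency-sorted symbols (a descriptive note added here: this follows
    /// the in-place algorithm the original miniz attributes to Moffat and
    /// Katajainen). `key` holds frequencies on input and code lengths on output.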
    fn calculate_minimum_redundancy(symbols: &mut [SymFreq]) {
        match symbols.len() {
            0 => (),
            1 => symbols[0].key = 1,
            n => {
                symbols[0].key += symbols[1].key;
                let mut root = 0;
                let mut leaf = 2;
                for next in 1..n - 1 {
                    if (leaf >= n) || (symbols[root].key < symbols[leaf].key) {
                        symbols[next].key = symbols[root].key;
                        symbols[root].key = next as u16;
                        root += 1;
                    } else {
                        symbols[next].key = symbols[leaf].key;
                        leaf += 1;
                    }

                    if (leaf >= n) || (root < next && symbols[root].key < symbols[leaf].key) {
                        symbols[next].key = symbols[next].key.wrapping_add(symbols[root].key);
                        symbols[root].key = next as u16;
                        root += 1;
                    } else {
                        symbols[next].key = symbols[next].key.wrapping_add(symbols[leaf].key);
                        leaf += 1;
                    }
                }

                symbols[n - 2].key = 0;
                for next in (0..n - 2).rev() {
                    symbols[next].key = symbols[symbols[next].key as usize].key + 1;
                }

                let mut avbl = 1;
                let mut used = 0;
                let mut dpth = 0;
                let mut root = (n - 2) as i32;
                let mut next = (n - 1) as i32;
                while avbl > 0 {
                    while (root >= 0) && (symbols[root as usize].key == dpth) {
                        used += 1;
                        root -= 1;
                    }
                    while avbl > used {
                        symbols[next as usize].key = dpth;
                        next -= 1;
                        avbl -= 1;
                    }
                    avbl = 2 * used;
                    dpth += 1;
                    used = 0;
                }
            }
        }
    }

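    /// Limit the counted code lengths to `max_code_size`, redistributing
    /// lengths until the code is complete again (a descriptive note added
    /// here: this is the usual Kraft-inequality repair step for
    /// length-limited Huffman codes).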
    fn enforce_max_code_size(num_codes: &mut [i32], code_list_len: usize, max_code_size: usize) {
        if code_list_len <= 1 {
            return;
        }

        num_codes[max_code_size] += num_codes[max_code_size + 1..].iter().sum::<i32>();
        let total = num_codes[1..=max_code_size]
            .iter()
            .rev()
            .enumerate()
            .fold(0u32, |total, (i, &x)| total + ((x as u32) << i));

        for _ in (1 << max_code_size)..total {
            num_codes[max_code_size] -= 1;
            for i in (1..max_code_size).rev() {
                if num_codes[i] != 0 {
                    num_codes[i] -= 1;
                    num_codes[i + 1] += 2;
                    break;
                }
            }
        }
    }

    fn optimize_table(
        &mut self,
        table_num: usize,
        table_len: usize,
        code_size_limit: usize,
        static_table: bool,
    ) {
        let mut num_codes = [0i32; MAX_SUPPORTED_HUFF_CODESIZE + 1];
        let mut next_code = [0u32; MAX_SUPPORTED_HUFF_CODESIZE + 1];

        if static_table {
            for &code_size in &self.code_sizes[table_num][..table_len] {
                num_codes[code_size as usize] += 1;
            }
        } else {
            let mut symbols0 = [SymFreq {
                key: 0,
                sym_index: 0,
            }; MAX_HUFF_SYMBOLS];
            let mut symbols1 = [SymFreq {
                key: 0,
                sym_index: 0,
            }; MAX_HUFF_SYMBOLS];

            let mut num_used_symbols = 0;
            for i in 0..table_len {
                if self.count[table_num][i] != 0 {
                    symbols0[num_used_symbols] = SymFreq {
                        key: self.count[table_num][i],
                        sym_index: i as u16,
                    };
                    num_used_symbols += 1;
                }
            }

            let symbols = Self::radix_sort_symbols(
                &mut symbols0[..num_used_symbols],
                &mut symbols1[..num_used_symbols],
            );
            Self::calculate_minimum_redundancy(symbols);

            for symbol in symbols.iter() {
                num_codes[symbol.key as usize] += 1;
            }

            Self::enforce_max_code_size(&mut num_codes, num_used_symbols, code_size_limit);

            self.code_sizes[table_num].fill(0);
            self.codes[table_num].fill(0);

            let mut last = num_used_symbols;
            for (i, &num_item) in num_codes
                .iter()
                .enumerate()
                .take(code_size_limit + 1)
                .skip(1)
            {
                let first = last - num_item as usize;
                for symbol in &symbols[first..last] {
                    self.code_sizes[table_num][symbol.sym_index as usize] = i as u8;
                }
                last = first;
            }
        }

        let mut j = 0;
        next_code[1] = 0;
        for i in 2..=code_size_limit {
            j = (j + num_codes[i - 1]) << 1;
            next_code[i] = j as u32;
        }

        for (&code_size, huff_code) in self.code_sizes[table_num]
            .iter()
            .take(table_len)
            .zip(self.codes[table_num].iter_mut().take(table_len))
        {
            if code_size == 0 {
                continue;
            }

            let mut code = next_code[code_size as usize];
            next_code[code_size as usize] += 1;

            let mut rev_code = 0;
            for _ in 0..code_size {
                rev_code = (rev_code << 1) | (code & 1);
                code >>= 1;
            }
            *huff_code = rev_code as u16;
        }
    }

    fn start_static_block(&mut self, output: &mut OutputBufferOxide) {
        self.code_sizes[LITLEN_TABLE][0..144].fill(8);
        self.code_sizes[LITLEN_TABLE][144..256].fill(9);
        self.code_sizes[LITLEN_TABLE][256..280].fill(7);
        self.code_sizes[LITLEN_TABLE][280..288].fill(8);

        self.code_sizes[DIST_TABLE][..32].fill(5);

        self.optimize_table(LITLEN_TABLE, 288, 15, true);
        self.optimize_table(DIST_TABLE, 32, 15, true);

        output.put_bits(0b01, 2)
    }

    fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> Result<()> {
        // There will always be one, and only one, end-of-block code.
        self.count[0][256] = 1;

        self.optimize_table(0, MAX_HUFF_SYMBOLS_0, 15, false);
        self.optimize_table(1, MAX_HUFF_SYMBOLS_1, 15, false);

        let num_lit_codes = 286
            - &self.code_sizes[0][257..286]
                .iter()
                .rev()
                .take_while(|&x| *x == 0)
                .count();

        let num_dist_codes = 30
            - &self.code_sizes[1][1..30]
                .iter()
                .rev()
                .take_while(|&x| *x == 0)
                .count();

        let mut code_sizes_to_pack = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1];
        let mut packed_code_sizes = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1];

        let total_code_sizes_to_pack = num_lit_codes + num_dist_codes;

        code_sizes_to_pack[..num_lit_codes].copy_from_slice(&self.code_sizes[0][..num_lit_codes]);

        code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack]
            .copy_from_slice(&self.code_sizes[1][..num_dist_codes]);

        let mut rle = Rle {
            z_count: 0,
            repeat_count: 0,
            prev_code_size: 0xFF,
        };

        self.count[HUFF_CODES_TABLE][..MAX_HUFF_SYMBOLS_2].fill(0);

        let mut packed_pos = 0;
        for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] {
            if code_size == 0 {
                rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                rle.z_count += 1;
                if rle.z_count == 138 {
                    rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                }
            } else {
                rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                if code_size != rle.prev_code_size {
                    rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                    self.count[HUFF_CODES_TABLE][code_size as usize] =
                        self.count[HUFF_CODES_TABLE][code_size as usize].wrapping_add(1);
                    write(&[code_size], &mut packed_code_sizes, &mut packed_pos)?;
                } else {
                    rle.repeat_count += 1;
                    if rle.repeat_count == 6 {
                        rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                    }
                }
            }
            rle.prev_code_size = code_size;
        }

        if rle.repeat_count != 0 {
            rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
        } else {
            rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
        }

        self.optimize_table(2, MAX_HUFF_SYMBOLS_2, 7, false);

        output.put_bits(2, 2);

        output.put_bits((num_lit_codes - 257) as u32, 5);
        output.put_bits((num_dist_codes - 1) as u32, 5);

        let mut num_bit_lengths = 18
            - HUFFMAN_LENGTH_ORDER
                .iter()
                .rev()
                .take_while(|&swizzle| self.code_sizes[HUFF_CODES_TABLE][*swizzle as usize] == 0)
                .count();

        num_bit_lengths = cmp::max(4, num_bit_lengths + 1);
        output.put_bits(num_bit_lengths as u32 - 4, 4);
        for &swizzle in &HUFFMAN_LENGTH_ORDER[..num_bit_lengths] {
            output.put_bits(
                u32::from(self.code_sizes[HUFF_CODES_TABLE][swizzle as usize]),
                3,
            );
        }

        let mut packed_code_size_index = 0;
        while packed_code_size_index < packed_pos {
            let code = packed_code_sizes[packed_code_size_index] as usize;
            packed_code_size_index += 1;
            assert!(code < MAX_HUFF_SYMBOLS_2);
            output.put_bits(
                u32::from(self.codes[HUFF_CODES_TABLE][code]),
                u32::from(self.code_sizes[HUFF_CODES_TABLE][code]),
            );
            if code >= 16 {
                output.put_bits(
                    u32::from(packed_code_sizes[packed_code_size_index]),
                    [2, 3, 7][code - 16],
                );
                packed_code_size_index += 1;
            }
        }

        Ok(())
    }
}

struct DictOxide {
    /// The maximum number of checks in the hash chain, for the initial
    /// and the lazy match respectively.
    pub max_probes: [u32; 2],
    /// Buffer of input data.
    /// Padded with 1 byte to simplify matching code in `compress_fast`.
    pub b: Box<HashBuffers>,

    pub code_buf_dict_pos: usize,
    pub lookahead_size: usize,
    pub lookahead_pos: usize,
    pub size: usize,
}

const fn probes_from_flags(flags: u32) -> [u32; 2] {
    [
        1 + ((flags & 0xFFF) + 2) / 3,
        1 + (((flags & 0xFFF) >> 2) + 2) / 3,
    ]
}
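
// E.g. with the default flags (NUM_PROBES[4] == 16 in the low bits) this
// yields [1 + 18 / 3, 1 + 6 / 3] == [7, 3] probes for the initial and the
// lazy match respectively.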

impl DictOxide {
    fn new(flags: u32) -> Self {
        DictOxide {
            max_probes: probes_from_flags(flags),
            b: Box::default(),
            code_buf_dict_pos: 0,
            lookahead_size: 0,
            lookahead_pos: 0,
            size: 0,
        }
    }

    fn update_flags(&mut self, flags: u32) {
        self.max_probes = probes_from_flags(flags);
    }

    fn reset(&mut self) {
        self.b.reset();
        self.code_buf_dict_pos = 0;
        self.lookahead_size = 0;
        self.lookahead_pos = 0;
        self.size = 0;
    }

    /// Do an unaligned read of the data at `pos` in the dictionary and
    /// interpret it as a little-endian u32.
    #[inline]
    fn read_unaligned_u32(&self, pos: usize) -> u32 {
        // Masking the value here helps avoid bounds checks.
        let pos = pos & LZ_DICT_SIZE_MASK;
        let end = pos + 4;
        // Somehow this assertion makes things faster.
        // TODO: as of May 2024 this does not seem to make any difference,
        // so consider removing.
        assert!(end < LZ_DICT_FULL_SIZE);

        let bytes: [u8; 4] = self.b.dict[pos..end].try_into().unwrap();
        u32::from_le_bytes(bytes)
    }

    /// Do an unaligned read of the data at `pos` in the dictionary and
    /// interpret it as a little-endian u64.
    #[inline]
    fn read_unaligned_u64(&self, pos: usize) -> u64 {
        // Help evade bounds/panic code check by masking the position value.
        // This provides a small speedup at the cost of an instruction or two instead of
        // having to use unsafe.
        let pos = pos & LZ_DICT_SIZE_MASK;
        let bytes: [u8; 8] = self.b.dict[pos..pos + 8].try_into().unwrap();
        u64::from_le_bytes(bytes)
    }

    /// Read the two bytes at `pos` in the dictionary and interpret them as a
    /// little-endian u16.
    #[inline]
    fn read_as_u16(&self, pos: usize) -> u16 {
        read_u16_le(&self.b.dict[..], pos)
    }

    /// Try to find a match for the data at lookahead_pos in the dictionary that is
    /// longer than `match_len`.
    /// Returns a tuple containing (match_distance, match_length). Will be equal to the input
    /// values if no better matches were found.
    fn find_match(
        &self,
        lookahead_pos: usize,
        max_dist: usize,
        max_match_len: u32,
        mut match_dist: u32,
        mut match_len: u32,
    ) -> (u32, u32) {
        // Clamp the match len and max_match_len to be valid. (It should be when this is called, but
        // do it for now just in case for safety reasons.)
        // This should normally end up as at worst conditional moves,
        // so it shouldn't slow us down much.
        // TODO: Statically verify these so we don't need to do this.
        let max_match_len = cmp::min(MAX_MATCH_LEN as u32, max_match_len);
        match_len = cmp::max(match_len, 1);

        let pos = lookahead_pos & LZ_DICT_SIZE_MASK;
        let mut probe_pos = pos;
        // Number of probes into the hash chains.
        let mut num_probes_left = self.max_probes[(match_len >= 32) as usize];

        // If we already have a match of the full length, don't bother searching for another one.
        if max_match_len <= match_len {
            return (match_dist, match_len);
        }

        // Read the last byte of the current match, and the next one, used to compare matches.
        let mut c01: u16 = self.read_as_u16(pos + match_len as usize - 1);
        // Read the two bytes at the start position of the current match.
        let s01: u16 = self.read_as_u16(pos);

        'outer: loop {
            let mut dist;
            'found: loop {
                num_probes_left -= 1;
                if num_probes_left == 0 {
                    // We have done as many probes in the hash chain as the current compression
                    // settings allow, so return the best match we found, if any.
                    return (match_dist, match_len);
                }

                for _ in 0..3 {
                    let next_probe_pos = self.b.next[probe_pos] as usize;

                    dist = (lookahead_pos - next_probe_pos) & 0xFFFF;
                    if next_probe_pos == 0 || dist > max_dist {
                        // We reached the end of the hash chain, or the next value is further away
                        // than the maximum allowed distance, so return the best match we found, if
                        // any.
                        return (match_dist, match_len);
                    }

                    // Mask the position value to get the position in the hash chain of the next
                    // position to match against.
                    probe_pos = next_probe_pos & LZ_DICT_SIZE_MASK;

                    if self.read_as_u16(probe_pos + match_len as usize - 1) == c01 {
                        break 'found;
                    }
                }
            }

            if dist == 0 {
                // We've looked through the whole match range, so return the best match we
                // found.
                return (match_dist, match_len);
            }

            // Check if the two first bytes match.
            if self.read_as_u16(probe_pos) != s01 {
                continue;
            }

            let mut p = pos + 2;
            let mut q = probe_pos + 2;
            // The first two bytes matched, so check the full length of the match.
            for _ in 0..32 {
                let p_data: u64 = self.read_unaligned_u64(p);
                let q_data: u64 = self.read_unaligned_u64(q);
                // Compare 8 bytes at a time using unaligned loads of 64-bit integers.
                let xor_data = p_data ^ q_data;
                if xor_data == 0 {
                    p += 8;
                    q += 8;
                } else {
                    // If not all of the last 8 bytes matched, check how many of them did.
                    let trailing = xor_data.trailing_zeros();
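
                    // Each matching low byte contributes 8 trailing zero bits,
                    // so `trailing >> 3` is the number of extra bytes that matched.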
1384
1385                    let probe_len = p - pos + (trailing as usize >> 3);
1386                    if probe_len > match_len as usize {
1387                        match_dist = dist as u32;
1388                        match_len = cmp::min(max_match_len, probe_len as u32);
1389                        if match_len == max_match_len {
1390                            // We found a match that had the maximum allowed length,
1391                            // so there is now point searching further.
1392                            return (match_dist, match_len);
1393                        }
1394                        // We found a better match, so save the last two bytes for further match
1395                        // comparisons.
1396                        c01 = self.read_as_u16(pos + match_len as usize - 1)
1397                    }
1398                    continue 'outer;
1399                }
1400            }
1401
1402            return (dist as u32, cmp::min(max_match_len, MAX_MATCH_LEN as u32));
1403        }
1404    }
1405}
1406
1407struct ParamsOxide {
1408    pub flags: u32,
1409    pub greedy_parsing: bool,
1410    pub block_index: u32,
1411
1412    pub saved_match_dist: u32,
1413    pub saved_match_len: u32,
1414    pub saved_lit: u8,
1415
1416    pub flush: TDEFLFlush,
1417    pub flush_ofs: u32,
1418    pub flush_remaining: u32,
1419    pub finished: bool,
1420
1421    pub adler32: u32,
1422
1423    pub src_pos: usize,
1424
1425    pub out_buf_ofs: usize,
1426    pub prev_return_status: TDEFLStatus,
1427
1428    pub saved_bit_buffer: u32,
1429    pub saved_bits_in: u32,
1430
1431    pub local_buf: Box<LocalBuf>,
1432}
1433
1434impl ParamsOxide {
1435    fn new(flags: u32) -> Self {
1436        ParamsOxide {
1437            flags,
1438            greedy_parsing: flags & TDEFL_GREEDY_PARSING_FLAG != 0,
1439            block_index: 0,
1440            saved_match_dist: 0,
1441            saved_match_len: 0,
1442            saved_lit: 0,
1443            flush: TDEFLFlush::None,
1444            flush_ofs: 0,
1445            flush_remaining: 0,
1446            finished: false,
1447            adler32: MZ_ADLER32_INIT,
1448            src_pos: 0,
1449            out_buf_ofs: 0,
1450            prev_return_status: TDEFLStatus::Okay,
1451            saved_bit_buffer: 0,
1452            saved_bits_in: 0,
1453            local_buf: Box::default(),
1454        }
1455    }
1456
1457    fn update_flags(&mut self, flags: u32) {
1458        self.flags = flags;
1459        self.greedy_parsing = self.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
1460    }
1461
1462    /// Reset state, saving settings.
1463    fn reset(&mut self) {
1464        self.block_index = 0;
1465        self.saved_match_len = 0;
1466        self.saved_match_dist = 0;
1467        self.saved_lit = 0;
1468        self.flush = TDEFLFlush::None;
1469        self.flush_ofs = 0;
1470        self.flush_remaining = 0;
1471        self.finished = false;
1472        self.adler32 = MZ_ADLER32_INIT;
1473        self.src_pos = 0;
1474        self.out_buf_ofs = 0;
1475        self.prev_return_status = TDEFLStatus::Okay;
1476        self.saved_bit_buffer = 0;
1477        self.saved_bits_in = 0;
1478        self.local_buf.b = [0; OUT_BUF_SIZE];
1479    }
1480}
1481
1482struct LZOxide {
1483    pub codes: [u8; LZ_CODE_BUF_SIZE],
1484    pub code_position: usize,
1485    pub flag_position: usize,
1486
1487    // The total number of bytes in the current block.
1488    pub total_bytes: u32,
1489    pub num_flags_left: u32,
1490}
1491
1492impl LZOxide {
1493    const fn new() -> Self {
1494        LZOxide {
1495            codes: [0; LZ_CODE_BUF_SIZE],
1496            code_position: 1,
1497            flag_position: 0,
1498            total_bytes: 0,
1499            num_flags_left: 8,
1500        }
1501    }
1502
1503    fn write_code(&mut self, val: u8) {
1504        // Perf - go via u16 to help evade bounds check.
1505        // TODO: see if we can use u16 for code_position in general.
1506        self.codes[usize::from(self.code_position as u16)] = val;
1507        self.code_position += 1;
1508    }
1509
1510    fn init_flag(&mut self) {
1511        if self.num_flags_left == 8 {
1512            *self.get_flag() = 0;
1513            self.code_position -= 1;
1514        } else {
1515            *self.get_flag() >>= self.num_flags_left;
1516        }
1517    }
1518
1519    fn get_flag(&mut self) -> &mut u8 {
1520        // Perf - go via u16 to help evade bounds check
1521        // TODO: see if we can use u16 for flag_position in general.
1522        &mut self.codes[usize::from(self.flag_position as u16)]
1523    }
1524
1525    fn plant_flag(&mut self) {
1526        self.flag_position = self.code_position;
1527        self.code_position += 1;
1528    }
1529
1530    fn consume_flag(&mut self) {
1531        self.num_flags_left -= 1;
1532        if self.num_flags_left == 0 {
1533            self.num_flags_left = 8;
1534            self.plant_flag();
1535        }
1536    }
1537}
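
// The `codes` buffer interleaves flag bytes with code bytes: each flag byte describes
// the following eight entries, one bit per entry (0 = literal, 1 = match). Flags are
// built by shifting right and OR-ing 0x80 in at the top, so the oldest entry ends up
// in bit 0. A toy model of that bookkeeping (standalone, not used by the module):
#[cfg(test)]
mod flag_byte_example {
    #[test]
    fn flags_fill_lsb_first() {
        let mut flag = 0u8;
        // Entries in order: literal, match, literal, literal, match, literal, literal, literal.
        for &is_match in &[false, true, false, false, true, false, false, false] {
            flag >>= 1;
            if is_match {
                flag |= 0x80;
            }
        }
        // Entry 0 lands in bit 0, entry 1 in bit 1, ... so bits 1 and 4 are set.
        assert_eq!(flag, 0b0001_0010);
    }
}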
1538
1539fn compress_lz_codes(
1540    huff: &HuffmanOxide,
1541    output: &mut OutputBufferOxide,
1542    lz_code_buf: &[u8],
1543) -> Result<bool> {
1544    let mut flags = 1;
1545    let mut bb = BitBuffer {
1546        bit_buffer: u64::from(output.bit_buffer),
1547        bits_in: output.bits_in,
1548    };
1549
1550    let mut i: usize = 0;
1551    while i < lz_code_buf.len() {
1552        if flags == 1 {
1553            flags = u32::from(lz_code_buf[i]) | 0x100;
1554            i += 1;
1555        }
1556
1557        // The lz code was a length code
1558        if flags & 1 == 1 {
1559            flags >>= 1;
1560
1561            let sym;
1562            let num_extra_bits;
1563
1564            let match_len = lz_code_buf[i] as usize;
1565
1566            let match_dist = read_u16_le(lz_code_buf, i + 1);
1567
1568            i += 3;
1569
1570            debug_assert!(huff.code_sizes[0][LEN_SYM[match_len] as usize] != 0);
1571            bb.put_fast(
1572                u64::from(huff.codes[0][LEN_SYM[match_len] as usize]),
1573                u32::from(huff.code_sizes[0][LEN_SYM[match_len] as usize]),
1574            );
1575            bb.put_fast(
1576                match_len as u64 & u64::from(BITMASKS[LEN_EXTRA[match_len] as usize]),
1577                u32::from(LEN_EXTRA[match_len]),
1578            );
1579
1580            if match_dist < 512 {
1581                sym = SMALL_DIST_SYM[match_dist as usize] as usize;
1582                num_extra_bits = SMALL_DIST_EXTRA[match_dist as usize] as usize;
1583            } else {
1584                sym = LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize;
1585                num_extra_bits = LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize;
1586            }
1587
1588            debug_assert!(huff.code_sizes[1][sym] != 0);
1589            bb.put_fast(
1590                u64::from(huff.codes[1][sym]),
1591                u32::from(huff.code_sizes[1][sym]),
1592            );
1593            bb.put_fast(
1594                u64::from(match_dist) & u64::from(BITMASKS[num_extra_bits]),
1595                num_extra_bits as u32,
1596            );
1597        } else {
1598            // The lz code was a literal
1599            for _ in 0..3 {
1600                flags >>= 1;
1601                let lit = lz_code_buf[i];
1602                i += 1;
1603
1604                debug_assert!(huff.code_sizes[0][lit as usize] != 0);
1605                bb.put_fast(
1606                    u64::from(huff.codes[0][lit as usize]),
1607                    u32::from(huff.code_sizes[0][lit as usize]),
1608                );
1609
1610                if flags & 1 == 1 || i >= lz_code_buf.len() {
1611                    break;
1612                }
1613            }
1614        }
1615
1616        bb.flush(output)?;
1617    }
1618
1619    output.bits_in = 0;
1620    output.bit_buffer = 0;
1621    while bb.bits_in != 0 {
1622        let n = cmp::min(bb.bits_in, 16);
1623        output.put_bits(bb.bit_buffer as u32 & BITMASKS[n as usize], n);
1624        bb.bit_buffer >>= n;
1625        bb.bits_in -= n;
1626    }
1627
1628    // Output the end of block symbol.
1629    output.put_bits(
1630        u32::from(huff.codes[0][256]),
1631        u32::from(huff.code_sizes[0][256]),
1632    );
1633
1634    Ok(true)
1635}
1636
1637fn compress_block(
1638    huff: &mut HuffmanOxide,
1639    output: &mut OutputBufferOxide,
1640    lz: &LZOxide,
1641    static_block: bool,
1642) -> Result<bool> {
1643    if static_block {
1644        huff.start_static_block(output);
1645    } else {
1646        huff.start_dynamic_block(output)?;
1647    }
1648
1649    compress_lz_codes(huff, output, &lz.codes[..lz.code_position])
1650}
1651
1652fn flush_block(
1653    d: &mut CompressorOxide,
1654    callback: &mut CallbackOxide,
1655    flush: TDEFLFlush,
1656) -> Result<i32> {
1657    let mut saved_buffer;
1658    {
1659        let mut output = callback
1660            .out
1661            .new_output_buffer(&mut d.params.local_buf.b, d.params.out_buf_ofs);
1662        output.bit_buffer = d.params.saved_bit_buffer;
1663        output.bits_in = d.params.saved_bits_in;
1664
1665        let use_raw_block = (d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0)
1666            && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos) <= d.dict.size;
1667
1668        assert!(d.params.flush_remaining == 0);
1669        d.params.flush_ofs = 0;
1670        d.params.flush_remaining = 0;
1671
1672        d.lz.init_flag();
1673
1674        // If we are at the start of the stream, write the zlib header if requested.
1675        if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && d.params.block_index == 0 {
1676            let header = zlib::header_from_flags(d.params.flags);
1677            output.put_bits(header[0].into(), 8);
1678            output.put_bits(header[1].into(), 8);
1679        }
1680
1681        // Output the block header.
1682        output.put_bits((flush == TDEFLFlush::Finish) as u32, 1);
1683
1684        saved_buffer = output.save();
1685
1686        let comp_success = if !use_raw_block {
1687            let use_static =
1688                (d.params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (d.lz.total_bytes < 48);
1689            compress_block(&mut d.huff, &mut output, &d.lz, use_static)?
1690        } else {
1691            false
1692        };
1693
1694        // If we failed to compress anything, or if the compressed output takes up more space
1695        // than the input data, output a stored block instead, which has at most 5 bytes of overhead.
1696        // We only use some simple heuristics for now.
1697        // A stored block will have an overhead of at least 4 bytes containing the block length
1698        // but usually more due to the length parameters having to start at a byte boundary and thus
1699        // requiring up to 5 bytes of padding.
1700        // As a static block will have an overhead of at most 1 bit per byte
1701        // (literals are encoded with either 8 or 9 bits), a raw block will
1702        // never take up less space if the number of input bytes is less than 32.
1703        let expanded = (d.lz.total_bytes > 32)
1704            && (output.inner_pos - saved_buffer.pos + 1 >= (d.lz.total_bytes as usize))
1705            && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos <= d.dict.size);
1706
1707        if use_raw_block || expanded {
1708            output.load(saved_buffer);
1709
1710            // Block header.
1711            output.put_bits(0, 2);
1712
1713            // Block length has to start on a byte boundary, so pad.
1714            output.pad_to_bytes();
1715
1716            // Block length and one's complement of block length.
1717            output.put_bits(d.lz.total_bytes & 0xFFFF, 16);
1718            output.put_bits(!d.lz.total_bytes & 0xFFFF, 16);
1719
1720            // Write the actual bytes.
1721            for i in 0..d.lz.total_bytes {
1722                let pos = (d.dict.code_buf_dict_pos + i as usize) & LZ_DICT_SIZE_MASK;
1723                output.put_bits(u32::from(d.dict.b.dict[pos]), 8);
1724            }
1725        } else if !comp_success {
1726            output.load(saved_buffer);
1727            compress_block(&mut d.huff, &mut output, &d.lz, true)?;
1728        }
1729
1730        if flush != TDEFLFlush::None {
1731            if flush == TDEFLFlush::Finish {
1732                output.pad_to_bytes();
1733                if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 {
1734                    let mut adler = d.params.adler32;
1735                    for _ in 0..4 {
1736                        output.put_bits((adler >> 24) & 0xFF, 8);
1737                        adler <<= 8;
1738                    }
1739                }
1740            } else {
1741                // Sync or Full flush.
1742                // Output an empty raw block.
1743                output.put_bits(0, 3);
1744                output.pad_to_bytes();
1745                output.put_bits(0, 16);
1746                output.put_bits(0xFFFF, 16);
1747            }
1748        }
1749
1750        d.huff.count[0][..MAX_HUFF_SYMBOLS_0].fill(0);
1751        d.huff.count[1][..MAX_HUFF_SYMBOLS_1].fill(0);
1752
1753        d.lz.code_position = 1;
1754        d.lz.flag_position = 0;
1755        d.lz.num_flags_left = 8;
1756        d.dict.code_buf_dict_pos += d.lz.total_bytes as usize;
1757        d.lz.total_bytes = 0;
1758        d.params.block_index += 1;
1759
1760        saved_buffer = output.save();
1761
1762        d.params.saved_bit_buffer = saved_buffer.bit_buffer;
1763        d.params.saved_bits_in = saved_buffer.bits_in;
1764    }
1765
1766    Ok(callback.flush_output(saved_buffer, &mut d.params))
1767}
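
// A stored (raw) block encodes its length as a 16-bit LEN followed by NLEN, the one's
// complement of LEN, as a simple integrity check (RFC 1951, section 3.2.4). A worked
// sketch of the LEN/NLEN pairing written above:
#[cfg(test)]
mod stored_block_len_example {
    #[test]
    fn nlen_is_ones_complement_of_len() {
        let total_bytes: u32 = 1234;
        let len = total_bytes & 0xFFFF;
        let nlen = !total_bytes & 0xFFFF;
        assert_eq!(len, 1234);
        assert_eq!(nlen, 0xFFFF - 1234);
        // Every LEN/NLEN pair XORs to all ones.
        assert_eq!(len ^ nlen, 0xFFFF);
    }
}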
1768
1769fn record_literal(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) {
1770    lz.total_bytes += 1;
1771    lz.write_code(lit);
1772
1773    *lz.get_flag() >>= 1;
1774    lz.consume_flag();
1775
1776    h.count[0][lit as usize] += 1;
1777}
1778
1779fn record_match(h: &mut HuffmanOxide, lz: &mut LZOxide, mut match_len: u32, mut match_dist: u32) {
1780    debug_assert!(match_len >= MIN_MATCH_LEN.into());
1781    debug_assert!(match_dist >= 1);
1782    debug_assert!(match_dist as usize <= LZ_DICT_SIZE);
1783
1784    lz.total_bytes += match_len;
1785    match_dist -= 1;
1786    match_len -= u32::from(MIN_MATCH_LEN);
1787    lz.write_code(match_len as u8);
1788    lz.write_code(match_dist as u8);
1789    lz.write_code((match_dist >> 8) as u8);
1790
1791    *lz.get_flag() >>= 1;
1792    *lz.get_flag() |= 0x80;
1793    lz.consume_flag();
1794
1795    let symbol = if match_dist < 512 {
1796        SMALL_DIST_SYM[match_dist as usize]
1797    } else {
1798        LARGE_DIST_SYM[((match_dist >> 8) & 127) as usize]
1799    } as usize;
1800    h.count[1][symbol] += 1;
1801    // Perf - go via u8 to help optimize out bounds check.
1802    h.count[0][LEN_SYM[usize::from(match_len as u8)] as usize] += 1;
1803}
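
// `record_match` stores each match as three bytes: the length minus MIN_MATCH_LEN (3)
// in one byte, then the distance minus one as a little-endian u16. Deflate lengths span
// 3..=258, which maps exactly onto 0..=255, so a single byte suffices. A small
// round-trip sketch of that layout (standalone, illustrative values):
#[cfg(test)]
mod match_record_example {
    #[test]
    fn three_byte_match_record_round_trips() {
        let (match_len, match_dist): (u32, u32) = (258, 32768);
        let record = [
            (match_len - 3) as u8,
            ((match_dist - 1) & 0xFF) as u8,
            ((match_dist - 1) >> 8) as u8,
        ];
        let decoded_len = u32::from(record[0]) + 3;
        let decoded_dist = (u32::from(record[1]) | (u32::from(record[2]) << 8)) + 1;
        assert_eq!(decoded_len, 258);
        assert_eq!(decoded_dist, 32768);
    }
}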
1804
1805fn compress_normal(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
1806    let mut src_pos = d.params.src_pos;
1807    let in_buf = match callback.in_buf {
1808        None => return true,
1809        Some(in_buf) => in_buf,
1810    };
1811
1812    let mut lookahead_size = d.dict.lookahead_size;
1813    let mut lookahead_pos = d.dict.lookahead_pos;
1814    let mut saved_lit = d.params.saved_lit;
1815    let mut saved_match_dist = d.params.saved_match_dist;
1816    let mut saved_match_len = d.params.saved_match_len;
1817
1818    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) {
1819        let src_buf_left = in_buf.len() - src_pos;
1820        let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size);
1821
1822        if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1
1823            && num_bytes_to_process > 0
1824        {
1825            let dictb = &mut d.dict.b;
1826
1827            let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
1828            let mut ins_pos = lookahead_pos + lookahead_size - 2;
1829            // Start the hash value from the first two bytes
1830            let mut hash = update_hash(
1831                u16::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK]),
1832                dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK],
1833            );
1834
1835            lookahead_size += num_bytes_to_process;
1836
1837            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
1838                // Add byte to input buffer.
1839                dictb.dict[dst_pos] = c;
1840                if dst_pos < MAX_MATCH_LEN - 1 {
1841                    dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
1842                }
1843
1844                // Generate hash from the current byte,
1845                hash = update_hash(hash, c);
1846                dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
1847                // and insert it into the hash chain.
1848                dictb.hash[hash as usize] = ins_pos as u16;
1849                dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK;
1850                ins_pos += 1;
1851            }
1852            src_pos += num_bytes_to_process;
1853        } else {
1854            let dictb = &mut d.dict.b;
1855            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
1856                let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
1857                dictb.dict[dst_pos] = c;
1858                if dst_pos < MAX_MATCH_LEN - 1 {
1859                    dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
1860                }
1861
1862                lookahead_size += 1;
1863                if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() {
1864                    let ins_pos = lookahead_pos + lookahead_size - 3;
1865                    let hash = ((u32::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK])
1866                        << (LZ_HASH_SHIFT * 2))
1867                        ^ ((u32::from(dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK])
1868                            << LZ_HASH_SHIFT)
1869                            ^ u32::from(c)))
1870                        & (LZ_HASH_SIZE as u32 - 1);
1871
1872                    dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
1873                    dictb.hash[hash as usize] = ins_pos as u16;
1874                }
1875            }
1876
1877            src_pos += num_bytes_to_process;
1878        }
1879
1880        d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size);
1881        if d.params.flush == TDEFLFlush::None && lookahead_size < MAX_MATCH_LEN {
1882            break;
1883        }
1884
1885        let mut len_to_move = 1;
1886        let mut cur_match_dist = 0;
1887        let mut cur_match_len = if saved_match_len != 0 {
1888            saved_match_len
1889        } else {
1890            u32::from(MIN_MATCH_LEN) - 1
1891        };
1892        let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
1893        if d.params.flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0 {
1894            // If TDEFL_RLE_MATCHES is set, we only look for repeating sequences of the current byte.
1895            if d.dict.size != 0 && d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 {
1896                let c = d.dict.b.dict[(cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK];
1897                cur_match_len = d.dict.b.dict[cur_pos..(cur_pos + lookahead_size)]
1898                    .iter()
1899                    .take_while(|&x| *x == c)
1900                    .count() as u32;
1901                if cur_match_len < MIN_MATCH_LEN.into() {
1902                    cur_match_len = 0;
1903                } else {
1904                    cur_match_dist = 1;
1905                }
1906            }
1907        } else {
1908            // Try to find a match for the bytes at the current position.
1909            let dist_len = d.dict.find_match(
1910                lookahead_pos,
1911                d.dict.size,
1912                lookahead_size as u32,
1913                cur_match_dist,
1914                cur_match_len,
1915            );
1916            cur_match_dist = dist_len.0;
1917            cur_match_len = dist_len.1;
1918        }
1919
1920        let far_and_small = cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024;
1921        let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5;
1922        if far_and_small || filter_small || cur_pos == cur_match_dist as usize {
1923            cur_match_dist = 0;
1924            cur_match_len = 0;
1925        }
1926
1927        if saved_match_len != 0 {
1928            if cur_match_len > saved_match_len {
1929                record_literal(&mut d.huff, &mut d.lz, saved_lit);
1930                if cur_match_len >= 128 {
1931                    record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
1932                    saved_match_len = 0;
1933                    len_to_move = cur_match_len as usize;
1934                } else {
1935                    saved_lit = d.dict.b.dict[cur_pos];
1936                    saved_match_dist = cur_match_dist;
1937                    saved_match_len = cur_match_len;
1938                }
1939            } else {
1940                record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist);
1941                len_to_move = (saved_match_len - 1) as usize;
1942                saved_match_len = 0;
1943            }
1944        } else if cur_match_dist == 0 {
1945            record_literal(
1946                &mut d.huff,
1947                &mut d.lz,
1948                d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)],
1949            );
1950        } else if d.params.greedy_parsing
1951            || (d.params.flags & TDEFL_RLE_MATCHES != 0)
1952            || cur_match_len >= 128
1953        {
1954            // When lazy matching is used, only matches shorter than 128 bytes are deferred for
1955            // comparison against a match at the next byte; this one is recorded immediately.
1956            record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
1957            len_to_move = cur_match_len as usize;
1958        } else {
1959            saved_lit = d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)];
1960            saved_match_dist = cur_match_dist;
1961            saved_match_len = cur_match_len;
1962        }
1963
1964        lookahead_pos += len_to_move;
1965        assert!(lookahead_size >= len_to_move);
1966        lookahead_size -= len_to_move;
1967        d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE);
1968
1969        let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8;
1970        let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0;
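        // 115 / 128 ≈ 0.9, so `fat` is true when the lz code buffer has grown to roughly
        // 111% or more of the raw bytes it encodes, i.e. the block is compressing poorly.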
1971        let fat = ((d.lz.code_position * 115) >> 7) >= d.lz.total_bytes as usize;
1972        let fat_or_raw = (d.lz.total_bytes > 31 * 1024) && (fat || raw);
1973
1974        if lz_buf_tight || fat_or_raw {
1975            d.params.src_pos = src_pos;
1976            // These values are used in flush_block, so we need to write them back here.
1977            d.dict.lookahead_size = lookahead_size;
1978            d.dict.lookahead_pos = lookahead_pos;
1979
1980            let n = flush_block(d, callback, TDEFLFlush::None)
1981                .unwrap_or(TDEFLStatus::PutBufFailed as i32);
1982            if n != 0 {
1983                d.params.saved_lit = saved_lit;
1984                d.params.saved_match_dist = saved_match_dist;
1985                d.params.saved_match_len = saved_match_len;
1986                return n > 0;
1987            }
1988        }
1989    }
1990
1991    d.params.src_pos = src_pos;
1992    d.dict.lookahead_size = lookahead_size;
1993    d.dict.lookahead_pos = lookahead_pos;
1994    d.params.saved_lit = saved_lit;
1995    d.params.saved_match_dist = saved_match_dist;
1996    d.params.saved_match_len = saved_match_len;
1997    true
1998}
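
// The dictionary maintains hash chains: `hash[h]` holds the most recent position whose
// bytes hashed to `h`, and `next[pos]` links back to the previous position with the same
// hash, forming one singly-linked list per bucket. A toy model of the insertion done in
// `compress_normal` (array sizes and the fixed bucket are illustrative):
#[cfg(test)]
mod hash_chain_example {
    #[test]
    fn chain_links_previous_positions() {
        let mut hash = [0u16; 4];
        let mut next = [0u16; 16];
        // Insert positions 3, 7 and 12, all hashing to bucket 2.
        for &pos in &[3u16, 7, 12] {
            next[pos as usize] = hash[2];
            hash[2] = pos;
        }
        // Walking the chain from the head visits the most recent position first.
        assert_eq!(hash[2], 12);
        assert_eq!(next[12], 7);
        assert_eq!(next[7], 3);
    }
}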
1999
2000const COMP_FAST_LOOKAHEAD_SIZE: usize = 4096;
2001
2002fn compress_fast(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
2003    let mut src_pos = d.params.src_pos;
2004    let mut lookahead_size = d.dict.lookahead_size;
2005    let mut lookahead_pos = d.dict.lookahead_pos;
2006
2007    let mut cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
2008    let in_buf = match callback.in_buf {
2009        None => return true,
2010        Some(in_buf) => in_buf,
2011    };
2012
2013    debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2);
2014
2015    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size > 0) {
2016        let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
2017        let mut num_bytes_to_process = cmp::min(
2018            in_buf.len() - src_pos,
2019            COMP_FAST_LOOKAHEAD_SIZE - lookahead_size,
2020        );
2021        lookahead_size += num_bytes_to_process;
2022
2023        while num_bytes_to_process != 0 {
2024            let n = cmp::min(LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
2025            d.dict.b.dict[dst_pos..dst_pos + n].copy_from_slice(&in_buf[src_pos..src_pos + n]);
2026
2027            if dst_pos < MAX_MATCH_LEN - 1 {
2028                let m = cmp::min(n, MAX_MATCH_LEN - 1 - dst_pos);
2029                d.dict.b.dict[dst_pos + LZ_DICT_SIZE..dst_pos + LZ_DICT_SIZE + m]
2030                    .copy_from_slice(&in_buf[src_pos..src_pos + m]);
2031            }
2032
2033            src_pos += n;
2034            dst_pos = (dst_pos + n) & LZ_DICT_SIZE_MASK;
2035            num_bytes_to_process -= n;
2036        }
2037
2038        d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size);
2039        if d.params.flush == TDEFLFlush::None && lookahead_size < COMP_FAST_LOOKAHEAD_SIZE {
2040            break;
2041        }
2042
2043        while lookahead_size >= 4 {
2044            let mut cur_match_len = 1;
2045
2046            let first_trigram = d.dict.read_unaligned_u32(cur_pos) & 0xFF_FFFF;
2047
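            // Hash the trigram by folding its high bits onto the low bits so that all
            // 24 bits influence the LZ_HASH_BITS-wide hash value.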
2048            let hash = (first_trigram ^ (first_trigram >> (24 - (LZ_HASH_BITS - 8))))
2049                & LEVEL1_HASH_SIZE_MASK;
2050
2051            let mut probe_pos = usize::from(d.dict.b.hash[hash as usize]);
2052            d.dict.b.hash[hash as usize] = lookahead_pos as u16;
2053
2054            let mut cur_match_dist = (lookahead_pos - probe_pos) as u16;
2055            if cur_match_dist as usize <= d.dict.size {
2056                probe_pos &= LZ_DICT_SIZE_MASK;
2057
2058                let trigram = d.dict.read_unaligned_u32(probe_pos) & 0xFF_FFFF;
2059
2060                if first_trigram == trigram {
2061                    // The trigram bytes already matched, so start the comparison at offset + 3.
2062                    let mut p = cur_pos + 3;
2063                    let mut q = probe_pos + 3;
2064                    cur_match_len = (|| {
2065                        for _ in 0..32 {
2066                            let p_data: u64 = d.dict.read_unaligned_u64(p);
2067                            let q_data: u64 = d.dict.read_unaligned_u64(q);
2068                            let xor_data = p_data ^ q_data;
2069                            if xor_data == 0 {
2070                                p += 8;
2071                                q += 8;
2072                            } else {
2073                                let trailing = xor_data.trailing_zeros();
2074                                return p as u32 - cur_pos as u32 + (trailing >> 3);
2075                            }
2076                        }
2077
2078                        if cur_match_dist == 0 {
2079                            0
2080                        } else {
2081                            MAX_MATCH_LEN as u32
2082                        }
2083                    })();
2084
2085                    if cur_match_len < MIN_MATCH_LEN.into()
2086                        || (cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024)
2087                    {
2088                        let lit = first_trigram as u8;
2089                        cur_match_len = 1;
2090                        d.lz.write_code(lit);
2091                        *d.lz.get_flag() >>= 1;
2092                        d.huff.count[0][lit as usize] += 1;
2093                    } else {
2094                        // Limit the match to the length of the lookahead so we don't create a match
2095                        // that ends after the end of the input data.
2096                        cur_match_len = cmp::min(cur_match_len, lookahead_size as u32);
2097                        debug_assert!(cur_match_len >= MIN_MATCH_LEN.into());
2098                        debug_assert!(cur_match_dist >= 1);
2099                        debug_assert!(cur_match_dist as usize <= LZ_DICT_SIZE);
2100                        cur_match_dist -= 1;
2101
2102                        d.lz.write_code((cur_match_len - u32::from(MIN_MATCH_LEN)) as u8);
2103                        d.lz.write_code(cur_match_dist as u8);
2104                        d.lz.write_code((cur_match_dist >> 8) as u8);
2105
2106                        *d.lz.get_flag() >>= 1;
2107                        *d.lz.get_flag() |= 0x80;
2108                        if cur_match_dist < 512 {
2109                            d.huff.count[1][SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1;
2110                        } else {
2111                            d.huff.count[1]
2112                                [LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1;
2113                        }
2114
2115                        d.huff.count[0][LEN_SYM[(cur_match_len - u32::from(MIN_MATCH_LEN)) as usize]
2116                            as usize] += 1;
2117                    }
2118                } else {
2119                    d.lz.write_code(first_trigram as u8);
2120                    *d.lz.get_flag() >>= 1;
2121                    d.huff.count[0][first_trigram as u8 as usize] += 1;
2122                }
2123
2124                d.lz.consume_flag();
2125                d.lz.total_bytes += cur_match_len;
2126                lookahead_pos += cur_match_len as usize;
2127                d.dict.size = cmp::min(d.dict.size + cur_match_len as usize, LZ_DICT_SIZE);
2128                cur_pos = (cur_pos + cur_match_len as usize) & LZ_DICT_SIZE_MASK;
2129                lookahead_size -= cur_match_len as usize;
2130
2131                if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 {
2132                    // These values are used in flush_block, so we need to write them back here.
2133                    d.dict.lookahead_size = lookahead_size;
2134                    d.dict.lookahead_pos = lookahead_pos;
2135
2136                    let n = match flush_block(d, callback, TDEFLFlush::None) {
2137                        Err(_) => {
2138                            d.params.src_pos = src_pos;
2139                            d.params.prev_return_status = TDEFLStatus::PutBufFailed;
2140                            return false;
2141                        }
2142                        Ok(status) => status,
2143                    };
2144                    if n != 0 {
2145                        d.params.src_pos = src_pos;
2146                        return n > 0;
2147                    }
2148                    debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2);
2149
2150                    lookahead_size = d.dict.lookahead_size;
2151                    lookahead_pos = d.dict.lookahead_pos;
2152                }
2153            }
2154        }
2155
2156        while lookahead_size != 0 {
2157            let lit = d.dict.b.dict[cur_pos];
2158            d.lz.total_bytes += 1;
2159            d.lz.write_code(lit);
2160            *d.lz.get_flag() >>= 1;
2161            d.lz.consume_flag();
2162
2163            d.huff.count[0][lit as usize] += 1;
2164            lookahead_pos += 1;
2165            d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE);
2166            cur_pos = (cur_pos + 1) & LZ_DICT_SIZE_MASK;
2167            lookahead_size -= 1;
2168
2169            if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 {
2170                // These values are used in flush_block, so we need to write them back here.
2171                d.dict.lookahead_size = lookahead_size;
2172                d.dict.lookahead_pos = lookahead_pos;
2173
2174                let n = match flush_block(d, callback, TDEFLFlush::None) {
2175                    Err(_) => {
2176                        d.params.prev_return_status = TDEFLStatus::PutBufFailed;
2177                        d.params.src_pos = src_pos;
2178                        return false;
2179                    }
2180                    Ok(status) => status,
2181                };
2182                if n != 0 {
2183                    d.params.src_pos = src_pos;
2184                    return n > 0;
2185                }
2186
2187                lookahead_size = d.dict.lookahead_size;
2188                lookahead_pos = d.dict.lookahead_pos;
2189            }
2190        }
2191    }
2192
2193    d.params.src_pos = src_pos;
2194    d.dict.lookahead_size = lookahead_size;
2195    d.dict.lookahead_pos = lookahead_pos;
2196    true
2197}
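
// The fast mode keys its hash table on "trigrams": the first three bytes at the current
// position, obtained by masking the low 24 bits of a little-endian u32 load. A short
// sketch of that extraction, using `u32::from_le_bytes` in place of the dictionary's
// unaligned read:
#[cfg(test)]
mod trigram_example {
    #[test]
    fn masking_a_le_u32_yields_the_first_three_bytes() {
        let bytes = [0x01, 0x02, 0x03, 0x04];
        let trigram = u32::from_le_bytes(bytes) & 0xFF_FFFF;
        // Bytes 0x01, 0x02 and 0x03 survive; 0x04 is masked away.
        assert_eq!(trigram, 0x03_02_01);
    }
}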
2198
2199fn flush_output_buffer(c: &mut CallbackOxide, p: &mut ParamsOxide) -> (TDEFLStatus, usize, usize) {
2200    let mut res = (TDEFLStatus::Okay, p.src_pos, 0);
2201    if let CallbackOut::Buf(ref mut cb) = c.out {
2202        let n = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize);
2203        if n != 0 {
2204            cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + n]
2205                .copy_from_slice(&p.local_buf.b[p.flush_ofs as usize..p.flush_ofs as usize + n]);
2206        }
2207        p.flush_ofs += n as u32;
2208        p.flush_remaining -= n as u32;
2209        p.out_buf_ofs += n;
2210        res.2 = p.out_buf_ofs;
2211    }
2212
2213    if p.finished && p.flush_remaining == 0 {
2214        res.0 = TDEFLStatus::Done;
2215    }
2216    res
2217}
2218
2219/// Main compression function. Tries to compress as much as possible from `in_buf` and
2220/// puts compressed output into `out_buf`.
2221///
2222/// The value of `flush` determines if the compressor should attempt to flush all output
2223/// and whether it should try to finish the stream.
2224///
2225/// Use [`TDEFLFlush::Finish`] on the final call to signal that the stream is finishing.
2226///
2227/// Note that this function does not keep track of whether a flush marker has been output, so
2228/// if called using [`TDEFLFlush::Sync`], the caller needs to ensure there is enough space in the
2229/// output buffer if they want to avoid repeated flush markers.
2230/// See #105 for details.
2231///
2232/// # Returns
2233/// Returns a tuple containing the current status of the compressor, the current position
2234/// in the input buffer and the current position in the output buffer.
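///
/// # Example
///
/// A minimal one-shot usage sketch (the flag and buffer values here are illustrative,
/// not prescriptive):
/// ```
/// use miniz_oxide::deflate::core::{
///     compress, create_comp_flags_from_zip_params, CompressorOxide, TDEFLFlush, TDEFLStatus,
/// };
///
/// // Level 6, raw deflate (no zlib wrapper), default strategy.
/// let flags = create_comp_flags_from_zip_params(6, 0, 0);
/// let mut d = CompressorOxide::new(flags);
/// let data = b"Hello, hello, deflate!";
/// let mut out = vec![0u8; 128];
///
/// let (status, bytes_in, bytes_out) = compress(&mut d, data, &mut out, TDEFLFlush::Finish);
/// assert_eq!(status, TDEFLStatus::Done);
/// assert_eq!(bytes_in, data.len());
/// assert!(bytes_out > 0);
/// ```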
2235pub fn compress(
2236    d: &mut CompressorOxide,
2237    in_buf: &[u8],
2238    out_buf: &mut [u8],
2239    flush: TDEFLFlush,
2240) -> (TDEFLStatus, usize, usize) {
2241    compress_inner(
2242        d,
2243        &mut CallbackOxide::new_callback_buf(in_buf, out_buf),
2244        flush,
2245    )
2246}
2247
2248/// Main compression function. Callbacks output.
2249///
2250/// # Returns
2251/// Returns a tuple containing the current status of the compressor and the current position
2252/// in the input buffer.
2253///
2254/// The caller is responsible for ensuring the `CallbackFunc` struct will not cause undefined
2255/// behaviour.
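///
/// # Example
///
/// A minimal sketch collecting the compressed bytes through the callback
/// (flag values are illustrative):
/// ```
/// use miniz_oxide::deflate::core::{
///     compress_to_output, create_comp_flags_from_zip_params, CompressorOxide, TDEFLFlush,
///     TDEFLStatus,
/// };
///
/// let flags = create_comp_flags_from_zip_params(6, 0, 0);
/// let mut d = CompressorOxide::new(flags);
/// let data = b"Hello, hello, deflate!";
/// let mut encoded = Vec::new();
///
/// let (status, bytes_in) = compress_to_output(&mut d, data, TDEFLFlush::Finish, |out| {
///     encoded.extend_from_slice(out);
///     // Returning false would tell the compressor that writing the output failed.
///     true
/// });
/// assert_eq!(status, TDEFLStatus::Done);
/// assert_eq!(bytes_in, data.len());
/// ```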
2256pub fn compress_to_output(
2257    d: &mut CompressorOxide,
2258    in_buf: &[u8],
2259    flush: TDEFLFlush,
2260    mut callback_func: impl FnMut(&[u8]) -> bool,
2261) -> (TDEFLStatus, usize) {
2262    let res = compress_inner(
2263        d,
2264        &mut CallbackOxide::new_callback_func(
2265            in_buf,
2266            CallbackFunc {
2267                put_buf_func: &mut callback_func,
2268            },
2269        ),
2270        flush,
2271    );
2272
2273    (res.0, res.1)
2274}
2275
2276fn compress_inner(
2277    d: &mut CompressorOxide,
2278    callback: &mut CallbackOxide,
2279    flush: TDEFLFlush,
2280) -> (TDEFLStatus, usize, usize) {
2281    d.params.out_buf_ofs = 0;
2282    d.params.src_pos = 0;
2283
2284    let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay;
2285    let flush_finish_once = d.params.flush != TDEFLFlush::Finish || flush == TDEFLFlush::Finish;
2286
2287    d.params.flush = flush;
2288    if !prev_ok || !flush_finish_once {
2289        d.params.prev_return_status = TDEFLStatus::BadParam;
2290        return (d.params.prev_return_status, 0, 0);
2291    }
2292
2293    if d.params.flush_remaining != 0 || d.params.finished {
2294        let res = flush_output_buffer(callback, &mut d.params);
2295        d.params.prev_return_status = res.0;
2296        return res;
2297    }
2298
2299    let one_probe = d.params.flags & MAX_PROBES_MASK as u32 == 1;
2300    let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
2301    let filter_or_rle_or_raw = d.params.flags
2302        & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)
2303        != 0;
2304
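    // The fast compressor only handles the level-1 style configuration: a single probe,
    // greedy parsing, and none of the filter/RLE/raw block modes.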
2305    let compress_success = if one_probe && greedy && !filter_or_rle_or_raw {
2306        compress_fast(d, callback)
2307    } else {
2308        compress_normal(d, callback)
2309    };
2310
2311    if !compress_success {
2312        return (
2313            d.params.prev_return_status,
2314            d.params.src_pos,
2315            d.params.out_buf_ofs,
2316        );
2317    }
2318
2319    if let Some(in_buf) = callback.in_buf {
2320        if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 {
2321            d.params.adler32 = update_adler32(d.params.adler32, &in_buf[..d.params.src_pos]);
2322        }
2323    }
2324
2325    let flush_none = d.params.flush == TDEFLFlush::None;
2326    let in_left = callback.in_buf.map_or(0, |buf| buf.len()) - d.params.src_pos;
2327    let remaining = in_left != 0 || d.params.flush_remaining != 0;
2328    if !flush_none && d.dict.lookahead_size == 0 && !remaining {
2329        let flush = d.params.flush;
2330        match flush_block(d, callback, flush) {
2331            Err(_) => {
2332                d.params.prev_return_status = TDEFLStatus::PutBufFailed;
2333                return (
2334                    d.params.prev_return_status,
2335                    d.params.src_pos,
2336                    d.params.out_buf_ofs,
2337                );
2338            }
2339            Ok(x) if x < 0 => {
2340                return (
2341                    d.params.prev_return_status,
2342                    d.params.src_pos,
2343                    d.params.out_buf_ofs,
2344                )
2345            }
2346            _ => {
2347                d.params.finished = d.params.flush == TDEFLFlush::Finish;
2348                if d.params.flush == TDEFLFlush::Full {
2349                    d.dict.b.hash.fill(0);
2350                    d.dict.b.next.fill(0);
2351                    d.dict.size = 0;
2352                }
2353            }
2354        }
2355    }
2356
2357    let res = flush_output_buffer(callback, &mut d.params);
2358    d.params.prev_return_status = res.0;
2359
2360    res
2361}
2362
2363/// Create a set of compression flags using parameters used by zlib and other compressors.
2364/// Mainly intended to ease the transition from C libraries, as it deals with raw integers.
2365///
2366/// # Parameters
2367/// `level` determines the compression level. Clamped to a maximum of 10; negative values
2368/// result in `CompressionLevel::DefaultLevel`.
2369/// `window_bits`: above 0, wraps the stream in a zlib wrapper; 0 or negative gives a raw
2370/// deflate stream.
2371/// `strategy`: sets the strategy if it conforms to any of the values in `CompressionStrategy`.
2372///
2373/// # Notes
2374/// This function may be removed or moved to the `miniz_oxide_c_api` in the future.
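///
/// # Example
///
/// A small sketch with zlib-style parameters (level 6, zlib wrapper, default strategy):
/// ```
/// use miniz_oxide::deflate::core::create_comp_flags_from_zip_params;
///
/// let flags = create_comp_flags_from_zip_params(6, 1, 0);
/// assert_ne!(flags, 0);
/// ```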
2375pub fn create_comp_flags_from_zip_params(level: i32, window_bits: i32, strategy: i32) -> u32 {
2376    let num_probes = (if level >= 0 {
2377        cmp::min(10, level)
2378    } else {
2379        CompressionLevel::DefaultLevel as i32
2380    }) as usize;
2381    let greedy = if level <= 3 {
2382        TDEFL_GREEDY_PARSING_FLAG
2383    } else {
2384        0
2385    };
2386    let mut comp_flags = NUM_PROBES[num_probes] | greedy;
2387
2388    if window_bits > 0 {
2389        comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
2390    }
2391
2392    if level == 0 {
2393        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
2394    } else if strategy == CompressionStrategy::Filtered as i32 {
2395        comp_flags |= TDEFL_FILTER_MATCHES;
2396    } else if strategy == CompressionStrategy::HuffmanOnly as i32 {
2397        comp_flags &= !MAX_PROBES_MASK as u32;
2398    } else if strategy == CompressionStrategy::Fixed as i32 {
2399        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
2400    } else if strategy == CompressionStrategy::RLE as i32 {
2401        comp_flags |= TDEFL_RLE_MATCHES;
2402    }
2403
2404    comp_flags
2405}
2406
2407#[cfg(test)]
2408mod test {
2409    use super::{
2410        compress_to_output, create_comp_flags_from_zip_params, read_u16_le, write_u16_le,
2411        CompressionStrategy, CompressorOxide, TDEFLFlush, TDEFLStatus, DEFAULT_FLAGS,
2412        MZ_DEFAULT_WINDOW_BITS,
2413    };
2414    use crate::inflate::decompress_to_vec;
2415    use alloc::vec;
2416
2417    #[test]
2418    fn u16_to_slice() {
2419        let mut slice = [0, 0];
2420        write_u16_le(2000, &mut slice, 0);
2421        assert_eq!(slice, [208, 7]);
2422    }
2423
2424    #[test]
2425    fn u16_from_slice() {
2426        let slice = [208, 7];
2427        assert_eq!(read_u16_le(&slice, 0), 2000);
2428    }
2429
2430    #[test]
2431    fn compress_output() {
2432        assert_eq!(
2433            DEFAULT_FLAGS,
2434            create_comp_flags_from_zip_params(
2435                4,
2436                MZ_DEFAULT_WINDOW_BITS,
2437                CompressionStrategy::Default as i32
2438            )
2439        );
2440
2441        let slice = [
2442            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3,
2443        ];
2444        let mut encoded = vec![];
2445        let flags = create_comp_flags_from_zip_params(6, 0, 0);
2446        let mut d = CompressorOxide::new(flags);
2447        let (status, in_consumed) =
2448            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
2449                encoded.extend_from_slice(out);
2450                true
2451            });
2452
2453        assert_eq!(status, TDEFLStatus::Done);
2454        assert_eq!(in_consumed, slice.len());
2455
2456        let decoded = decompress_to_vec(&encoded[..]).unwrap();
2457        assert_eq!(&decoded[..], &slice[..]);
2458    }
2459
2460    #[test]
2461    /// Check fast compress mode
2462    fn compress_fast() {
2463        let slice = [
2464            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3,
2465        ];
2466        let mut encoded = vec![];
2467        let flags = create_comp_flags_from_zip_params(1, 0, 0);
2468        let mut d = CompressorOxide::new(flags);
2469        let (status, in_consumed) =
2470            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
2471                encoded.extend_from_slice(out);
2472                true
2473            });
2474
2475        assert_eq!(status, TDEFLStatus::Done);
2476        assert_eq!(in_consumed, slice.len());
2477
2478        // Needs to be updated if the algorithm improves.
2479        assert_eq!(
2480            &encoded[..],
2481            [99, 100, 98, 102, 1, 98, 48, 98, 3, 147, 204, 76, 204, 140, 76, 204, 0]
2482        );
2483
2484        let decoded = decompress_to_vec(&encoded[..]).unwrap();
2485        assert_eq!(&decoded[..], &slice[..]);
2486    }
2487
2488    #[test]
2489    fn zlib_window_bits() {
2490        use crate::inflate::stream::{inflate, InflateState};
2491        use crate::DataFormat;
2492        use alloc::boxed::Box;
2493        let slice = [
2494            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, 35, 22, 22, 2,
2495            6, 2, 6,
2496        ];
2497        let mut encoded = vec![];
2498        let flags = create_comp_flags_from_zip_params(2, 1, CompressionStrategy::RLE.into());
2499        let mut d = CompressorOxide::new(flags);
2500        let (status, in_consumed) =
2501            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
2502                encoded.extend_from_slice(out);
2503                true
2504            });
2505
2506        assert_eq!(status, TDEFLStatus::Done);
2507        assert_eq!(in_consumed, slice.len());
2508
2509        let mut output = vec![0; slice.len()];
2510
2511        let mut decompressor = Box::new(InflateState::new(DataFormat::Zlib));
2512
2513        let mut out_slice = output.as_mut_slice();
2514        // Feed 1 byte at a time with no back buffer to verify that RLE encoding was used.
2515        for i in 0..encoded.len() {
2516            let result = inflate(
2517                &mut decompressor,
2518                &encoded[i..i + 1],
2519                out_slice,
2520                crate::MZFlush::None,
2521            );
2522            out_slice = &mut out_slice[result.bytes_written..];
2523        }
2524        let cmf = decompressor.decompressor().zlib_header().0;
2525        assert_eq!(cmf, 8);
2526        assert_eq!(output, slice);
2527    }
2528}