package lz4

import (
	"fmt"
	"reflect"
	"runtime"

	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
)

//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go

type (
	applier interface {
		Apply(...Option) error
		private()
	}
	// Option defines the parameters to setup an LZ4 Writer or Reader.
	Option func(applier) error
)

// String returns a string representation of the option with its parameter(s).
func (o Option) String() string {
	return o(nil).Error()
}

// Default options.
var (
	DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
	DefaultChecksumOption  = ChecksumOption(true)
	DefaultConcurrency     = ConcurrencyOption(1)
	defaultOnBlockDone     = OnBlockDoneOption(nil)
)

const (
	Block64Kb BlockSize = 1 << (16 + iota*2)
	Block256Kb
	Block1Mb
	Block4Mb
)
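
// For reference, the shifts above evaluate to 64 KiB (1<<16), 256 KiB (1<<18),
// 1 MiB (1<<20) and 4 MiB (1<<22): each successive constant shifts two more bits.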

// BlockSize defines the size of the blocks to be compressed.
type BlockSize uint32

// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
func BlockSizeOption(size BlockSize) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("BlockSizeOption(%s)", size)
			return lz4errors.Error(s)
		case *Writer:
			size := uint32(size)
			if !lz4block.IsValid(size) {
				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
			}
			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
			return nil
		case *CompressingReader:
			size := uint32(size)
			if !lz4block.IsValid(size) {
				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
			}
			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// BlockChecksumOption enables or disables block checksum (default=false).
func BlockChecksumOption(flag bool) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
			return lz4errors.Error(s)
		case *Writer:
			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
			return nil
		case *CompressingReader:
			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// ChecksumOption enables/disables the frame content checksum (default=true).
func ChecksumOption(flag bool) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("ChecksumOption(%v)", flag)
			return lz4errors.Error(s)
		case *Writer:
			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
			return nil
		case *CompressingReader:
			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// SizeOption sets the size of the original uncompressed data (default=0). It is useful
// for recording the size of the whole uncompressed data stream in the frame descriptor.
func SizeOption(size uint64) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("SizeOption(%d)", size)
			return lz4errors.Error(s)
		case *Writer:
			w.frame.Descriptor.Flags.SizeSet(size > 0)
			w.frame.Descriptor.ContentSize = size
			return nil
		case *CompressingReader:
			w.frame.Descriptor.Flags.SizeSet(size > 0)
			w.frame.Descriptor.ContentSize = size
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}
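
// exampleDeclareSize is a minimal sketch, not part of the upstream API, showing how the
// uncompressed size can be declared up front when it is known, so that it is recorded in
// the frame descriptor. The *Writer and the data slice are assumed to be provided by the caller.
func exampleDeclareSize(w *Writer, data []byte) error {
	return w.Apply(SizeOption(uint64(len(data))))
}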

// ConcurrencyOption sets the number of goroutines used for compression (Writer) or
// decompression (Reader). If n <= 0, the output of runtime.GOMAXPROCS(0) is used.
func ConcurrencyOption(n int) Option {
	if n <= 0 {
		n = runtime.GOMAXPROCS(0)
	}
	return func(a applier) error {
		switch rw := a.(type) {
		case nil:
			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
			return lz4errors.Error(s)
		case *Writer:
			rw.num = n
			return nil
		case *Reader:
			rw.num = n
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}
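
// exampleConcurrency is a minimal sketch, not part of the upstream API, illustrating that
// ConcurrencyOption applies to both the Writer and the Reader, and that passing 0 lets the
// option fall back to runtime.GOMAXPROCS(0). The *Writer and *Reader values are assumed to
// come from the package's NewWriter and NewReader constructors.
func exampleConcurrency(w *Writer, r *Reader) error {
	if err := w.Apply(ConcurrencyOption(0)); err != nil { // compress with one goroutine per CPU
		return err
	}
	return r.Apply(ConcurrencyOption(2)) // decompress with two goroutines
}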

// CompressionLevel defines the level of compression to use. The higher the level,
// the better the compression, but the slower it is.
type CompressionLevel uint32

const (
	Fast   CompressionLevel = 0
	Level1 CompressionLevel = 1 << (8 + iota)
	Level2
	Level3
	Level4
	Level5
	Level6
	Level7
	Level8
	Level9
)
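
// For reference, Fast is 0 while Level1 through Level9 evaluate to the distinct powers
// of two 1<<9 (512) through 1<<17 (131072), not to consecutive small integers, which is
// why CompressionLevelOption below checks levels against an explicit list.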

// CompressionLevelOption defines the compression level (default=Fast).
func CompressionLevelOption(level CompressionLevel) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
			return lz4errors.Error(s)
		case *Writer:
			switch level {
			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
			default:
				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
			}
			w.level = lz4block.CompressionLevel(level)
			return nil
		case *CompressingReader:
			switch level {
			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
			default:
				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
			}
			w.level = lz4block.CompressionLevel(level)
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

func onBlockDone(int) {}

// OnBlockDoneOption is triggered when a block has been processed. For a Writer, that is
// when it has been compressed; for a Reader, when it has been uncompressed.
func OnBlockDoneOption(handler func(size int)) Option {
	if handler == nil {
		handler = onBlockDone
	}
	return func(a applier) error {
		switch rw := a.(type) {
		case nil:
			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
			return lz4errors.Error(s)
		case *Writer:
			rw.handler = handler
			return nil
		case *Reader:
			rw.handler = handler
			return nil
		case *CompressingReader:
			rw.handler = handler
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}
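
// exampleProgress is a minimal sketch, not part of the upstream API, showing a typical use
// of OnBlockDoneOption: reporting progress as each block completes. The *Writer is assumed
// to come from the package's NewWriter constructor; the callback signature matches the
// handler parameter above.
func exampleProgress(w *Writer) error {
	return w.Apply(OnBlockDoneOption(func(size int) {
		fmt.Println("block done, bytes:", size) // size as reported by the Writer for the block
	}))
}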

// LegacyOption provides support for writing LZ4 frames in the legacy format.
//
// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
//
// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
// the compressed stream is followed by the original (uncompressed) size of
// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
// This is also supported as a special case.
func LegacyOption(legacy bool) Option {
	return func(a applier) error {
		switch rw := a.(type) {
		case nil:
			s := fmt.Sprintf("LegacyOption(%v)", legacy)
			return lz4errors.Error(s)
		case *Writer:
			rw.legacy = legacy
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}