/* Copyright (c) Mark Harmstone 2016
 * Copyright (c) Reimar Doeffinger 2006
 * Copyright (c) Markus Oberhumer 1996
 *
 * This file is part of WinBtrfs.
 *
 * WinBtrfs is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public Licence as published by
 * the Free Software Foundation, either version 3 of the Licence, or
 * (at your option) any later version.
 *
 * WinBtrfs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public Licence for more details.
 *
 * You should have received a copy of the GNU Lesser General Public Licence
 * along with WinBtrfs. If not, see <http://www.gnu.org/licenses/>. */

// Portions of the LZO decompression code here were cribbed from code in
// libavcodec, also under the LGPL. Thank you, Reimar Doeffinger.

// The LZO compression code comes from v0.22 of lzo, written way back in
// 1996, and available here:
// https://www.ibiblio.org/pub/historic-linux/ftp-archives/sunsite.unc.edu/Sep-29-1996/libs/lzo-0.22.tar.gz
// Modern versions of lzo are licensed under the GPL, but the very oldest
// versions are under the LGPL and hence okay to use here.

#include "btrfs_drv.h"

#define Z_SOLO
#define ZLIB_INTERNAL

#ifndef __REACTOS__
#include "zlib/zlib.h"
#include "zlib/inftrees.h"
#include "zlib/inflate.h"
#else
#include <zlib.h>
#endif

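// The LZO code below processes data in 4 KB chunks, matching the page size
// btrfs assumes on Linux; we rely on this fixed value rather than the host's
// actual page size.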
#define LINUX_PAGE_SIZE 4096

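// Stream state shared by the LZO compressor and decompressor: input and
// output buffers with their lengths and current positions, an error flag,
// and (for compression only) the dictionary work memory.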
typedef struct {
    UINT8* in;
    UINT32 inlen;
    UINT32 inpos;
    UINT8* out;
    UINT32 outlen;
    UINT32 outpos;
    BOOL error;
    void* wrkmem;
} lzo_stream;

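// Work memory needed by the LZO1X compressor: a dictionary of 16384
// pointer-sized hash buckets (see DINDEX below, which masks with 0x3fff).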
#define LZO1X_MEM_COMPRESS ((UINT32) (16384L * sizeof(UINT8*)))

#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MAX_OFFSET 0x4000
#define M4_MAX_OFFSET 0xbfff

#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET)

#define M1_MARKER 0
#define M2_MARKER 64
#define M3_MARKER 32
#define M4_MARKER 16

#define _DV2(p, shift1, shift2) (((( (UINT32)(p[2]) << shift1) ^ p[1]) << shift2) ^ p[0])
#define DVAL_NEXT(dv, p) dv ^= p[-1]; dv = (((dv) >> 5) ^ ((UINT32)(p[2]) << (2*5)))
#define _DV(p, shift) _DV2(p, shift, shift)
#define DVAL_FIRST(dv, p) dv = _DV((p), 5)
#define _DINDEX(dv, p) ((40799u * (dv)) >> 5)
#define DINDEX(dv, p) (((_DINDEX(dv, p)) & 0x3fff) << 0)
#define UPDATE_D(dict, cycle, dv, p) dict[DINDEX(dv, p)] = (p)
#define UPDATE_I(dict, cycle, index, p) dict[index] = (p)

#define LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, max_offset) \
    ((void*) m_pos < (void*) in || \
    (m_off = (UINT8*) ip - (UINT8*) m_pos) <= 0 || \
    m_off > max_offset)

#define LZO_BYTE(x) ((unsigned char) (x))

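// Read the next byte of the input stream, setting the error flag instead of
// reading out of bounds if the input has been exhausted.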
static UINT8 lzo_nextbyte(lzo_stream* stream) {
    UINT8 c;

    if (stream->inpos >= stream->inlen) {
        stream->error = TRUE;
        return 0;
    }

    c = stream->in[stream->inpos];
    stream->inpos++;

    return c;
}

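// Decode an LZO run length: the low bits of the marker byte hold the length,
// and if they are all zero the length continues in the following bytes, each
// zero byte adding 255 until a non-zero terminator is reached.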
static int lzo_len(lzo_stream* stream, int byte, int mask) {
    int len = byte & mask;

    if (len == 0) {
        while (!(byte = lzo_nextbyte(stream))) {
            if (stream->error) return 0;

            len += 255;
        }

        len += mask + byte;
    }

    return len;
}

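// Copy len literal bytes from the input stream to the output stream,
// bounds-checking both buffers first.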
static void lzo_copy(lzo_stream* stream, int len) {
    if (stream->inpos + len > stream->inlen) {
        stream->error = TRUE;
        return;
    }

    if (stream->outpos + len > stream->outlen) {
        stream->error = TRUE;
        return;
    }

    do {
        stream->out[stream->outpos] = stream->in[stream->inpos];
        stream->inpos++;
        stream->outpos++;
        len--;
    } while (len > 0);
}

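// Copy len bytes from earlier in the output stream, starting back bytes
// behind the current position. The source and destination may overlap, which
// is how LZO encodes repeated runs, so this has to copy byte by byte.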
static void lzo_copyback(lzo_stream* stream, int back, int len) {
    if (stream->outpos < back) {
        stream->error = TRUE;
        return;
    }

    if (stream->outpos + len > stream->outlen) {
        stream->error = TRUE;
        return;
    }

    do {
        stream->out[stream->outpos] = stream->out[stream->outpos - back];
        stream->outpos++;
        len--;
    } while (len > 0);
}

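// Decompress a single LZO1X block. Each marker byte selects either a literal
// run or a back-reference; the stream ends when an M4 match with a zero
// distance field is seen, which is the three-byte terminator that
// lzo1x_1_compress appends.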
static NTSTATUS do_lzo_decompress(lzo_stream* stream) {
    UINT8 byte;
    UINT32 len, back;
    BOOL backcopy = FALSE;

    stream->error = FALSE;

    byte = lzo_nextbyte(stream);
    if (stream->error) return STATUS_INTERNAL_ERROR;

    if (byte > 17) {
        lzo_copy(stream, byte - 17);
        if (stream->error) return STATUS_INTERNAL_ERROR;

        byte = lzo_nextbyte(stream);
        if (stream->error) return STATUS_INTERNAL_ERROR;

        if (byte < 16) return STATUS_INTERNAL_ERROR;
    }

    while (1) {
        if (byte >> 4) {
            backcopy = TRUE;
            if (byte >> 6) {
                len = (byte >> 5) - 1;
                back = (lzo_nextbyte(stream) << 3) + ((byte >> 2) & 7) + 1;
                if (stream->error) return STATUS_INTERNAL_ERROR;
            } else if (byte >> 5) {
                len = lzo_len(stream, byte, 31);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                byte = lzo_nextbyte(stream);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                back = (lzo_nextbyte(stream) << 6) + (byte >> 2) + 1;
                if (stream->error) return STATUS_INTERNAL_ERROR;
            } else {
                len = lzo_len(stream, byte, 7);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                back = (1 << 14) + ((byte & 8) << 11);

                byte = lzo_nextbyte(stream);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                back += (lzo_nextbyte(stream) << 6) + (byte >> 2);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                if (back == (1 << 14)) {
                    if (len != 1)
                        return STATUS_INTERNAL_ERROR;
                    break;
                }
            }
        } else if (backcopy) {
            len = 0;
            back = (lzo_nextbyte(stream) << 2) + (byte >> 2) + 1;
            if (stream->error) return STATUS_INTERNAL_ERROR;
        } else {
            len = lzo_len(stream, byte, 15);
            if (stream->error) return STATUS_INTERNAL_ERROR;

            lzo_copy(stream, len + 3);
            if (stream->error) return STATUS_INTERNAL_ERROR;

            byte = lzo_nextbyte(stream);
            if (stream->error) return STATUS_INTERNAL_ERROR;

            if (byte >> 4)
                continue;

            len = 1;
            back = (1 << 11) + (lzo_nextbyte(stream) << 2) + (byte >> 2) + 1;
            if (stream->error) return STATUS_INTERNAL_ERROR;

            break;
        }

        lzo_copyback(stream, back, len + 2);
        if (stream->error) return STATUS_INTERNAL_ERROR;

        len = byte & 3;

        if (len) {
            lzo_copy(stream, len);
            if (stream->error) return STATUS_INTERNAL_ERROR;
        } else
            backcopy = !backcopy;

        byte = lzo_nextbyte(stream);
        if (stream->error) return STATUS_INTERNAL_ERROR;
    }

    return STATUS_SUCCESS;
}

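// btrfs stores an LZO-compressed extent as a 4-byte total length followed by
// one segment per 4 KB page of decompressed data; each segment is a 4-byte
// compressed length followed by the compressed bytes, and a segment header
// never straddles a page boundary of the compressed stream, hence the skip
// to the next page boundary below. We assume outbuf covers a whole number of
// 4 KB pages.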
static NTSTATUS lzo_decompress(UINT8* inbuf, UINT64 inlen, UINT8* outbuf, UINT64 outlen) {
    NTSTATUS Status;
    UINT32 extlen, partlen, inoff, outoff;
    lzo_stream stream;

    extlen = *((UINT32*)inbuf);
    if (inlen < extlen) {
        ERR("compressed extent was %llx, should have been at least %x\n", inlen, extlen);
        return STATUS_INTERNAL_ERROR;
    }

    inoff = sizeof(UINT32);
    outoff = 0;

    do {
        partlen = *(UINT32*)&inbuf[inoff];

        if (partlen + inoff > inlen) {
            ERR("overflow: %x + %x > %llx\n", partlen, inoff, inlen);
            return STATUS_INTERNAL_ERROR;
        }

        inoff += sizeof(UINT32);

        stream.in = &inbuf[inoff];
        stream.inlen = partlen;
        stream.inpos = 0;
        stream.out = &outbuf[outoff];
        stream.outlen = LINUX_PAGE_SIZE;
        stream.outpos = 0;

        Status = do_lzo_decompress(&stream);
        if (!NT_SUCCESS(Status)) {
            ERR("do_lzo_decompress returned %08x\n", Status);
            return Status;
        }

        if (stream.outpos < stream.outlen)
            RtlZeroMemory(&stream.out[stream.outpos], stream.outlen - stream.outpos);

        inoff += partlen;
        outoff += stream.outlen;

        if (LINUX_PAGE_SIZE - (inoff % LINUX_PAGE_SIZE) < sizeof(UINT32))
            inoff = ((inoff / LINUX_PAGE_SIZE) + 1) * LINUX_PAGE_SIZE;
    } while (inoff < extlen);

    return STATUS_SUCCESS;
}

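// Allocation callbacks so that zlib allocates from the paged pool rather than
// a CRT heap.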
static void* zlib_alloc(void* opaque, unsigned int items, unsigned int size) {
    return ExAllocatePoolWithTag(PagedPool, items * size, ALLOC_TAG_ZLIB);
}

static void zlib_free(void* opaque, void* ptr) {
    ExFreePool(ptr);
}

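// Inflate a zlib-compressed extent directly into outbuf. btrfs stores such an
// extent as a single zlib stream, so one inflate loop suffices.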
static NTSTATUS zlib_decompress(UINT8* inbuf, UINT64 inlen, UINT8* outbuf, UINT64 outlen) {
    z_stream c_stream;
    int ret;

    c_stream.zalloc = zlib_alloc;
    c_stream.zfree = zlib_free;
    c_stream.opaque = (voidpf)0;

    ret = inflateInit(&c_stream);

    if (ret != Z_OK) {
        ERR("inflateInit returned %08x\n", ret);
        return STATUS_INTERNAL_ERROR;
    }

    c_stream.next_in = inbuf;
    c_stream.avail_in = inlen;

    c_stream.next_out = outbuf;
    c_stream.avail_out = outlen;

    do {
        ret = inflate(&c_stream, Z_NO_FLUSH);

        if (ret != Z_OK && ret != Z_STREAM_END) {
            ERR("inflate returned %08x\n", ret);
            inflateEnd(&c_stream);
            return STATUS_INTERNAL_ERROR;
        }
    } while (ret != Z_STREAM_END);

    ret = inflateEnd(&c_stream);

    if (ret != Z_OK) {
        ERR("inflateEnd returned %08x\n", ret);
        return STATUS_INTERNAL_ERROR;
    }

    // FIXME - if we're short, should we zero the end of outbuf so we don't leak information into userspace?

    return STATUS_SUCCESS;
}

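// Entry point for reads of compressed extents: dispatch to the decompressor
// matching the extent's compression type.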
NTSTATUS decompress(UINT8 type, UINT8* inbuf, UINT64 inlen, UINT8* outbuf, UINT64 outlen) {
    if (type == BTRFS_COMPRESSION_ZLIB)
        return zlib_decompress(inbuf, inlen, outbuf, outlen);
    else if (type == BTRFS_COMPRESSION_LZO)
        return lzo_decompress(inbuf, inlen, outbuf, outlen);
    else {
        ERR("unsupported compression type %x\n", type);
        return STATUS_NOT_SUPPORTED;
    }
}

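// Compress one extent's worth of data with zlib and write it out. If
// compression doesn't save enough space the data is written uncompressed
// instead; either way the extent is placed in an existing data chunk if one
// has room, or in a newly allocated chunk otherwise.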
static NTSTATUS zlib_write_compressed_bit(fcb* fcb, UINT64 start_data, UINT64 end_data, void* data, BOOL* compressed, LIST_ENTRY* changed_sector_list, PIRP Irp, LIST_ENTRY* rollback) {
    NTSTATUS Status;
    UINT8 compression;
    UINT64 comp_length;
    UINT8* comp_data;
    UINT32 out_left;
    LIST_ENTRY* le;
    chunk* c;
    z_stream c_stream;
    int ret;

    comp_data = ExAllocatePoolWithTag(PagedPool, end_data - start_data, ALLOC_TAG);
    if (!comp_data) {
        ERR("out of memory\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = excise_extents(fcb->Vcb, fcb, start_data, end_data, Irp, rollback);
    if (!NT_SUCCESS(Status)) {
        ERR("excise_extents returned %08x\n", Status);
        ExFreePool(comp_data);
        return Status;
    }

    c_stream.zalloc = zlib_alloc;
    c_stream.zfree = zlib_free;
    c_stream.opaque = (voidpf)0;

    ret = deflateInit(&c_stream, fcb->Vcb->options.zlib_level);

    if (ret != Z_OK) {
        ERR("deflateInit returned %08x\n", ret);
        ExFreePool(comp_data);
        return STATUS_INTERNAL_ERROR;
    }

    c_stream.avail_in = end_data - start_data;
    c_stream.next_in = data;
    c_stream.avail_out = end_data - start_data;
    c_stream.next_out = comp_data;

    do {
        ret = deflate(&c_stream, Z_FINISH);

        if (ret == Z_STREAM_ERROR) {
            ERR("deflate returned %x\n", ret);
            deflateEnd(&c_stream); // free zlib's internal state before bailing out
            ExFreePool(comp_data);
            return STATUS_INTERNAL_ERROR;
        }
    } while (c_stream.avail_in > 0 && c_stream.avail_out > 0);

    out_left = c_stream.avail_out;

    ret = deflateEnd(&c_stream);

    if (ret != Z_OK) {
        ERR("deflateEnd returned %08x\n", ret);
        ExFreePool(comp_data);
        return STATUS_INTERNAL_ERROR;
    }

    if (out_left < fcb->Vcb->superblock.sector_size) { // compression didn't save enough space to be worthwhile, so store the extent uncompressed
        ExFreePool(comp_data);

        comp_length = end_data - start_data;
        comp_data = data;
        compression = BTRFS_COMPRESSION_NONE;

        *compressed = FALSE;
    } else {
        UINT32 cl;

        compression = BTRFS_COMPRESSION_ZLIB;
        cl = end_data - start_data - out_left;
        comp_length = sector_align(cl, fcb->Vcb->superblock.sector_size);

        RtlZeroMemory(comp_data + cl, comp_length - cl);

        *compressed = TRUE;
    }

    ExAcquireResourceSharedLite(&fcb->Vcb->chunk_lock, TRUE);

    le = fcb->Vcb->chunks.Flink;
    while (le != &fcb->Vcb->chunks) {
        c = CONTAINING_RECORD(le, chunk, list_entry);

        ExAcquireResourceExclusiveLite(&c->lock, TRUE);

        if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
            if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, changed_sector_list, Irp, rollback, compression, end_data - start_data)) {
                ExReleaseResourceLite(&c->lock);
                ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

                if (compression != BTRFS_COMPRESSION_NONE)
                    ExFreePool(comp_data);

                return STATUS_SUCCESS;
            }
        }

        ExReleaseResourceLite(&c->lock);

        le = le->Flink;
    }

    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    ExAcquireResourceExclusiveLite(&fcb->Vcb->chunk_lock, TRUE);

    if ((c = alloc_chunk(fcb->Vcb, fcb->Vcb->data_flags))) {
        ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

        ExAcquireResourceExclusiveLite(&c->lock, TRUE);

        if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
            if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, changed_sector_list, Irp, rollback, compression, end_data - start_data)) {
                ExReleaseResourceLite(&c->lock);

                if (compression != BTRFS_COMPRESSION_NONE)
                    ExFreePool(comp_data);

                return STATUS_SUCCESS;
            }
        }

        ExReleaseResourceLite(&c->lock);
    } else
        ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    WARN("couldn't find any data chunks with %llx bytes free\n", comp_length);

    return STATUS_DISK_FULL;
}

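// Core LZO1X compression loop, taken from lzo v0.22 (see the comment at the
// top of this file). A hash of the next three input bytes indexes the
// dictionary in wrkmem to find candidate matches; literal runs are emitted
// between matches, and match length/offset pairs are coded in the M2/M3/M4
// formats defined by the markers above.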
static NTSTATUS lzo_do_compress(const UINT8* in, UINT32 in_len, UINT8* out, UINT32* out_len, void* wrkmem) {
    const UINT8* ip;
    UINT32 dv;
    UINT8* op;
    const UINT8* in_end = in + in_len;
    const UINT8* ip_end = in + in_len - 9 - 4;
    const UINT8* ii;
    const UINT8** dict = (const UINT8**)wrkmem;

    op = out;
    ip = in;
    ii = ip;

    DVAL_FIRST(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;
    DVAL_NEXT(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;
    DVAL_NEXT(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;
    DVAL_NEXT(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;

    while (1) {
        const UINT8* m_pos;
        UINT32 m_len;
        ptrdiff_t m_off;
        UINT32 lit, dindex;

        dindex = DINDEX(dv, ip);
        m_pos = dict[dindex];
        UPDATE_I(dict, cycle, dindex, ip);

        if (!LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, M4_MAX_OFFSET) && m_pos[0] == ip[0] && m_pos[1] == ip[1] && m_pos[2] == ip[2]) {
            lit = ip - ii;
            m_pos += 3;
            if (m_off <= M2_MAX_OFFSET)
                goto match;

            if (lit == 3) { /* better compression, but slower */
                if (op - 2 <= out)
                    return STATUS_INTERNAL_ERROR;

                op[-2] |= LZO_BYTE(3);
                *op++ = *ii++; *op++ = *ii++; *op++ = *ii++;
                goto code_match;
            }

            if (*m_pos == ip[3])
                goto match;
        }

        /* a literal */
        ++ip;
        if (ip >= ip_end)
            break;
        DVAL_NEXT(dv, ip);
        continue;

        /* a match */
match:
        /* store current literal run */
        if (lit > 0) {
            UINT32 t = lit;

            if (t <= 3) {
                if (op - 2 <= out)
                    return STATUS_INTERNAL_ERROR;

                op[-2] |= LZO_BYTE(t);
            } else if (t <= 18)
                *op++ = LZO_BYTE(t - 3);
            else {
                UINT32 tt = t - 18;

                *op++ = 0;
                while (tt > 255) {
                    tt -= 255;
                    *op++ = 0;
                }

                if (tt <= 0)
                    return STATUS_INTERNAL_ERROR;

                *op++ = LZO_BYTE(tt);
            }

            do {
                *op++ = *ii++;
            } while (--t > 0);
        }

        /* code the match */
code_match:
        if (ii != ip)
            return STATUS_INTERNAL_ERROR;

        ip += 3;
        if (*m_pos++ != *ip++ || *m_pos++ != *ip++ || *m_pos++ != *ip++ ||
            *m_pos++ != *ip++ || *m_pos++ != *ip++ || *m_pos++ != *ip++) {
            --ip;
            m_len = ip - ii;

            if (m_len < 3 || m_len > 8)
                return STATUS_INTERNAL_ERROR;

            if (m_off <= M2_MAX_OFFSET) {
                m_off -= 1;
                *op++ = LZO_BYTE(((m_len - 1) << 5) | ((m_off & 7) << 2));
                *op++ = LZO_BYTE(m_off >> 3);
            } else if (m_off <= M3_MAX_OFFSET) {
                m_off -= 1;
                *op++ = LZO_BYTE(M3_MARKER | (m_len - 2));
                goto m3_m4_offset;
            } else {
                m_off -= 0x4000;

                if (m_off <= 0 || m_off > 0x7fff)
                    return STATUS_INTERNAL_ERROR;

                *op++ = LZO_BYTE(M4_MARKER | ((m_off & 0x4000) >> 11) | (m_len - 2));
                goto m3_m4_offset;
            }
        } else {
            const UINT8* end;
            end = in_end;
            while (ip < end && *m_pos == *ip)
                m_pos++, ip++;
            m_len = (ip - ii);

            if (m_len < 3)
                return STATUS_INTERNAL_ERROR;

            if (m_off <= M3_MAX_OFFSET) {
                m_off -= 1;
                if (m_len <= 33)
                    *op++ = LZO_BYTE(M3_MARKER | (m_len - 2));
                else {
                    m_len -= 33;
                    *op++ = M3_MARKER | 0;
                    goto m3_m4_len;
                }
            } else {
                m_off -= 0x4000;

                if (m_off <= 0 || m_off > 0x7fff)
                    return STATUS_INTERNAL_ERROR;

                if (m_len <= 9)
                    *op++ = LZO_BYTE(M4_MARKER | ((m_off & 0x4000) >> 11) | (m_len - 2));
                else {
                    m_len -= 9;
                    *op++ = LZO_BYTE(M4_MARKER | ((m_off & 0x4000) >> 11));
m3_m4_len:
                    while (m_len > 255) {
                        m_len -= 255;
                        *op++ = 0;
                    }

                    if (m_len <= 0)
                        return STATUS_INTERNAL_ERROR;

                    *op++ = LZO_BYTE(m_len);
                }
            }

m3_m4_offset:
            *op++ = LZO_BYTE((m_off & 63) << 2);
            *op++ = LZO_BYTE(m_off >> 6);
        }

        ii = ip;
        if (ip >= ip_end)
            break;
        DVAL_FIRST(dv, ip);
    }

    /* store final literal run */
    if (in_end - ii > 0) {
        UINT32 t = in_end - ii;

        if (op == out && t <= 238)
            *op++ = LZO_BYTE(17 + t);
        else if (t <= 3)
            op[-2] |= LZO_BYTE(t);
        else if (t <= 18)
            *op++ = LZO_BYTE(t - 3);
        else {
            UINT32 tt = t - 18;

            *op++ = 0;
            while (tt > 255) {
                tt -= 255;
                *op++ = 0;
            }

            if (tt <= 0)
                return STATUS_INTERNAL_ERROR;

            *op++ = LZO_BYTE(tt);
        }

        do {
            *op++ = *ii++;
        } while (--t > 0);
    }

    *out_len = op - out;

    return STATUS_SUCCESS;
}

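// Wrapper around lzo_do_compress: inputs too short to be worth matching are
// stored as a single literal run, and every stream is terminated with the
// three-byte end-of-stream marker that do_lzo_decompress looks for.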
static NTSTATUS lzo1x_1_compress(lzo_stream* stream) {
    UINT8 *op = stream->out;
    NTSTATUS Status = STATUS_SUCCESS;

    if (stream->inlen <= 0)
        stream->outlen = 0;
    else if (stream->inlen <= 9 + 4) {
        *op++ = LZO_BYTE(17 + stream->inlen);

        stream->inpos = 0;
        do {
            *op++ = stream->in[stream->inpos];
            stream->inpos++;
        } while (stream->inpos < stream->inlen);
        stream->outlen = op - stream->out;
    } else
        Status = lzo_do_compress(stream->in, stream->inlen, stream->out, &stream->outlen, stream->wrkmem);

    if (Status == STATUS_SUCCESS) {
        op = stream->out + stream->outlen;
        *op++ = M4_MARKER | 1;
        *op++ = 0;
        *op++ = 0;
        stream->outlen += 3;
    }

    return Status;
}

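// Worst-case output size for LZO1X input of the given length; for a 4096-byte
// page this comes to 4096 + 256 + 64 + 3 = 4419 bytes.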
static __inline UINT32 lzo_max_outlen(UINT32 inlen) {
    return inlen + (inlen / 16) + 64 + 3; // formula comes from LZO.FAQ
}

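// LZO counterpart of zlib_write_compressed_bit: compress the data page by
// page into the btrfs LZO framing described above lzo_decompress, then
// allocate and write the extent, falling back to an uncompressed write if
// compression fails or doesn't save enough space.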
static NTSTATUS lzo_write_compressed_bit(fcb* fcb, UINT64 start_data, UINT64 end_data, void* data, BOOL* compressed, LIST_ENTRY* changed_sector_list, PIRP Irp, LIST_ENTRY* rollback) {
    NTSTATUS Status;
    UINT8 compression;
    UINT64 comp_length;
    ULONG comp_data_len, num_pages, i;
    UINT8* comp_data;
    BOOL skip_compression = FALSE;
    lzo_stream stream;
    UINT32* out_size;
    LIST_ENTRY* le;
    chunk* c;

    num_pages = (sector_align(end_data - start_data, LINUX_PAGE_SIZE)) / LINUX_PAGE_SIZE;

    // Four-byte overall header
    // Another four-byte header per page
    // Each page compresses to at most lzo_max_outlen(LINUX_PAGE_SIZE) bytes
    // Plus up to another four bytes per page for padding
    comp_data_len = sizeof(UINT32) + ((lzo_max_outlen(LINUX_PAGE_SIZE) + (2 * sizeof(UINT32))) * num_pages);

    comp_data = ExAllocatePoolWithTag(PagedPool, comp_data_len, ALLOC_TAG);
    if (!comp_data) {
        ERR("out of memory\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    stream.wrkmem = ExAllocatePoolWithTag(PagedPool, LZO1X_MEM_COMPRESS, ALLOC_TAG);
    if (!stream.wrkmem) {
        ERR("out of memory\n");
        ExFreePool(comp_data);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = excise_extents(fcb->Vcb, fcb, start_data, end_data, Irp, rollback);
    if (!NT_SUCCESS(Status)) {
        ERR("excise_extents returned %08x\n", Status);
        ExFreePool(comp_data);
        ExFreePool(stream.wrkmem);
        return Status;
    }

    out_size = (UINT32*)comp_data;
    *out_size = sizeof(UINT32);

    stream.in = data;
    stream.out = comp_data + (2 * sizeof(UINT32));

    for (i = 0; i < num_pages; i++) {
        UINT32* pagelen = (UINT32*)(stream.out - sizeof(UINT32));

        stream.inlen = min(LINUX_PAGE_SIZE, end_data - start_data - (i * LINUX_PAGE_SIZE));

        Status = lzo1x_1_compress(&stream);
        if (!NT_SUCCESS(Status)) {
            ERR("lzo1x_1_compress returned %08x\n", Status);
            skip_compression = TRUE;
            break;
        }

        *pagelen = stream.outlen;
        *out_size += stream.outlen + sizeof(UINT32);

        stream.in += LINUX_PAGE_SIZE;
        stream.out += stream.outlen + sizeof(UINT32);

        if (LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE) < sizeof(UINT32)) {
            RtlZeroMemory(stream.out, LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE));
            stream.out += LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE);
            *out_size += LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE);
        }
    }

    ExFreePool(stream.wrkmem);

    if (skip_compression || *out_size >= end_data - start_data - fcb->Vcb->superblock.sector_size) { // compression failed or didn't save enough space to be worthwhile, so store the extent uncompressed
        ExFreePool(comp_data);

        comp_length = end_data - start_data;
        comp_data = data;
        compression = BTRFS_COMPRESSION_NONE;

        *compressed = FALSE;
    } else {
        compression = BTRFS_COMPRESSION_LZO;
        comp_length = sector_align(*out_size, fcb->Vcb->superblock.sector_size);

        RtlZeroMemory(comp_data + *out_size, comp_length - *out_size);

        *compressed = TRUE;
    }

    ExAcquireResourceSharedLite(&fcb->Vcb->chunk_lock, TRUE);

    le = fcb->Vcb->chunks.Flink;
    while (le != &fcb->Vcb->chunks) {
        c = CONTAINING_RECORD(le, chunk, list_entry);

        ExAcquireResourceExclusiveLite(&c->lock, TRUE);

        if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
            if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, changed_sector_list, Irp, rollback, compression, end_data - start_data)) {
                ExReleaseResourceLite(&c->lock);
                ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

                if (compression != BTRFS_COMPRESSION_NONE)
                    ExFreePool(comp_data);

                return STATUS_SUCCESS;
            }
        }

        ExReleaseResourceLite(&c->lock);

        le = le->Flink;
    }

    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    ExAcquireResourceExclusiveLite(&fcb->Vcb->chunk_lock, TRUE);

    if ((c = alloc_chunk(fcb->Vcb, fcb->Vcb->data_flags))) {
        ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

        ExAcquireResourceExclusiveLite(&c->lock, TRUE);

        if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
            if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, changed_sector_list, Irp, rollback, compression, end_data - start_data)) {
                ExReleaseResourceLite(&c->lock);

                if (compression != BTRFS_COMPRESSION_NONE)
                    ExFreePool(comp_data);

                return STATUS_SUCCESS;
            }
        }

        ExReleaseResourceLite(&c->lock);
    } else
        ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    WARN("couldn't find any data chunks with %llx bytes free\n", comp_length);

    return STATUS_DISK_FULL;
}

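// Choose the compression algorithm for a write: honour the compress_type
// mount option if set, otherwise use LZO if the filesystem already has the
// LZO incompat flag and zlib if not. Writing an LZO extent sets the incompat
// flag on the superblock.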
NTSTATUS write_compressed_bit(fcb* fcb, UINT64 start_data, UINT64 end_data, void* data, BOOL* compressed, LIST_ENTRY* changed_sector_list, PIRP Irp, LIST_ENTRY* rollback) {
    UINT8 type;

    if (fcb->Vcb->options.compress_type != 0)
        type = fcb->Vcb->options.compress_type;
    else {
        if (fcb->Vcb->superblock.incompat_flags & BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO)
            type = BTRFS_COMPRESSION_LZO;
        else
            type = BTRFS_COMPRESSION_ZLIB;
    }

    if (type == BTRFS_COMPRESSION_LZO) {
        fcb->Vcb->superblock.incompat_flags |= BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO;
        return lzo_write_compressed_bit(fcb, start_data, end_data, data, compressed, changed_sector_list, Irp, rollback);
    } else
        return zlib_write_compressed_bit(fcb, start_data, end_data, data, compressed, changed_sector_list, Irp, rollback);
898 }