/* Copyright (c) Mark Harmstone 2016-17
 * Copyright (c) Reimar Doeffinger 2006
 * Copyright (c) Markus Oberhumer 1996
 *
 * This file is part of WinBtrfs.
 *
 * WinBtrfs is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public Licence as published by
 * the Free Software Foundation, either version 3 of the Licence, or
 * (at your option) any later version.
 *
 * WinBtrfs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public Licence for more details.
 *
 * You should have received a copy of the GNU Lesser General Public Licence
 * along with WinBtrfs. If not, see <http://www.gnu.org/licenses/>. */

// Portions of the LZO decompression code here were cribbed from code in
// libavcodec, also under the LGPL. Thank you, Reimar Doeffinger.

// The LZO compression code comes from v0.22 of lzo, written way back in
// 1996, and available here:
// https://www.ibiblio.org/pub/historic-linux/ftp-archives/sunsite.unc.edu/Sep-29-1996/libs/lzo-0.22.tar.gz
// Modern versions of lzo are licensed under the GPL, but the very oldest
// versions are under the LGPL and hence okay to use here.

#include "btrfs_drv.h"

#define Z_SOLO
#define ZLIB_INTERNAL

#ifndef __REACTOS__
#include "zlib/zlib.h"
#include "zlib/inftrees.h"
#include "zlib/inflate.h"
#else
#include <zlib.h>
#endif

#define LINUX_PAGE_SIZE 4096

typedef struct {
    UINT8* in;
    UINT32 inlen;
    UINT32 inpos;
    UINT8* out;
    UINT32 outlen;
    UINT32 outpos;
    BOOL error;
    void* wrkmem;
} lzo_stream;

#define LZO1X_MEM_COMPRESS ((UINT32)(16384L * sizeof(UINT8*)))

#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MAX_OFFSET 0x4000
#define M4_MAX_OFFSET 0xbfff

#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET)

#define M1_MARKER 0
#define M2_MARKER 64
#define M3_MARKER 32
#define M4_MARKER 16

#define _DV2(p, shift1, shift2) (((( (UINT32)(p[2]) << shift1) ^ p[1]) << shift2) ^ p[0])
#define DVAL_NEXT(dv, p) dv ^= p[-1]; dv = (((dv) >> 5) ^ ((UINT32)(p[2]) << (2*5)))
#define _DV(p, shift) _DV2(p, shift, shift)
#define DVAL_FIRST(dv, p) dv = _DV((p), 5)
#define _DINDEX(dv, p) ((40799u * (dv)) >> 5)
#define DINDEX(dv, p) (((_DINDEX(dv, p)) & 0x3fff) << 0)
#define UPDATE_D(dict, cycle, dv, p) dict[DINDEX(dv, p)] = (p)
#define UPDATE_I(dict, cycle, index, p) dict[index] = (p)

#define LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, max_offset) \
    ((void*) m_pos < (void*) in || \
     (m_off = (UINT8*) ip - (UINT8*) m_pos) <= 0 || \
     m_off > max_offset)

#define LZO_BYTE(x) ((unsigned char) (x))

static UINT8 lzo_nextbyte(lzo_stream* stream) {
    UINT8 c;

    if (stream->inpos >= stream->inlen) {
        stream->error = TRUE;
        return 0;
    }

    c = stream->in[stream->inpos];
    stream->inpos++;

    return c;
}

static int lzo_len(lzo_stream* stream, int byte, int mask) {
    int len = byte & mask;

    if (len == 0) {
        while (!(byte = lzo_nextbyte(stream))) {
            if (stream->error) return 0;

            len += 255;
        }

        len += mask + byte;
    }

    return len;
}

static void lzo_copy(lzo_stream* stream, int len) {
    if (stream->inpos + len > stream->inlen) {
        stream->error = TRUE;
        return;
    }

    if (stream->outpos + len > stream->outlen) {
        stream->error = TRUE;
        return;
    }

    do {
        stream->out[stream->outpos] = stream->in[stream->inpos];
        stream->inpos++;
        stream->outpos++;
        len--;
    } while (len > 0);
}

static void lzo_copyback(lzo_stream* stream, UINT32 back, int len) {
    if (stream->outpos < back) {
        stream->error = TRUE;
        return;
    }

    if (stream->outpos + len > stream->outlen) {
        stream->error = TRUE;
        return;
    }

    do {
        stream->out[stream->outpos] = stream->out[stream->outpos - back];
        stream->outpos++;
        len--;
    } while (len > 0);
}

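// Decodes a single LZO1X-compressed segment into stream->out. The bitstream is
// a sequence of instruction bytes: a first byte above 17 denotes an initial
// literal run, the top bits of each subsequent byte select the match class
// (the M2/M3/M4 markers above), and a match with distance 1 << 14 and length 1
// acts as the end-of-stream marker. This is a summary of the decoder below, not
// a full format specification.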
static NTSTATUS do_lzo_decompress(lzo_stream* stream) {
    UINT8 byte;
    UINT32 len, back;
    BOOL backcopy = FALSE;

    stream->error = FALSE;

    byte = lzo_nextbyte(stream);
    if (stream->error) return STATUS_INTERNAL_ERROR;

    if (byte > 17) {
        lzo_copy(stream, min((UINT8)(byte - 17), (UINT32)(stream->outlen - stream->outpos)));
        if (stream->error) return STATUS_INTERNAL_ERROR;

        if (stream->outlen == stream->outpos)
            return STATUS_SUCCESS;

        byte = lzo_nextbyte(stream);
        if (stream->error) return STATUS_INTERNAL_ERROR;

        if (byte < 16) return STATUS_INTERNAL_ERROR;
    }

    while (1) {
        if (byte >> 4) {
            backcopy = TRUE;
            if (byte >> 6) {
                len = (byte >> 5) - 1;
                back = (lzo_nextbyte(stream) << 3) + ((byte >> 2) & 7) + 1;
                if (stream->error) return STATUS_INTERNAL_ERROR;
            } else if (byte >> 5) {
                len = lzo_len(stream, byte, 31);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                byte = lzo_nextbyte(stream);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                back = (lzo_nextbyte(stream) << 6) + (byte >> 2) + 1;
                if (stream->error) return STATUS_INTERNAL_ERROR;
            } else {
                len = lzo_len(stream, byte, 7);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                back = (1 << 14) + ((byte & 8) << 11);

                byte = lzo_nextbyte(stream);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                back += (lzo_nextbyte(stream) << 6) + (byte >> 2);
                if (stream->error) return STATUS_INTERNAL_ERROR;

                if (back == (1 << 14)) {
                    if (len != 1)
                        return STATUS_INTERNAL_ERROR;
                    break;
                }
            }
        } else if (backcopy) {
            len = 0;
            back = (lzo_nextbyte(stream) << 2) + (byte >> 2) + 1;
            if (stream->error) return STATUS_INTERNAL_ERROR;
        } else {
            len = lzo_len(stream, byte, 15);
            if (stream->error) return STATUS_INTERNAL_ERROR;

            lzo_copy(stream, min(len + 3, stream->outlen - stream->outpos));
            if (stream->error) return STATUS_INTERNAL_ERROR;

            if (stream->outlen == stream->outpos)
                return STATUS_SUCCESS;

            byte = lzo_nextbyte(stream);
            if (stream->error) return STATUS_INTERNAL_ERROR;

            if (byte >> 4)
                continue;

            len = 1;
            back = (1 << 11) + (lzo_nextbyte(stream) << 2) + (byte >> 2) + 1;
            if (stream->error) return STATUS_INTERNAL_ERROR;

            break;
        }

        lzo_copyback(stream, back, min(len + 2, stream->outlen - stream->outpos));
        if (stream->error) return STATUS_INTERNAL_ERROR;

        if (stream->outlen == stream->outpos)
            return STATUS_SUCCESS;

        len = byte & 3;

        if (len) {
            lzo_copy(stream, min(len, stream->outlen - stream->outpos));
            if (stream->error) return STATUS_INTERNAL_ERROR;

            if (stream->outlen == stream->outpos)
                return STATUS_SUCCESS;
        } else
            backcopy = !backcopy;

        byte = lzo_nextbyte(stream);
        if (stream->error) return STATUS_INTERNAL_ERROR;
    }

    return STATUS_SUCCESS;
}

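// Decompresses a btrfs LZO extent. On disk, each (up to) 4KB page's worth of
// uncompressed data is stored as a 32-bit little-endian compressed length
// followed by that many compressed bytes, and a length field is never allowed
// to straddle a LINUX_PAGE_SIZE boundary; the arithmetic at the end of the loop
// skips over that alignment padding. inpageoff gives the offset of inbuf within
// its page (the overall length header that precedes the first record is
// presumably consumed by the caller), so the boundary check stays in step with
// the on-disk layout.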
NTSTATUS lzo_decompress(UINT8* inbuf, UINT32 inlen, UINT8* outbuf, UINT32 outlen, UINT32 inpageoff) {
    NTSTATUS Status;
    UINT32 partlen, inoff, outoff;
    lzo_stream stream;

    inoff = 0;
    outoff = 0;

    do {
        partlen = *(UINT32*)&inbuf[inoff];

        if (partlen + inoff > inlen) {
272 ERR("overflow: %x + %x > %llx\n", partlen, inoff, inlen);
            return STATUS_INTERNAL_ERROR;
        }

        inoff += sizeof(UINT32);

        stream.in = &inbuf[inoff];
        stream.inlen = partlen;
        stream.inpos = 0;
        stream.out = &outbuf[outoff];
        stream.outlen = min(outlen, LINUX_PAGE_SIZE);
        stream.outpos = 0;

        Status = do_lzo_decompress(&stream);
        if (!NT_SUCCESS(Status)) {
            ERR("do_lzo_decompress returned %08x\n", Status);
            return Status;
        }

        if (stream.outpos < stream.outlen)
            RtlZeroMemory(&stream.out[stream.outpos], stream.outlen - stream.outpos);

        inoff += partlen;
        outoff += stream.outlen;

        if (LINUX_PAGE_SIZE - ((inpageoff + inoff) % LINUX_PAGE_SIZE) < sizeof(UINT32))
            inoff = ((((inpageoff + inoff) / LINUX_PAGE_SIZE) + 1) * LINUX_PAGE_SIZE) - inpageoff;

        outlen -= stream.outlen;
    } while (inoff < inlen && outlen > 0);

    return STATUS_SUCCESS;
}

static void* zlib_alloc(void* opaque, unsigned int items, unsigned int size) {
    UNUSED(opaque);

    return ExAllocatePoolWithTag(PagedPool, items * size, ALLOC_TAG_ZLIB);
}

static void zlib_free(void* opaque, void* ptr) {
    UNUSED(opaque);

    ExFreePool(ptr);
}

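// Inflates a zlib-compressed extent into outbuf. zalloc/zfree are wired up to
// the paged-pool helpers above, since zlib has no allocator of its own to fall
// back on in kernel mode; decompression stops as soon as the output buffer is
// full, as the caller may only want a prefix of the extent.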
NTSTATUS zlib_decompress(UINT8* inbuf, UINT32 inlen, UINT8* outbuf, UINT32 outlen) {
    z_stream c_stream;
    int ret;

    c_stream.zalloc = zlib_alloc;
    c_stream.zfree = zlib_free;
    c_stream.opaque = (voidpf)0;

    ret = inflateInit(&c_stream);

    if (ret != Z_OK) {
        ERR("inflateInit returned %08x\n", ret);
        return STATUS_INTERNAL_ERROR;
    }

    c_stream.next_in = inbuf;
    c_stream.avail_in = inlen;

    c_stream.next_out = outbuf;
    c_stream.avail_out = outlen;

    do {
        ret = inflate(&c_stream, Z_NO_FLUSH);

        if (ret != Z_OK && ret != Z_STREAM_END) {
            ERR("inflate returned %08x\n", ret);
            inflateEnd(&c_stream);
            return STATUS_INTERNAL_ERROR;
        }

        if (c_stream.avail_out == 0)
            break;
    } while (ret != Z_STREAM_END);

    ret = inflateEnd(&c_stream);

    if (ret != Z_OK) {
        ERR("inflateEnd returned %08x\n", ret);
        return STATUS_INTERNAL_ERROR;
    }

    // FIXME - if we're short, should we zero the end of outbuf so we don't leak information into userspace?

    return STATUS_SUCCESS;
}

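// Deflates one compression unit (start_data to end_data) and writes it out as a
// single extent, preferring an existing data chunk and allocating a new one only
// if nothing has enough free space. If deflate cannot save at least one sector,
// the range is written uncompressed instead and *compressed is set to FALSE.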
static NTSTATUS zlib_write_compressed_bit(fcb* fcb, UINT64 start_data, UINT64 end_data, void* data, BOOL* compressed, PIRP Irp, LIST_ENTRY* rollback) {
    NTSTATUS Status;
    UINT8 compression;
    UINT32 comp_length;
    UINT8* comp_data;
    UINT32 out_left;
    LIST_ENTRY* le;
    chunk* c;
    z_stream c_stream;
    int ret;

    comp_data = ExAllocatePoolWithTag(PagedPool, (UINT32)(end_data - start_data), ALLOC_TAG);
    if (!comp_data) {
        ERR("out of memory\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = excise_extents(fcb->Vcb, fcb, start_data, end_data, Irp, rollback);
    if (!NT_SUCCESS(Status)) {
        ERR("excise_extents returned %08x\n", Status);
        ExFreePool(comp_data);
        return Status;
    }

    c_stream.zalloc = zlib_alloc;
    c_stream.zfree = zlib_free;
    c_stream.opaque = (voidpf)0;

    ret = deflateInit(&c_stream, fcb->Vcb->options.zlib_level);

    if (ret != Z_OK) {
        ERR("deflateInit returned %08x\n", ret);
        ExFreePool(comp_data);
        return STATUS_INTERNAL_ERROR;
    }

    c_stream.avail_in = (UINT32)(end_data - start_data);
    c_stream.next_in = data;
    c_stream.avail_out = (UINT32)(end_data - start_data);
    c_stream.next_out = comp_data;

    do {
        ret = deflate(&c_stream, Z_FINISH);

        if (ret == Z_STREAM_ERROR) {
            ERR("deflate returned %x\n", ret);
            ExFreePool(comp_data);
            return STATUS_INTERNAL_ERROR;
        }
    } while (c_stream.avail_in > 0 && c_stream.avail_out > 0);

    out_left = c_stream.avail_out;

    ret = deflateEnd(&c_stream);

    if (ret != Z_OK) {
        ERR("deflateEnd returned %08x\n", ret);
        ExFreePool(comp_data);
        return STATUS_INTERNAL_ERROR;
    }

    if (out_left < fcb->Vcb->superblock.sector_size) { // compressed extent would be larger than or same size as uncompressed extent
        ExFreePool(comp_data);

        comp_length = (UINT32)(end_data - start_data);
        comp_data = data;
        compression = BTRFS_COMPRESSION_NONE;

        *compressed = FALSE;
    } else {
        UINT32 cl;

        compression = BTRFS_COMPRESSION_ZLIB;
        cl = (UINT32)(end_data - start_data - out_left);
        comp_length = (UINT32)sector_align(cl, fcb->Vcb->superblock.sector_size);

        RtlZeroMemory(comp_data + cl, comp_length - cl);

        *compressed = TRUE;
    }

    ExAcquireResourceSharedLite(&fcb->Vcb->chunk_lock, TRUE);

    le = fcb->Vcb->chunks.Flink;
    while (le != &fcb->Vcb->chunks) {
        c = CONTAINING_RECORD(le, chunk, list_entry);

        if (!c->readonly && !c->reloc) {
            ExAcquireResourceExclusiveLite(&c->lock, TRUE);

            if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
                if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, Irp, rollback, compression, end_data - start_data, FALSE, 0)) {
                    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

                    if (compression != BTRFS_COMPRESSION_NONE)
                        ExFreePool(comp_data);

                    return STATUS_SUCCESS;
                }
            }

            ExReleaseResourceLite(&c->lock);
        }

        le = le->Flink;
    }

    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    ExAcquireResourceExclusiveLite(&fcb->Vcb->chunk_lock, TRUE);

    Status = alloc_chunk(fcb->Vcb, fcb->Vcb->data_flags, &c, FALSE);

    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    if (!NT_SUCCESS(Status)) {
        ERR("alloc_chunk returned %08x\n", Status);

        if (compression != BTRFS_COMPRESSION_NONE)
            ExFreePool(comp_data);

        return Status;
    }

    if (c) {
        ExAcquireResourceExclusiveLite(&c->lock, TRUE);

        if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
            if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, Irp, rollback, compression, end_data - start_data, FALSE, 0)) {
                if (compression != BTRFS_COMPRESSION_NONE)
                    ExFreePool(comp_data);

                return STATUS_SUCCESS;
            }
        }

        ExReleaseResourceLite(&c->lock);
    }

    WARN("couldn't find any data chunks with %x bytes free\n", comp_length);

    if (compression != BTRFS_COMPRESSION_NONE)
        ExFreePool(comp_data);

    return STATUS_DISK_FULL;
}

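// Core of the LZO1X-1 compressor, adapted from lzo 0.22 as noted at the top of
// the file. It walks the input keeping a 16384-entry table of recently seen
// positions (wrkmem), hashed three bytes at a time via DINDEX; when a candidate
// position really matches, the pending literal run is flushed and the match is
// coded with the M2/M3/M4 markers that do_lzo_decompress understands. This is
// only a sketch of the control flow; the details live in the gotos below.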
static NTSTATUS lzo_do_compress(const UINT8* in, UINT32 in_len, UINT8* out, UINT32* out_len, void* wrkmem) {
    const UINT8* ip;
    UINT32 dv;
    UINT8* op;
    const UINT8* in_end = in + in_len;
    const UINT8* ip_end = in + in_len - 9 - 4;
    const UINT8* ii;
    const UINT8** dict = (const UINT8**)wrkmem;

    op = out;
    ip = in;
    ii = ip;

    DVAL_FIRST(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;
    DVAL_NEXT(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;
    DVAL_NEXT(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;
    DVAL_NEXT(dv, ip); UPDATE_D(dict, cycle, dv, ip); ip++;

    while (1) {
        const UINT8* m_pos;
        UINT32 m_len;
        ptrdiff_t m_off;
        UINT32 lit, dindex;

        dindex = DINDEX(dv, ip);
        m_pos = dict[dindex];
        UPDATE_I(dict, cycle, dindex, ip);

        if (!LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, M4_MAX_OFFSET) && m_pos[0] == ip[0] && m_pos[1] == ip[1] && m_pos[2] == ip[2]) {
            lit = (UINT32)(ip - ii);
            m_pos += 3;
            if (m_off <= M2_MAX_OFFSET)
                goto match;

            if (lit == 3) { /* better compression, but slower */
                if (op - 2 <= out)
                    return STATUS_INTERNAL_ERROR;

                op[-2] |= LZO_BYTE(3);
                *op++ = *ii++; *op++ = *ii++; *op++ = *ii++;
                goto code_match;
            }

            if (*m_pos == ip[3])
                goto match;
        }

        /* a literal */
        ++ip;
        if (ip >= ip_end)
            break;
        DVAL_NEXT(dv, ip);
        continue;

        /* a match */
match:
        /* store current literal run */
        if (lit > 0) {
            UINT32 t = lit;

            if (t <= 3) {
                if (op - 2 <= out)
                    return STATUS_INTERNAL_ERROR;

                op[-2] |= LZO_BYTE(t);
            } else if (t <= 18)
                *op++ = LZO_BYTE(t - 3);
            else {
                UINT32 tt = t - 18;

                *op++ = 0;
                while (tt > 255) {
                    tt -= 255;
                    *op++ = 0;
                }

                if (tt <= 0)
                    return STATUS_INTERNAL_ERROR;

                *op++ = LZO_BYTE(tt);
            }

            do {
                *op++ = *ii++;
            } while (--t > 0);
        }

        /* code the match */
code_match:
        if (ii != ip)
            return STATUS_INTERNAL_ERROR;

        ip += 3;
        if (*m_pos++ != *ip++ || *m_pos++ != *ip++ || *m_pos++ != *ip++ ||
            *m_pos++ != *ip++ || *m_pos++ != *ip++ || *m_pos++ != *ip++) {
            --ip;
            m_len = (UINT32)(ip - ii);

            if (m_len < 3 || m_len > 8)
                return STATUS_INTERNAL_ERROR;

            if (m_off <= M2_MAX_OFFSET) {
                m_off -= 1;
                *op++ = LZO_BYTE(((m_len - 1) << 5) | ((m_off & 7) << 2));
                *op++ = LZO_BYTE(m_off >> 3);
            } else if (m_off <= M3_MAX_OFFSET) {
                m_off -= 1;
                *op++ = LZO_BYTE(M3_MARKER | (m_len - 2));
                goto m3_m4_offset;
            } else {
                m_off -= 0x4000;

                if (m_off <= 0 || m_off > 0x7fff)
                    return STATUS_INTERNAL_ERROR;

                *op++ = LZO_BYTE(M4_MARKER | ((m_off & 0x4000) >> 11) | (m_len - 2));
                goto m3_m4_offset;
            }
        } else {
            const UINT8* end;
            end = in_end;
            while (ip < end && *m_pos == *ip)
                m_pos++, ip++;
            m_len = (UINT32)(ip - ii);

            if (m_len < 3)
                return STATUS_INTERNAL_ERROR;

            if (m_off <= M3_MAX_OFFSET) {
                m_off -= 1;
                if (m_len <= 33)
                    *op++ = LZO_BYTE(M3_MARKER | (m_len - 2));
                else {
                    m_len -= 33;
                    *op++ = M3_MARKER | 0;
                    goto m3_m4_len;
                }
            } else {
                m_off -= 0x4000;

                if (m_off <= 0 || m_off > 0x7fff)
                    return STATUS_INTERNAL_ERROR;

                if (m_len <= 9)
                    *op++ = LZO_BYTE(M4_MARKER | ((m_off & 0x4000) >> 11) | (m_len - 2));
                else {
                    m_len -= 9;
                    *op++ = LZO_BYTE(M4_MARKER | ((m_off & 0x4000) >> 11));
m3_m4_len:
                    while (m_len > 255) {
                        m_len -= 255;
                        *op++ = 0;
                    }

                    if (m_len <= 0)
                        return STATUS_INTERNAL_ERROR;

                    *op++ = LZO_BYTE(m_len);
                }
            }

m3_m4_offset:
            *op++ = LZO_BYTE((m_off & 63) << 2);
            *op++ = LZO_BYTE(m_off >> 6);
        }

        ii = ip;
        if (ip >= ip_end)
            break;
        DVAL_FIRST(dv, ip);
    }

    /* store final literal run */
    if (in_end - ii > 0) {
        UINT32 t = (UINT32)(in_end - ii);

        if (op == out && t <= 238)
            *op++ = LZO_BYTE(17 + t);
        else if (t <= 3)
            op[-2] |= LZO_BYTE(t);
        else if (t <= 18)
            *op++ = LZO_BYTE(t - 3);
        else {
            UINT32 tt = t - 18;

            *op++ = 0;
            while (tt > 255) {
                tt -= 255;
                *op++ = 0;
            }

            if (tt <= 0)
                return STATUS_INTERNAL_ERROR;

            *op++ = LZO_BYTE(tt);
        }

        do {
            *op++ = *ii++;
        } while (--t > 0);
    }

    *out_len = (UINT32)(op - out);

    return STATUS_SUCCESS;
}

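// Wrapper around lzo_do_compress that handles the trivial cases (empty input,
// or input short enough to be emitted as one literal run) and then appends the
// three-byte end-of-stream marker (M4_MARKER | 1, 0, 0) that the decompressor
// looks for.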
static NTSTATUS lzo1x_1_compress(lzo_stream* stream) {
    UINT8* op = stream->out;
    NTSTATUS Status = STATUS_SUCCESS;

    if (stream->inlen <= 0)
        stream->outlen = 0;
    else if (stream->inlen <= 9 + 4) {
        *op++ = LZO_BYTE(17 + stream->inlen);

        stream->inpos = 0;
        do {
            *op++ = stream->in[stream->inpos];
            stream->inpos++;
        } while (stream->inpos < stream->inlen);
        stream->outlen = (UINT32)(op - stream->out);
    } else
        Status = lzo_do_compress(stream->in, stream->inlen, stream->out, &stream->outlen, stream->wrkmem);

    if (Status == STATUS_SUCCESS) {
        op = stream->out + stream->outlen;
        *op++ = M4_MARKER | 1;
        *op++ = 0;
        *op++ = 0;
        stream->outlen += 3;
    }

    return Status;
}

static __inline UINT32 lzo_max_outlen(UINT32 inlen) {
    return inlen + (inlen / 16) + 64 + 3; // formula comes from LZO.FAQ
}

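// LZO counterpart of zlib_write_compressed_bit: the range is compressed one
// LINUX_PAGE_SIZE page at a time, each page prefixed with a 32-bit length and
// padded so that no length field straddles a page boundary, matching the layout
// lzo_decompress expects. The first dword of comp_data holds the total size of
// the framed stream. If compression fails or does not save more than a sector,
// the data is written uncompressed instead.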
static NTSTATUS lzo_write_compressed_bit(fcb* fcb, UINT64 start_data, UINT64 end_data, void* data, BOOL* compressed, PIRP Irp, LIST_ENTRY* rollback) {
    NTSTATUS Status;
    UINT8 compression;
    UINT64 comp_length;
    ULONG comp_data_len, num_pages, i;
    UINT8* comp_data;
    BOOL skip_compression = FALSE;
    lzo_stream stream;
    UINT32* out_size;
    LIST_ENTRY* le;
    chunk* c;

    num_pages = (ULONG)((sector_align(end_data - start_data, LINUX_PAGE_SIZE)) / LINUX_PAGE_SIZE);

    // Four-byte overall header
    // Another four-byte header per page
    // Each page has a maximum size of lzo_max_outlen(LINUX_PAGE_SIZE)
    // Plus another four bytes for possible padding
    comp_data_len = sizeof(UINT32) + ((lzo_max_outlen(LINUX_PAGE_SIZE) + (2 * sizeof(UINT32))) * num_pages);

    comp_data = ExAllocatePoolWithTag(PagedPool, comp_data_len, ALLOC_TAG);
    if (!comp_data) {
        ERR("out of memory\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    stream.wrkmem = ExAllocatePoolWithTag(PagedPool, LZO1X_MEM_COMPRESS, ALLOC_TAG);
    if (!stream.wrkmem) {
        ERR("out of memory\n");
        ExFreePool(comp_data);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = excise_extents(fcb->Vcb, fcb, start_data, end_data, Irp, rollback);
    if (!NT_SUCCESS(Status)) {
        ERR("excise_extents returned %08x\n", Status);
        ExFreePool(comp_data);
        ExFreePool(stream.wrkmem);
        return Status;
    }

    out_size = (UINT32*)comp_data;
    *out_size = sizeof(UINT32);

    stream.in = data;
    stream.out = comp_data + (2 * sizeof(UINT32));

    for (i = 0; i < num_pages; i++) {
        UINT32* pagelen = (UINT32*)(stream.out - sizeof(UINT32));

        stream.inlen = (UINT32)min(LINUX_PAGE_SIZE, end_data - start_data - (i * LINUX_PAGE_SIZE));

        Status = lzo1x_1_compress(&stream);
        if (!NT_SUCCESS(Status)) {
            ERR("lzo1x_1_compress returned %08x\n", Status);
            skip_compression = TRUE;
            break;
        }

        *pagelen = stream.outlen;
        *out_size += stream.outlen + sizeof(UINT32);

        stream.in += LINUX_PAGE_SIZE;
        stream.out += stream.outlen + sizeof(UINT32);

        if (LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE) < sizeof(UINT32)) {
            RtlZeroMemory(stream.out, LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE));
            stream.out += LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE);
            *out_size += LINUX_PAGE_SIZE - (*out_size % LINUX_PAGE_SIZE);
        }
    }

    ExFreePool(stream.wrkmem);

    if (skip_compression || *out_size >= end_data - start_data - fcb->Vcb->superblock.sector_size) { // compressed extent would be larger than or same size as uncompressed extent
        ExFreePool(comp_data);

        comp_length = end_data - start_data;
        comp_data = data;
        compression = BTRFS_COMPRESSION_NONE;

        *compressed = FALSE;
    } else {
        compression = BTRFS_COMPRESSION_LZO;
        comp_length = sector_align(*out_size, fcb->Vcb->superblock.sector_size);

        RtlZeroMemory(comp_data + *out_size, (ULONG)(comp_length - *out_size));

        *compressed = TRUE;
    }

    ExAcquireResourceSharedLite(&fcb->Vcb->chunk_lock, TRUE);

    le = fcb->Vcb->chunks.Flink;
    while (le != &fcb->Vcb->chunks) {
        c = CONTAINING_RECORD(le, chunk, list_entry);

        if (!c->readonly && !c->reloc) {
            ExAcquireResourceExclusiveLite(&c->lock, TRUE);

            if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
                if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, Irp, rollback, compression, end_data - start_data, FALSE, 0)) {
                    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

                    if (compression != BTRFS_COMPRESSION_NONE)
                        ExFreePool(comp_data);

                    return STATUS_SUCCESS;
                }
            }

            ExReleaseResourceLite(&c->lock);
        }

        le = le->Flink;
    }

    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    ExAcquireResourceExclusiveLite(&fcb->Vcb->chunk_lock, TRUE);

    Status = alloc_chunk(fcb->Vcb, fcb->Vcb->data_flags, &c, FALSE);

    ExReleaseResourceLite(&fcb->Vcb->chunk_lock);

    if (!NT_SUCCESS(Status)) {
        ERR("alloc_chunk returned %08x\n", Status);

        if (compression != BTRFS_COMPRESSION_NONE)
            ExFreePool(comp_data);

        return Status;
    }

    if (c) {
        ExAcquireResourceExclusiveLite(&c->lock, TRUE);

        if (c->chunk_item->type == fcb->Vcb->data_flags && (c->chunk_item->size - c->used) >= comp_length) {
            if (insert_extent_chunk(fcb->Vcb, fcb, c, start_data, comp_length, FALSE, comp_data, Irp, rollback, compression, end_data - start_data, FALSE, 0)) {
                if (compression != BTRFS_COMPRESSION_NONE)
                    ExFreePool(comp_data);

                return STATUS_SUCCESS;
            }
        }

        ExReleaseResourceLite(&c->lock);
    }

    WARN("couldn't find any data chunks with %llx bytes free\n", comp_length);

    if (compression != BTRFS_COMPRESSION_NONE)
        ExFreePool(comp_data);

    return STATUS_DISK_FULL;
}

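// Entry point for compressed writes: picks zlib or LZO for the range based on
// the mount options and the file's per-inode compression property, setting the
// LZO incompat flag on the superblock when LZO is chosen, then hands off to the
// appropriate helper above.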
NTSTATUS write_compressed_bit(fcb* fcb, UINT64 start_data, UINT64 end_data, void* data, BOOL* compressed, PIRP Irp, LIST_ENTRY* rollback) {
    UINT8 type;

    if (fcb->Vcb->options.compress_type != 0 && fcb->prop_compression == PropCompression_None)
        type = fcb->Vcb->options.compress_type;
    else {
        if (!(fcb->Vcb->superblock.incompat_flags & BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO) && fcb->prop_compression == PropCompression_LZO) {
            fcb->Vcb->superblock.incompat_flags |= BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO;
            type = BTRFS_COMPRESSION_LZO;
        } else if (fcb->Vcb->superblock.incompat_flags & BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO && fcb->prop_compression != PropCompression_Zlib)
            type = BTRFS_COMPRESSION_LZO;
        else
            type = BTRFS_COMPRESSION_ZLIB;
    }

    if (type == BTRFS_COMPRESSION_LZO) {
        fcb->Vcb->superblock.incompat_flags |= BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO;
        return lzo_write_compressed_bit(fcb, start_data, end_data, data, compressed, Irp, rollback);
    } else
        return zlib_write_compressed_bit(fcb, start_data, end_data, data, compressed, Irp, rollback);
}