1 /* Copyright (c) Mark Harmstone 2016-17
2 * Copyright (c) Reimar Doeffinger 2006
3 * Copyright (c) Markus Oberhumer 1996
5 * This file is part of WinBtrfs.
7 * WinBtrfs is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public Licence as published by
9 * the Free Software Foundation, either version 3 of the Licence, or
10 * (at your option) any later version.
12 * WinBtrfs is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU Lesser General Public Licence for more details.
17 * You should have received a copy of the GNU Lesser General Public Licence
18 * along with WinBtrfs. If not, see <http://www.gnu.org/licenses/>. */
20 // Portions of the LZO decompression code here were cribbed from code in
21 // libavcodec, also under the LGPL. Thank you, Reimar Doeffinger.
23 // The LZO compression code comes from v0.22 of lzo, written way back in
24 // 1996, and available here:
25 // https://www.ibiblio.org/pub/historic-linux/ftp-archives/sunsite.unc.edu/Sep-29-1996/libs/lzo-0.22.tar.gz
26 // Modern versions of lzo are licensed under the GPL, but the very oldest
27 // versions are under the LGPL and hence okay to use here.
29 #include "btrfs_drv.h"
35 #include "zlib/zlib.h"
36 #include "zlib/inftrees.h"
37 #include "zlib/inflate.h"
// Btrfs compresses in units of the Linux page size (4KB), regardless of the
// host's actual page size.
#define LINUX_PAGE_SIZE 4096

// Work area needed by lzo1x_1_compress: a 16K dictionary of byte pointers.
#define LZO1X_MEM_COMPRESS ((UINT32) (16384L * sizeof(UINT8*)))

// Maximum backwards match offsets for the various LZO1X match encodings.
#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MAX_OFFSET 0x4000
#define M4_MAX_OFFSET 0xbfff

#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET)

// NOTE(review): the M3/M4 marker bits are referenced by the compressor below
// but their definitions were lost in extraction; restored here from lzo-0.22
// (LZO1X: M3 marker = bit 5, M4 marker = bit 4) — verify against upstream.
#define M3_MARKER 32
#define M4_MARKER 16

// Rolling-hash helpers for the compressor's match dictionary (from lzo-0.22).
#define _DV2(p, shift1, shift2) (((( (UINT32)(p[2]) << shift1) ^ p[1]) << shift2) ^ p[0])
#define DVAL_NEXT(dv, p) dv ^= p[-1]; dv = (((dv) >> 5) ^ ((UINT32)(p[2]) << (2*5)))
#define _DV(p, shift) _DV2(p, shift, shift)
#define DVAL_FIRST(dv, p) dv = _DV((p), 5)
#define _DINDEX(dv, p) ((40799u * (dv)) >> 5)
#define DINDEX(dv, p) (((_DINDEX(dv, p)) & 0x3fff) << 0)
#define UPDATE_D(dict, cycle, dv, p) dict[DINDEX(dv, p)] = (p)
#define UPDATE_I(dict, cycle, index, p) dict[index] = (p)

// Non-zero if the candidate match position m_pos is invalid: before the start
// of the buffer, at/after the current position, or further back than
// max_offset. Sets m_off to the distance ip - m_pos as a side effect.
// (The final "m_off > max_offset" continuation line had been truncated, which
// left a trailing backslash that swallowed the following #define.)
#define LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, max_offset) \
    ((void*) m_pos < (void*) in || \
     (m_off = (UINT8*) ip - (UINT8*) m_pos) <= 0 || \
      m_off > max_offset)

#define LZO_BYTE(x) ((unsigned char) (x))
85 static UINT8
lzo_nextbyte(lzo_stream
* stream
) {
88 if (stream
->inpos
>= stream
->inlen
) {
93 c
= stream
->in
[stream
->inpos
];
99 static int lzo_len(lzo_stream
* stream
, int byte
, int mask
) {
100 int len
= byte
& mask
;
103 while (!(byte
= lzo_nextbyte(stream
))) {
104 if (stream
->error
) return 0;
115 static void lzo_copy(lzo_stream
* stream
, int len
) {
116 if (stream
->inpos
+ len
> stream
->inlen
) {
117 stream
->error
= TRUE
;
121 if (stream
->outpos
+ len
> stream
->outlen
) {
122 stream
->error
= TRUE
;
127 stream
->out
[stream
->outpos
] = stream
->in
[stream
->inpos
];
134 static void lzo_copyback(lzo_stream
* stream
, UINT32 back
, int len
) {
135 if (stream
->outpos
< back
) {
136 stream
->error
= TRUE
;
140 if (stream
->outpos
+ len
> stream
->outlen
) {
141 stream
->error
= TRUE
;
146 stream
->out
[stream
->outpos
] = stream
->out
[stream
->outpos
- back
];
// Decompresses one LZO1X-compressed segment into stream->out, returning
// STATUS_SUCCESS when the output buffer is filled or the end-of-stream
// marker is reached, STATUS_INTERNAL_ERROR on any malformed input.
// Ported from libavcodec's LZO decoder (see file header comment).
// NOTE(review): this function's text was mangled in extraction — the main
// loop structure, local declarations (byte/len/back) and several glue lines
// are missing, and the original file's line numbers are interleaved with the
// code. Only the surviving fragments appear below; restore from upstream
// WinBtrfs before attempting to build.
152 static NTSTATUS
do_lzo_decompress(lzo_stream
* stream
) {
155 BOOL backcopy
= FALSE
;
157 stream
->error
= FALSE
;
159 byte
= lzo_nextbyte(stream
);
160 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
// Initial literal run: a first byte > 17 encodes (count + 17) literals.
163 lzo_copy(stream
, min((UINT8
)(byte
- 17), (UINT32
)(stream
->outlen
- stream
->outpos
)));
164 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
166 if (stream
->outlen
== stream
->outpos
)
167 return STATUS_SUCCESS
;
169 byte
= lzo_nextbyte(stream
);
170 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
172 if (byte
< 16) return STATUS_INTERNAL_ERROR
;
// Match codes: length and backwards distance are unpacked according to the
// LZO1X bit layout (M2/M3/M4 encodings, distinguished by the top bits).
179 len
= (byte
>> 5) - 1;
180 back
= (lzo_nextbyte(stream
) << 3) + ((byte
>> 2) & 7) + 1;
181 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
182 } else if (byte
>> 5) {
183 len
= lzo_len(stream
, byte
, 31);
184 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
186 byte
= lzo_nextbyte(stream
);
187 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
189 back
= (lzo_nextbyte(stream
) << 6) + (byte
>> 2) + 1;
190 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
192 len
= lzo_len(stream
, byte
, 7);
193 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
195 back
= (1 << 14) + ((byte
& 8) << 11);
197 byte
= lzo_nextbyte(stream
);
198 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
200 back
+= (lzo_nextbyte(stream
) << 6) + (byte
>> 2);
201 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
// A distance of exactly 1<<14 in the M4 encoding is the end-of-stream marker.
203 if (back
== (1 << 14)) {
205 return STATUS_INTERNAL_ERROR
;
209 } else if (backcopy
) {
211 back
= (lzo_nextbyte(stream
) << 2) + (byte
>> 2) + 1;
212 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
214 len
= lzo_len(stream
, byte
, 15);
215 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
217 lzo_copy(stream
, min(len
+ 3, stream
->outlen
- stream
->outpos
));
218 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
220 if (stream
->outlen
== stream
->outpos
)
221 return STATUS_SUCCESS
;
223 byte
= lzo_nextbyte(stream
);
224 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
230 back
= (1 << 11) + (lzo_nextbyte(stream
) << 2) + (byte
>> 2) + 1;
231 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
// Copy the match itself, then any trailing literals encoded in the low two
// bits of the last code byte.
236 lzo_copyback(stream
, back
, min(len
+ 2, stream
->outlen
- stream
->outpos
));
237 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
239 if (stream
->outlen
== stream
->outpos
)
240 return STATUS_SUCCESS
;
245 lzo_copy(stream
, min(len
, stream
->outlen
- stream
->outpos
));
246 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
248 if (stream
->outlen
== stream
->outpos
)
249 return STATUS_SUCCESS
;
251 backcopy
= !backcopy
;
253 byte
= lzo_nextbyte(stream
);
254 if (stream
->error
) return STATUS_INTERNAL_ERROR
;
257 return STATUS_SUCCESS
;
// Decompresses a btrfs LZO extent: a sequence of LINUX_PAGE_SIZE output
// pages, each preceded by a 32-bit compressed-segment length. inpageoff is
// the offset of inbuf within its (Linux) page, needed because btrfs never
// lets a segment-length header straddle a page boundary.
// NOTE(review): mangled in extraction — the declarations of stream/Status,
// the do{ opener and the inoff/outoff initialisation are missing.
260 NTSTATUS
lzo_decompress(UINT8
* inbuf
, UINT32 inlen
, UINT8
* outbuf
, UINT32 outlen
, UINT32 inpageoff
) {
262 UINT32 partlen
, inoff
, outoff
;
269 partlen
= *(UINT32
*)&inbuf
[inoff
];
// NOTE(review): partlen + inoff can wrap before this check on 32-bit
// arithmetic; comparing partlen > inlen - inoff would be overflow-safe.
271 if (partlen
+ inoff
> inlen
) {
272 ERR("overflow: %x + %x > %llx\n", partlen
, inoff
, inlen
);
273 return STATUS_INTERNAL_ERROR
;
276 inoff
+= sizeof(UINT32
);
278 stream
.in
= &inbuf
[inoff
];
279 stream
.inlen
= partlen
;
281 stream
.out
= &outbuf
[outoff
];
282 stream
.outlen
= min(outlen
, LINUX_PAGE_SIZE
);
285 Status
= do_lzo_decompress(&stream
);
286 if (!NT_SUCCESS(Status
)) {
287 ERR("do_lzo_decompress returned %08x\n", Status
);
// Zero-fill any shortfall so we don't leak stale memory into the page cache.
291 if (stream
.outpos
< stream
.outlen
)
292 RtlZeroMemory(&stream
.out
[stream
.outpos
], stream
.outlen
- stream
.outpos
);
295 outoff
+= stream
.outlen
;
// A segment header never straddles a Linux page: if fewer than four bytes
// remain in the current page, skip forward to the start of the next one.
297 if (LINUX_PAGE_SIZE
- ((inpageoff
+ inoff
) % LINUX_PAGE_SIZE
) < sizeof(UINT32
))
298 inoff
= ((((inpageoff
+ inoff
) / LINUX_PAGE_SIZE
) + 1) * LINUX_PAGE_SIZE
) - inpageoff
;
300 outlen
-= stream
.outlen
;
301 } while (inoff
< inlen
&& outlen
> 0);
303 return STATUS_SUCCESS
;
306 static void* zlib_alloc(void* opaque
, unsigned int items
, unsigned int size
) {
309 return ExAllocatePoolWithTag(PagedPool
, items
* size
, ALLOC_TAG_ZLIB
);
// Free callback handed to zlib (z_stream.zfree): releases memory obtained
// via zlib_alloc. opaque is unused.
// NOTE(review): the body was lost in extraction; ExFreePool(ptr) restored to
// mirror zlib_alloc — verify against upstream WinBtrfs.
static void zlib_free(void* opaque, void* ptr) {
    ExFreePool(ptr);
}
// Inflates a zlib-compressed extent from inbuf into outbuf, using the
// paged-pool callbacks above for zlib's internal allocations. Returns
// STATUS_INTERNAL_ERROR on any zlib failure.
// NOTE(review): mangled in extraction — the declarations of c_stream/ret,
// the do{ opener, the break after avail_out == 0 and several closing braces
// are missing.
318 NTSTATUS
zlib_decompress(UINT8
* inbuf
, UINT32 inlen
, UINT8
* outbuf
, UINT32 outlen
) {
322 c_stream
.zalloc
= zlib_alloc
;
323 c_stream
.zfree
= zlib_free
;
324 c_stream
.opaque
= (voidpf
)0;
326 ret
= inflateInit(&c_stream
);
329 ERR("inflateInit returned %08x\n", ret
);
330 return STATUS_INTERNAL_ERROR
;
333 c_stream
.next_in
= inbuf
;
334 c_stream
.avail_in
= inlen
;
336 c_stream
.next_out
= outbuf
;
337 c_stream
.avail_out
= outlen
;
// Inflate until the stream ends or the output buffer is exhausted.
340 ret
= inflate(&c_stream
, Z_NO_FLUSH
);
342 if (ret
!= Z_OK
&& ret
!= Z_STREAM_END
) {
343 ERR("inflate returned %08x\n", ret
);
344 inflateEnd(&c_stream
);
345 return STATUS_INTERNAL_ERROR
;
348 if (c_stream
.avail_out
== 0)
350 } while (ret
!= Z_STREAM_END
);
352 ret
= inflateEnd(&c_stream
);
355 ERR("inflateEnd returned %08x\n", ret
);
356 return STATUS_INTERNAL_ERROR
;
359 // FIXME - if we're short, should we zero the end of outbuf so we don't leak information into userspace?
361 return STATUS_SUCCESS
;
// Compresses the range [start_data, end_data) of an fcb with zlib (deflate)
// and writes it out as a compressed extent; falls back to storing the data
// uncompressed when compression would not save at least one sector. Excises
// the existing extents first, then walks the chunk list for free space,
// allocating a new chunk if none fits.
// NOTE(review): mangled in extraction — many declarations (comp_data,
// c_stream, ret, Status, comp_length, compression, cl, out_left, le, c),
// loop openers and closing braces are missing; original line numbers are
// interleaved with the code.
364 static NTSTATUS
zlib_write_compressed_bit(fcb
* fcb
, UINT64 start_data
, UINT64 end_data
, void* data
, BOOL
* compressed
, PIRP Irp
, LIST_ENTRY
* rollback
) {
375 comp_data
= ExAllocatePoolWithTag(PagedPool
, (UINT32
)(end_data
- start_data
), ALLOC_TAG
);
377 ERR("out of memory\n");
378 return STATUS_INSUFFICIENT_RESOURCES
;
381 Status
= excise_extents(fcb
->Vcb
, fcb
, start_data
, end_data
, Irp
, rollback
);
382 if (!NT_SUCCESS(Status
)) {
383 ERR("excise_extents returned %08x\n", Status
);
384 ExFreePool(comp_data
);
388 c_stream
.zalloc
= zlib_alloc
;
389 c_stream
.zfree
= zlib_free
;
390 c_stream
.opaque
= (voidpf
)0;
392 ret
= deflateInit(&c_stream
, fcb
->Vcb
->options
.zlib_level
);
395 ERR("deflateInit returned %08x\n", ret
);
396 ExFreePool(comp_data
);
397 return STATUS_INTERNAL_ERROR
;
400 c_stream
.avail_in
= (UINT32
)(end_data
- start_data
);
401 c_stream
.next_in
= data
;
402 c_stream
.avail_out
= (UINT32
)(end_data
- start_data
);
403 c_stream
.next_out
= comp_data
;
406 ret
= deflate(&c_stream
, Z_FINISH
);
408 if (ret
== Z_STREAM_ERROR
) {
409 ERR("deflate returned %x\n", ret
);
410 ExFreePool(comp_data
);
411 return STATUS_INTERNAL_ERROR
;
413 } while (c_stream
.avail_in
> 0 && c_stream
.avail_out
> 0);
415 out_left
= c_stream
.avail_out
;
417 ret
= deflateEnd(&c_stream
);
420 ERR("deflateEnd returned %08x\n", ret
);
421 ExFreePool(comp_data
);
422 return STATUS_INTERNAL_ERROR
;
425 if (out_left
< fcb
->Vcb
->superblock
.sector_size
) { // compressed extent would be larger than or same size as uncompressed extent
426 ExFreePool(comp_data
);
428 comp_length
= (UINT32
)(end_data
- start_data
);
430 compression
= BTRFS_COMPRESSION_NONE
;
436 compression
= BTRFS_COMPRESSION_ZLIB
;
437 cl
= (UINT32
)(end_data
- start_data
- out_left
);
438 comp_length
= (UINT32
)sector_align(cl
, fcb
->Vcb
->superblock
.sector_size
);
440 RtlZeroMemory(comp_data
+ cl
, comp_length
- cl
);
// Look for an existing writable data chunk with enough free space.
445 ExAcquireResourceSharedLite(&fcb
->Vcb
->chunk_lock
, TRUE
);
447 le
= fcb
->Vcb
->chunks
.Flink
;
448 while (le
!= &fcb
->Vcb
->chunks
) {
449 c
= CONTAINING_RECORD(le
, chunk
, list_entry
);
451 if (!c
->readonly
&& !c
->reloc
) {
452 ExAcquireResourceExclusiveLite(&c
->lock
, TRUE
);
454 if (c
->chunk_item
->type
== fcb
->Vcb
->data_flags
&& (c
->chunk_item
->size
- c
->used
) >= comp_length
) {
455 if (insert_extent_chunk(fcb
->Vcb
, fcb
, c
, start_data
, comp_length
, FALSE
, comp_data
, Irp
, rollback
, compression
, end_data
- start_data
, FALSE
, 0)) {
456 ExReleaseResourceLite(&fcb
->Vcb
->chunk_lock
);
458 if (compression
!= BTRFS_COMPRESSION_NONE
)
459 ExFreePool(comp_data
);
461 return STATUS_SUCCESS
;
465 ExReleaseResourceLite(&c
->lock
);
471 ExReleaseResourceLite(&fcb
->Vcb
->chunk_lock
);
// No chunk had room: allocate a fresh data chunk and retry the insert.
473 ExAcquireResourceExclusiveLite(&fcb
->Vcb
->chunk_lock
, TRUE
);
475 Status
= alloc_chunk(fcb
->Vcb
, fcb
->Vcb
->data_flags
, &c
, FALSE
);
477 ExReleaseResourceLite(&fcb
->Vcb
->chunk_lock
);
479 if (!NT_SUCCESS(Status
)) {
480 ERR("alloc_chunk returned %08x\n", Status
);
482 if (compression
!= BTRFS_COMPRESSION_NONE
)
483 ExFreePool(comp_data
);
489 ExAcquireResourceExclusiveLite(&c
->lock
, TRUE
);
491 if (c
->chunk_item
->type
== fcb
->Vcb
->data_flags
&& (c
->chunk_item
->size
- c
->used
) >= comp_length
) {
492 if (insert_extent_chunk(fcb
->Vcb
, fcb
, c
, start_data
, comp_length
, FALSE
, comp_data
, Irp
, rollback
, compression
, end_data
- start_data
, FALSE
, 0)) {
493 if (compression
!= BTRFS_COMPRESSION_NONE
)
494 ExFreePool(comp_data
);
496 return STATUS_SUCCESS
;
500 ExReleaseResourceLite(&c
->lock
);
503 WARN("couldn't find any data chunks with %llx bytes free\n", comp_length
);
505 if (compression
!= BTRFS_COMPRESSION_NONE
)
506 ExFreePool(comp_data
);
508 return STATUS_DISK_FULL
;
// Core LZO1X-1 compressor, taken from v0.22 of lzo (see file header): a
// greedy match search over a 16K pointer dictionary keyed by a rolling hash
// (DVAL_*/DINDEX) of the next three input bytes, emitting literal runs and
// M2/M3/M4-encoded matches.
// NOTE(review): heavily mangled in extraction — most of the literal-run and
// match-emission logic (including the main loop, many branches and all
// closing braces) is missing, and original line numbers are interleaved.
// Do not attempt to rebuild from these fragments alone; restore from
// upstream WinBtrfs / lzo-0.22.
511 static NTSTATUS
lzo_do_compress(const UINT8
* in
, UINT32 in_len
, UINT8
* out
, UINT32
* out_len
, void* wrkmem
) {
515 const UINT8
* in_end
= in
+ in_len
;
516 const UINT8
* ip_end
= in
+ in_len
- 9 - 4;
518 const UINT8
** dict
= (const UINT8
**)wrkmem
;
// Prime the rolling hash and dictionary with the first few positions.
524 DVAL_FIRST(dv
, ip
); UPDATE_D(dict
, cycle
, dv
, ip
); ip
++;
525 DVAL_NEXT(dv
, ip
); UPDATE_D(dict
, cycle
, dv
, ip
); ip
++;
526 DVAL_NEXT(dv
, ip
); UPDATE_D(dict
, cycle
, dv
, ip
); ip
++;
527 DVAL_NEXT(dv
, ip
); UPDATE_D(dict
, cycle
, dv
, ip
); ip
++;
535 dindex
= DINDEX(dv
, ip
);
536 m_pos
= dict
[dindex
];
537 UPDATE_I(dict
, cycle
, dindex
, ip
);
// Candidate match must be in range and agree on at least three bytes.
539 if (!LZO_CHECK_MPOS_NON_DET(m_pos
, m_off
, in
, ip
, M4_MAX_OFFSET
) && m_pos
[0] == ip
[0] && m_pos
[1] == ip
[1] && m_pos
[2] == ip
[2]) {
540 lit
= (UINT32
)(ip
- ii
);
542 if (m_off
<= M2_MAX_OFFSET
)
545 if (lit
== 3) { /* better compression, but slower */
547 return STATUS_INTERNAL_ERROR
;
549 op
[-2] |= LZO_BYTE(3);
550 *op
++ = *ii
++; *op
++ = *ii
++; *op
++ = *ii
++;
567 /* store current literal run */
573 return STATUS_INTERNAL_ERROR
;
575 op
[-2] |= LZO_BYTE(t
);
577 *op
++ = LZO_BYTE(t
- 3);
588 return STATUS_INTERNAL_ERROR
;
590 *op
++ = LZO_BYTE(tt
);
602 return STATUS_INTERNAL_ERROR
;
// Extend the match: compare up to six further bytes one at a time.
605 if (*m_pos
++ != *ip
++ || *m_pos
++ != *ip
++ || *m_pos
++ != *ip
++ ||
606 *m_pos
++ != *ip
++ || *m_pos
++ != *ip
++ || *m_pos
++ != *ip
++) {
608 m_len
= (UINT32
)(ip
- ii
);
610 if (m_len
< 3 || m_len
> 8)
611 return STATUS_INTERNAL_ERROR
;
613 if (m_off
<= M2_MAX_OFFSET
) {
615 *op
++ = LZO_BYTE(((m_len
- 1) << 5) | ((m_off
& 7) << 2));
616 *op
++ = LZO_BYTE(m_off
>> 3);
617 } else if (m_off
<= M3_MAX_OFFSET
) {
619 *op
++ = LZO_BYTE(M3_MARKER
| (m_len
- 2));
624 if (m_off
<= 0 || m_off
> 0x7fff)
625 return STATUS_INTERNAL_ERROR
;
627 *op
++ = LZO_BYTE(M4_MARKER
| ((m_off
& 0x4000) >> 11) | (m_len
- 2));
633 while (ip
< end
&& *m_pos
== *ip
)
635 m_len
= (UINT32
)(ip
- ii
);
638 return STATUS_INTERNAL_ERROR
;
640 if (m_off
<= M3_MAX_OFFSET
) {
643 *op
++ = LZO_BYTE(M3_MARKER
| (m_len
- 2));
646 *op
++ = M3_MARKER
| 0;
652 if (m_off
<= 0 || m_off
> 0x7fff)
653 return STATUS_INTERNAL_ERROR
;
656 *op
++ = LZO_BYTE(M4_MARKER
| ((m_off
& 0x4000) >> 11) | (m_len
- 2));
659 *op
++ = LZO_BYTE(M4_MARKER
| ((m_off
& 0x4000) >> 11));
661 while (m_len
> 255) {
667 return STATUS_INTERNAL_ERROR
;
669 *op
++ = LZO_BYTE(m_len
);
674 *op
++ = LZO_BYTE((m_off
& 63) << 2);
675 *op
++ = LZO_BYTE(m_off
>> 6);
684 /* store final literal run */
685 if (in_end
- ii
> 0) {
686 UINT32 t
= (UINT32
)(in_end
- ii
);
688 if (op
== out
&& t
<= 238)
689 *op
++ = LZO_BYTE(17 + t
);
691 op
[-2] |= LZO_BYTE(t
);
693 *op
++ = LZO_BYTE(t
- 3);
704 return STATUS_INTERNAL_ERROR
;
706 *op
++ = LZO_BYTE(tt
);
714 *out_len
= (UINT32
)(op
- out
);
716 return STATUS_SUCCESS
;
// Wrapper around lzo_do_compress: handles the trivial cases (empty input,
// input too short to be worth compressing — stored as one literal run) and,
// on success, appends the end-of-stream marker after the compressed data.
// NOTE(review): mangled in extraction — the do{ opener, the stream->inpos
// bookkeeping and the remaining end-marker bytes are missing.
719 static NTSTATUS
lzo1x_1_compress(lzo_stream
* stream
) {
720 UINT8
*op
= stream
->out
;
721 NTSTATUS Status
= STATUS_SUCCESS
;
723 if (stream
->inlen
<= 0)
725 else if (stream
->inlen
<= 9 + 4) {
726 *op
++ = LZO_BYTE(17 + stream
->inlen
);
730 *op
++ = stream
->in
[stream
->inpos
];
// NOTE(review): this loop condition looks inverted — as written it would
// copy at most one byte; expect stream->inpos < stream->inlen. Verify
// against upstream before relying on it.
732 } while (stream
->inlen
< stream
->inpos
);
733 stream
->outlen
= (UINT32
)(op
- stream
->out
);
735 Status
= lzo_do_compress(stream
->in
, stream
->inlen
, stream
->out
, &stream
->outlen
, stream
->wrkmem
);
737 if (Status
== STATUS_SUCCESS
) {
738 op
= stream
->out
+ stream
->outlen
;
739 *op
++ = M4_MARKER
| 1;
748 static __inline UINT32
lzo_max_outlen(UINT32 inlen
) {
749 return inlen
+ (inlen
/ 16) + 64 + 3; // formula comes from LZO.FAQ
// Compresses [start_data, end_data) with LZO in LINUX_PAGE_SIZE segments,
// producing the btrfs LZO framing: a 32-bit overall length, a 32-bit length
// before each compressed page, and padding so no length header straddles a
// page boundary. Falls back to an uncompressed extent if the result would
// not save at least one sector, then writes it out exactly like
// zlib_write_compressed_bit.
// NOTE(review): mangled in extraction — declarations (comp_data, stream,
// Status, out_size, comp_length, compression, le, c), loop bodies and
// closing braces are missing throughout; original line numbers are
// interleaved with the code.
752 static NTSTATUS
lzo_write_compressed_bit(fcb
* fcb
, UINT64 start_data
, UINT64 end_data
, void* data
, BOOL
* compressed
, PIRP Irp
, LIST_ENTRY
* rollback
) {
756 ULONG comp_data_len
, num_pages
, i
;
758 BOOL skip_compression
= FALSE
;
764 num_pages
= (ULONG
)((sector_align(end_data
- start_data
, LINUX_PAGE_SIZE
)) / LINUX_PAGE_SIZE
);
766 // Four-byte overall header
767 // Another four-byte header page
768 // Each page has a maximum size of lzo_max_outlen(LINUX_PAGE_SIZE)
769 // Plus another four bytes for possible padding
770 comp_data_len
= sizeof(UINT32
) + ((lzo_max_outlen(LINUX_PAGE_SIZE
) + (2 * sizeof(UINT32
))) * num_pages
);
772 comp_data
= ExAllocatePoolWithTag(PagedPool
, comp_data_len
, ALLOC_TAG
);
774 ERR("out of memory\n");
775 return STATUS_INSUFFICIENT_RESOURCES
;
778 stream
.wrkmem
= ExAllocatePoolWithTag(PagedPool
, LZO1X_MEM_COMPRESS
, ALLOC_TAG
);
779 if (!stream
.wrkmem
) {
780 ERR("out of memory\n");
781 ExFreePool(comp_data
);
782 return STATUS_INSUFFICIENT_RESOURCES
;
785 Status
= excise_extents(fcb
->Vcb
, fcb
, start_data
, end_data
, Irp
, rollback
);
786 if (!NT_SUCCESS(Status
)) {
787 ERR("excise_extents returned %08x\n", Status
);
788 ExFreePool(comp_data
);
789 ExFreePool(stream
.wrkmem
);
// out_size accumulates the total framed length, starting with its own
// four-byte header.
793 out_size
= (UINT32
*)comp_data
;
794 *out_size
= sizeof(UINT32
);
797 stream
.out
= comp_data
+ (2 * sizeof(UINT32
));
// Compress each page independently, recording its compressed length in the
// 32-bit slot immediately before its data.
799 for (i
= 0; i
< num_pages
; i
++) {
800 UINT32
* pagelen
= (UINT32
*)(stream
.out
- sizeof(UINT32
));
802 stream
.inlen
= (UINT32
)min(LINUX_PAGE_SIZE
, end_data
- start_data
- (i
* LINUX_PAGE_SIZE
));
804 Status
= lzo1x_1_compress(&stream
);
805 if (!NT_SUCCESS(Status
)) {
806 ERR("lzo1x_1_compress returned %08x\n", Status
);
807 skip_compression
= TRUE
;
811 *pagelen
= stream
.outlen
;
812 *out_size
+= stream
.outlen
+ sizeof(UINT32
);
814 stream
.in
+= LINUX_PAGE_SIZE
;
815 stream
.out
+= stream
.outlen
+ sizeof(UINT32
);
// Pad so the next page's length header doesn't straddle a page boundary.
817 if (LINUX_PAGE_SIZE
- (*out_size
% LINUX_PAGE_SIZE
) < sizeof(UINT32
)) {
818 RtlZeroMemory(stream
.out
, LINUX_PAGE_SIZE
- (*out_size
% LINUX_PAGE_SIZE
));
819 stream
.out
+= LINUX_PAGE_SIZE
- (*out_size
% LINUX_PAGE_SIZE
);
820 *out_size
+= LINUX_PAGE_SIZE
- (*out_size
% LINUX_PAGE_SIZE
);
824 ExFreePool(stream
.wrkmem
);
826 if (skip_compression
|| *out_size
>= end_data
- start_data
- fcb
->Vcb
->superblock
.sector_size
) { // compressed extent would be larger than or same size as uncompressed extent
827 ExFreePool(comp_data
);
829 comp_length
= end_data
- start_data
;
831 compression
= BTRFS_COMPRESSION_NONE
;
835 compression
= BTRFS_COMPRESSION_LZO
;
836 comp_length
= sector_align(*out_size
, fcb
->Vcb
->superblock
.sector_size
);
838 RtlZeroMemory(comp_data
+ *out_size
, (ULONG
)(comp_length
- *out_size
));
// Look for an existing writable data chunk with enough free space.
843 ExAcquireResourceSharedLite(&fcb
->Vcb
->chunk_lock
, TRUE
);
845 le
= fcb
->Vcb
->chunks
.Flink
;
846 while (le
!= &fcb
->Vcb
->chunks
) {
847 c
= CONTAINING_RECORD(le
, chunk
, list_entry
);
849 if (!c
->readonly
&& !c
->reloc
) {
850 ExAcquireResourceExclusiveLite(&c
->lock
, TRUE
);
852 if (c
->chunk_item
->type
== fcb
->Vcb
->data_flags
&& (c
->chunk_item
->size
- c
->used
) >= comp_length
) {
853 if (insert_extent_chunk(fcb
->Vcb
, fcb
, c
, start_data
, comp_length
, FALSE
, comp_data
, Irp
, rollback
, compression
, end_data
- start_data
, FALSE
, 0)) {
854 ExReleaseResourceLite(&fcb
->Vcb
->chunk_lock
);
856 if (compression
!= BTRFS_COMPRESSION_NONE
)
857 ExFreePool(comp_data
);
859 return STATUS_SUCCESS
;
863 ExReleaseResourceLite(&c
->lock
);
869 ExReleaseResourceLite(&fcb
->Vcb
->chunk_lock
);
// No chunk had room: allocate a fresh data chunk and retry the insert.
871 ExAcquireResourceExclusiveLite(&fcb
->Vcb
->chunk_lock
, TRUE
);
873 Status
= alloc_chunk(fcb
->Vcb
, fcb
->Vcb
->data_flags
, &c
, FALSE
);
875 ExReleaseResourceLite(&fcb
->Vcb
->chunk_lock
);
877 if (!NT_SUCCESS(Status
)) {
878 ERR("alloc_chunk returned %08x\n", Status
);
880 if (compression
!= BTRFS_COMPRESSION_NONE
)
881 ExFreePool(comp_data
);
887 ExAcquireResourceExclusiveLite(&c
->lock
, TRUE
);
889 if (c
->chunk_item
->type
== fcb
->Vcb
->data_flags
&& (c
->chunk_item
->size
- c
->used
) >= comp_length
) {
890 if (insert_extent_chunk(fcb
->Vcb
, fcb
, c
, start_data
, comp_length
, FALSE
, comp_data
, Irp
, rollback
, compression
, end_data
- start_data
, FALSE
, 0)) {
891 if (compression
!= BTRFS_COMPRESSION_NONE
)
892 ExFreePool(comp_data
);
894 return STATUS_SUCCESS
;
898 ExReleaseResourceLite(&c
->lock
);
901 WARN("couldn't find any data chunks with %llx bytes free\n", comp_length
);
903 if (compression
!= BTRFS_COMPRESSION_NONE
)
904 ExFreePool(comp_data
);
906 return STATUS_DISK_FULL
;
// Entry point for writing a compressed extent: picks the algorithm from the
// mount options and the file's per-inode compression property, sets the LZO
// incompat flag in the superblock when LZO is chosen, and dispatches to
// lzo_write_compressed_bit or zlib_write_compressed_bit.
// NOTE(review): mangled in extraction — the declaration of `type` and the
// final closing brace are missing.
909 NTSTATUS
write_compressed_bit(fcb
* fcb
, UINT64 start_data
, UINT64 end_data
, void* data
, BOOL
* compressed
, PIRP Irp
, LIST_ENTRY
* rollback
) {
// An explicit mount-option compression type wins when the file has no
// per-inode preference; otherwise prefer LZO when the property asks for it
// or the volume already carries the LZO incompat flag.
912 if (fcb
->Vcb
->options
.compress_type
!= 0 && fcb
->prop_compression
== PropCompression_None
)
913 type
= fcb
->Vcb
->options
.compress_type
;
915 if (!(fcb
->Vcb
->superblock
.incompat_flags
& BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO
) && fcb
->prop_compression
== PropCompression_LZO
) {
916 fcb
->Vcb
->superblock
.incompat_flags
|= BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO
;
917 type
= BTRFS_COMPRESSION_LZO
;
918 } else if (fcb
->Vcb
->superblock
.incompat_flags
& BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO
&& fcb
->prop_compression
!= PropCompression_Zlib
)
919 type
= BTRFS_COMPRESSION_LZO
;
921 type
= BTRFS_COMPRESSION_ZLIB
;
924 if (type
== BTRFS_COMPRESSION_LZO
) {
925 fcb
->Vcb
->superblock
.incompat_flags
|= BTRFS_INCOMPAT_FLAGS_COMPRESS_LZO
;
926 return lzo_write_compressed_bit(fcb
, start_data
, end_data
, data
, compressed
, Irp
, rollback
);
928 return zlib_write_compressed_bit(fcb
, start_data
, end_data
, data
, compressed
, Irp
, rollback
);