1 ////////////////////////////////////////////////////////////////////
2 // Copyright (C) Alexander Telyatnikov, Ivan Keliukh, Yegor Anchishkin, SKIF Software, 1999-2013. Kiev, Ukraine
3 // All rights reserved
4 // This file was released under the GPLv2 on June 2015.
5 ////////////////////////////////////////////////////////////////////
6 /*
7 Module name:
8
9 alloc.cpp
10
11 Abstract:
12
13 This file contains filesystem-specific routines
14 responsible for disk space management
15
16 */
17
18 #include "udf.h"
19
20 #define UDF_BUG_CHECK_ID UDF_FILE_UDF_INFO_ALLOC
21
22 static const int8 bit_count_tab[] = {
23 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
24 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
25 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
26 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
27 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
28 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
29 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
30 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
31
32 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
33 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
34 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
35 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
36 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
37 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
38 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
39 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
40 };
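// Note: bit_count_tab is a per-byte population-count lookup table: entry N holds
// the number of set bits in the byte value N (e.g. bit_count_tab[0xF3] == 6).
// UDFGetPartFreeSpace() below sums these entries over the free-space bitmap to
// count free blocks a byte at a time.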
41
42 /*
43 This routine converts a physical address to a logical one within the specified partition
44 */
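/*
    Both translation helpers below rely on the same relation between a physical
    LBA and a partition-relative (logical) block number, where pm is the matching
    partition map entry:

        logical  = (physical - pm->PartitionRoot) >> Vcb->LB2B_Bits;   // UDFPhysLbaToPart
        physical = pm->PartitionRoot + (logical << Vcb->LB2B_Bits);    // UDFPartLbaToPhys
*/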
45 uint32
46 UDFPhysLbaToPart(
47 IN PVCB Vcb,
48 IN uint32 PartNum,
49 IN uint32 Addr
50 )
51 {
52 PUDFPartMap pm = Vcb->Partitions;
53 //#ifdef _X86_
54 #ifdef _MSC_VER
55 uint32 retval;
56 __asm {
57 push ebx
58 push ecx
59 push edx
60
61 mov ebx,Vcb
62 mov edx,[ebx]Vcb.PartitionMaps
63 mov ebx,pm
64 mov ecx,PartNum
65 xor eax,eax
66 loop_pl2p:
67 cmp ecx,edx
68 jae short EO_pl2p
69 cmp [ebx]pm.PartitionNum,cx
70 jne short cont_pl2p
71 mov eax,Addr
72 sub eax,[ebx]pm.PartitionRoot
73 mov ecx,Vcb
74 mov ecx,[ecx]Vcb.LB2B_Bits
75 shr eax,cl
76 jmp short EO_pl2p
77 cont_pl2p:
78 add ebx,size UDFPartMap
79 inc ecx
80 jmp short loop_pl2p
81 EO_pl2p:
82 mov retval,eax
83
84 pop edx
85 pop ecx
86 pop ebx
87 }
88 #ifdef UDF_DBG
89 {
90 // validate return value
91 lb_addr locAddr;
92 locAddr.logicalBlockNum = retval;
93 locAddr.partitionReferenceNum = (uint16)PartNum;
94 UDFPartLbaToPhys(Vcb, &locAddr);
95 }
96 #endif // UDF_DBG
97 return retval;
98 #else // NO X86 optimization , use generic C/C++
99 uint32 i;
100 // walk through partition maps to find suitable one...
101 for(i=PartNum; i<Vcb->PartitionMaps; i++, pm++) {
102 if(pm->PartitionNum == PartNum)
103 // wow! return relative address
104 return (Addr - pm->PartitionRoot) >> Vcb->LB2B_Bits;
105 }
106 return 0;
107 #endif // _MSC_VER
108 } // end UDFPhysLbaToPart()
109
110 /*
111 This routine returns physical Lba for partition-relative addr
112 */
113 uint32
114 __fastcall
115 UDFPartLbaToPhys(
116 IN PVCB Vcb,
117 IN lb_addr* Addr
118 )
119 {
120 uint32 i, a;
121 if(Addr->partitionReferenceNum >= Vcb->PartitionMaps) {
122 AdPrint(("UDFPartLbaToPhys: part %x, lbn %x (err)\n",
123 Addr->partitionReferenceNum, Addr->logicalBlockNum));
124 if(Vcb->PartitionMaps &&
125 (Vcb->CompatFlags & UDF_VCB_IC_INSTANT_COMPAT_ALLOC_DESCS)) {
126 AdPrint(("UDFPartLbaToPhys: try to recover: part %x -> %x\n",
127 Addr->partitionReferenceNum, Vcb->PartitionMaps-1));
128 Addr->partitionReferenceNum = (USHORT)(Vcb->PartitionMaps-1);
129 } else {
130 return LBA_OUT_OF_EXTENT;
131 }
132 }
133 // walk through partition maps & transform relative address
134 // to physical
135 for(i=Addr->partitionReferenceNum; i<Vcb->PartitionMaps; i++) {
136 if(Vcb->Partitions[i].PartitionNum == Addr->partitionReferenceNum) {
137 a = Vcb->Partitions[i].PartitionRoot +
138 (Addr->logicalBlockNum << Vcb->LB2B_Bits);
139 if(a > Vcb->LastPossibleLBA) {
140 AdPrint(("UDFPartLbaToPhys: root %x, lbn %x, lba %x (err1)\n",
141 Vcb->Partitions[i].PartitionRoot, Addr->logicalBlockNum, a));
142 BrutePoint();
143 return LBA_OUT_OF_EXTENT;
144 }
145 return a;
146 }
147 }
148 a = Vcb->Partitions[i-1].PartitionRoot +
149 (Addr->logicalBlockNum << Vcb->LB2B_Bits);
150 if(a > Vcb->LastPossibleLBA) {
151 AdPrint(("UDFPartLbaToPhys: i %x, root %x, lbn %x, lba %x (err2)\n",
152 i, Vcb->Partitions[i-1].PartitionRoot, Addr->logicalBlockNum, a));
153 BrutePoint();
154 return LBA_OUT_OF_EXTENT;
155 }
156 return a;
157 } // end UDFPartLbaToPhys()
158
159
160 /*
161 This routine returns physical Lba for partition-relative addr.
162 No partition bounds check is performed.
163 This routine only checks whether the requested partition exists.
164 It was introduced for 'Adaptec DirectCD' compatibility,
165 because that software uses negative values as extent terminators (contrary to the standard).
166 */
167 /*uint32
168 __fastcall
169 UDFPartLbaToPhysCompat(
170 IN PVCB Vcb,
171 IN lb_addr* Addr
172 )
173 {
174 uint32 i, a;
175 if(Addr->partitionReferenceNum >= Vcb->PartitionMaps) return LBA_NOT_ALLOCATED;
176 // walk through partition maps & transform relative address
177 // to physical
178 for(i=Addr->partitionReferenceNum; i<Vcb->PartitionMaps; i++) {
179 if(Vcb->Partitions[i].PartitionNum == Addr->partitionReferenceNum) {
180 a = Vcb->Partitions[i].PartitionRoot +
181 (Addr->logicalBlockNum << Vcb->LB2B_Bits);
182 if(a > Vcb->LastPossibleLBA) {
183 BrutePoint();
184 }
185 return a;
186 }
187 }
188 a = Vcb->Partitions[i-1].PartitionRoot +
189 (Addr->logicalBlockNum << Vcb->LB2B_Bits);
190 if(a > Vcb->LastPossibleLBA) {
191 BrutePoint();
192 }
193 return a;
194 } // end UDFPartLbaToPhysCompat()*/
195
196
197 /*
198 This routine looks for the partition containing given physical sector
199 */
200 uint32
201 __fastcall
202 UDFGetPartNumByPhysLba(
203 IN PVCB Vcb,
204 IN uint32 Lba
205 )
206 {
207 uint32 i=Vcb->PartitionMaps-1, root;
208 PUDFPartMap pm = &(Vcb->Partitions[i]);
209 // walk through the partition maps to find suitable one
210 for(;i!=0xffffffff;i--,pm--) {
211 if( ((root = pm->PartitionRoot) <= Lba) &&
212 ((root + pm->PartitionLen) > Lba) ) return (uint16)pm->PartitionNum;
213 }
214 return LBA_OUT_OF_EXTENT; // Lba doesn't belong to any partition
215 } // end UDFGetPartNumByPhysLba()
216
217 /*
218 Very simple routine. It walks through the Partition Maps & returns
219 the 1st Lba of the 1st suitable one
220 */
221 uint32
222 __fastcall
223 UDFPartStart(
224 PVCB Vcb,
225 uint32 PartNum
226 )
227 {
228 uint32 i;
229 if(PartNum == (uint32)-1) return 0;
230 if(PartNum == (uint32)-2) return Vcb->Partitions[0].PartitionRoot;
231 for(i=PartNum; i<Vcb->PartitionMaps; i++) {
232 if(Vcb->Partitions[i].PartitionNum == PartNum) return Vcb->Partitions[i].PartitionRoot;
233 }
234 return 0;
235 } // end UDFPartStart()
236
237 /*
238 This routine does almost the same as the previous one.
239 The only difference is that it returns the Last Lba instead of the First one...
240 */
241 uint32
242 __fastcall
243 UDFPartEnd(
244 PVCB Vcb,
245 uint32 PartNum
246 )
247 {
248 uint32 i;
249 if(PartNum == (uint32)-1) return Vcb->LastLBA;
250 if(PartNum == (uint32)-2) PartNum = Vcb->PartitionMaps-1;
251 for(i=PartNum; i<Vcb->PartitionMaps; i++) {
252 if(Vcb->Partitions[i].PartitionNum == PartNum)
253 return (Vcb->Partitions[i].PartitionRoot +
254 Vcb->Partitions[i].PartitionLen);
255 }
256 return (Vcb->Partitions[i-1].PartitionRoot +
257 Vcb->Partitions[i-1].PartitionLen);
258 } // end UDFPartEnd()
259
260 /*
261 Very simple routine. It walks through the Partition Maps & returns
262 the length of the 1st suitable one
263 */
264 uint32
265 __fastcall
266 UDFPartLen(
267 PVCB Vcb,
268 uint32 PartNum
269 )
270 {
271
272 if(PartNum == (uint32)-2) return UDFPartEnd(Vcb, -2) - UDFPartStart(Vcb, -2);
273 /*#ifdef _X86_
274 uint32 ret_val;
275 __asm {
276 mov ebx,Vcb
277 mov eax,PartNum
278 cmp eax,-1
279 jne short NOT_last_gpl
280 mov eax,[ebx]Vcb.LastLBA
281 jmp short EO_gpl
282 NOT_last_gpl:
283 mov esi,eax
284 xor eax,eax
285 mov ecx,[ebx]Vcb.PartitionMaps
286 jecxz EO_gpl
287
288 mov eax,esi
289 mov edx,size UDFTrackMap
290 mul edx
291 add ebx,eax
292 mov eax,esi
293 gpl_loop:
294 cmp [ebx]Vcb.PartitionMaps.PartitionNum,ax
295 je short EO_gpl_1
296 add ebx,size UDFTrackMap
297 inc eax
298 cmp eax,ecx
299 jb short gpl_loop
300 sub ebx,size UDFTrackMap
301 EO_gpl_1:
302 mov eax,[ebx]Vcb.PartitionMaps.PartitionLen
303 add eax,[ebx]Vcb.PartitionMaps.PartitionRoot
304 EO_gpl:
305 mov ret_val,eax
306 }
307 return ret_val;
308 #else // NO X86 optimization , use generic C/C++*/
309 uint32 i;
310 if(PartNum == (uint32)-1) return Vcb->LastLBA;
311 for(i=PartNum; i<Vcb->PartitionMaps; i++) {
312 if(Vcb->Partitions[i].PartitionNum == PartNum)
313 return Vcb->Partitions[i].PartitionLen;
314 }
315 return (Vcb->Partitions[i-1].PartitionRoot +
316 Vcb->Partitions[i-1].PartitionLen);
317 /*#endif // _X86_*/
318 } // end UDFPartLen()
319
320 /*
321 This routine returns the length of the bit-chain (run of equal bits) starting
322 at bit Offs in the array Bitmap. The scan is limited by Lim.
323 */
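/*
    Example: with Bitmap[0] == 0x000000F8 (bits 3..7 set), Offs == 3 and Lim == 32,
    the run of 1-bits starting at bit 3 has length 5, so both the assembler and the
    generic C implementation below return 5.
*/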
324
325 //#if defined _X86_
326 #if defined _MSC_VER
327
328 __declspec (naked)
329 uint32
330 __stdcall
331 UDFGetBitmapLen(
332 uint32* Bitmap,
333 uint32 Offs,
334 uint32 Lim // NOT included
335 )
336 {
337 _asm {
338 push ebp
339 mov ebp, esp
340
341 push ebx
342 push ecx
343 push edx
344 push esi
345 push edi
346
347 xor edx,edx // init bit-counter
348 mov ebx,[ebp+0x08] // set base pointer in EBX (Bitmap)
349 mov esi,[ebp+0x0c] // set Offs in ESI
350 mov edi,[ebp+0x10] // set Lim in EDI
351
352 // check if Lim <= Offs
353 cmp esi,edi
354 // jb start_count
355 // ja exit_count
356 // inc edx
357 // jmp exit_count
358 jae exit_count
359
360 //start_count:
361
362 // set 1st bit number in CL
363 mov ecx,esi
364 and cl,0x1f
365 // make ESI uint32-index
366 shr esi,5
367
368 // save last bit number in CH
369 mov eax,edi
370 and al,0x1f
371 mov ch,al
372 // make EDI uint32-index of the last uint32
373 shr edi,5
374
375 mov eax,[ebx+esi*4]
376 shr eax,cl
377 test eax,1
378
379 jz Loop_0
380
381 /* COUNT 1-BITS SECTION */
382 Loop_1:
383
384 cmp esi,edi
385 ja exit_count // must never happen
386 jb non_last_1
387
388 Loop_last_1:
389
390 cmp cl,ch
391 jae exit_count
392 // did we meet a 0 ?
393 test eax,1
394 jz exit_count
395 shr eax,1
396 inc edx
397 inc cl
398 jmp Loop_last_1
399
400 non_last_1:
401
402 or cl,cl
403 jnz std_count_1
404 cmp eax,-1
405 je quick_count_1
406
407 std_count_1:
408
409 cmp cl,0x1f
410 ja next_uint32_1
411 // did we meet a 0 ?
412 test eax,1
413 jz exit_count
414 shr eax,1
415 inc edx
416 inc cl
417 jmp std_count_1
418
419 quick_count_1:
420
421 add edx,0x20
422
423 next_uint32_1:
424
425 inc esi
426 mov eax,[ebx+esi*4]
427 xor cl,cl
428 jmp Loop_1
429
430 /* COUNT 0-BITS SECTION */
431 Loop_0:
432
433 cmp esi,edi
434 ja exit_count // must never happen
435 jb non_last_0
436
437 Loop_last_0:
438
439 cmp cl,ch
440 jae exit_count
441 // did we meet a 1 ?
442 test eax,1
443 jnz exit_count
444 shr eax,1
445 inc edx
446 inc cl
447 jmp Loop_last_0
448
449 non_last_0:
450
451 or cl,cl
452 jnz std_count_0
453 or eax,eax
454 jz quick_count_0
455
456 std_count_0:
457
458 cmp cl,0x1f
459 ja next_uint32_0
460 // did we meet a 1 ?
461 test eax,1
462 jnz exit_count
463 shr eax,1
464 inc edx
465 inc cl
466 jmp std_count_0
467
468 quick_count_0:
469
470 add edx,0x20
471
472 next_uint32_0:
473
474 inc esi
475 mov eax,[ebx+esi*4]
476 xor cl,cl
477 jmp Loop_0
478
479 exit_count:
480
481 mov eax,edx
482
483 pop edi
484 pop esi
485 pop edx
486 pop ecx
487 pop ebx
488
489 pop ebp
490
491 ret 0x0c
492 }
493
494 #else // NO X86 optimization , use generic C/C++
495
496 uint32
497 __stdcall
498 UDFGetBitmapLen(
499 uint32* Bitmap,
500 uint32 Offs,
501 uint32 Lim // NOT included
502 )
503 {
504 ASSERT(Offs <= Lim);
505 if(Offs >= Lim) {
506 return 0;//(Offs == Lim);
507 }
508
509 BOOLEAN bit = UDFGetBit(Bitmap, Offs);
510 uint32 i=Offs>>5;
511 uint32 len=0;
512 uint8 j=(uint8)(Offs&31);
513 uint8 lLim=(uint8)(Lim&31);
514
515 Lim = Lim>>5;
516
517 ASSERT((bit == 0) || (bit == 1));
518
519 uint32 a;
520
521 a = Bitmap[i] >> j;
522
523 while(i<=Lim) {
524
525 while( j < ((i<Lim) ? 32 : lLim) ) {
526 if( ((BOOLEAN)(a&1)) != bit)
527 return len;
528 len++;
529 a>>=1;
530 j++;
531 }
532 j=0;
533 While_3:
534 i++;
    if(i > Lim)
        break;      // don't read past the last uint32 covered by Lim
535 a = Bitmap[i];
536
537 if(i<Lim) {
538 if((bit && (a==0xffffffff)) ||
539 (!bit && !a)) {
540 len+=32;
541 goto While_3;
542 }
543 }
544 }
545
546 return len;
547
548 #endif // _MSC_VER
549
550 } // end UDFGetBitmapLen()
551
552 #ifndef UDF_READ_ONLY_BUILD
553 /*
554 This routine scans the disc free-space Bitmap for the minimal suitable extent.
555 It returns the maximal available extent if no sufficiently long extent is found.
556 */
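/*
    Strategy used below: the bitmap is walked with UDFGetBitmapLen(); free runs of
    at least Length blocks are best-fit candidates (the shortest such run wins),
    shorter runs are remembered as the fallback maximum. The first pass may require
    packet-size alignment; if nothing suitable is found that way, the scan is
    retried without the alignment requirement.
*/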
557 uint32
558 UDFFindMinSuitableExtent(
559 IN PVCB Vcb,
560 IN uint32 Length, // in blocks
561 IN uint32 SearchStart,
562 IN uint32 SearchLim, // NOT included
563 OUT uint32* MaxExtLen,
564 IN uint8 AllocFlags
565 )
566 {
567 uint32 i, len;
568 uint32* cur;
569 uint32 best_lba=0;
570 uint32 best_len=0;
571 uint32 max_lba=0;
572 uint32 max_len=0;
573 BOOLEAN align = FALSE;
574 uint32 PS = Vcb->WriteBlockSize >> Vcb->BlockSizeBits;
575
576 UDF_CHECK_BITMAP_RESOURCE(Vcb);
577
578 // we'll first try to allocate a packet-aligned block
579 if(!(Length & (PS-1)) && !Vcb->CDR_Mode && (Length >= PS*2))
580 align = TRUE;
581 if(AllocFlags & EXTENT_FLAG_ALLOC_SEQUENTIAL)
582 align = TRUE;
583 if(Length > (uint32)(UDF_MAX_EXTENT_LENGTH >> Vcb->BlockSizeBits))
584 Length = (UDF_MAX_EXTENT_LENGTH >> Vcb->BlockSizeBits);
585 // align Length according to _Logical_ block size & convert it to BCount
586 i = (1<<Vcb->LB2B_Bits)-1;
587 Length = (Length+i) & ~i;
588 cur = (uint32*)(Vcb->FSBM_Bitmap);
589
590 retry_no_align:
591
592 i=SearchStart;
593 // scan Bitmap
594 while(i<SearchLim) {
595 ASSERT(i <= SearchLim);
596 if(align) {
597 i = (i+PS-1) & ~(PS-1);
598 ASSERT(i <= SearchLim);
599 if(i >= SearchLim)
600 break;
601 }
602 len = UDFGetBitmapLen(cur, i, SearchLim);
603 if(UDFGetFreeBit(cur, i)) { // is the extent found free or used ?
604 // wow! it is free!
605 if(len >= Length) {
606 // minimize extent length
607 if(!best_len || (best_len > len)) {
608 best_lba = i;
609 best_len = len;
610 }
611 if(len == Length)
612 break;
613 } else {
614 // remember max extent
615 if(max_len < len) {
616 max_lba = i;
617 max_len = len;
618 }
619 }
620 // in CD-R mode we should not think about fragmentation:
621 // due to the nature of CD-R the file will be fragmented in any case
622 if(Vcb->CDR_Mode) break;
623 }
624 i += len;
625 }
626 // if we can't find suitable Packet-size aligned block,
627 // retry without any alignment requirements
628 if(!best_len && align) {
629 align = FALSE;
630 goto retry_no_align;
631 }
632 if(best_len) {
633 // minimal suitable block
634 (*MaxExtLen) = best_len;
635 return best_lba;
636 }
637 // maximal available
638 (*MaxExtLen) = max_len;
639 return max_lba;
640 } // end UDFFindMinSuitableExtent()
641 #endif //UDF_READ_ONLY_BUILD
642
643 #ifdef UDF_CHECK_DISK_ALLOCATION
644 /*
645 This routine checks the space described by Mapping as Used/Freed (optionally)
646 */
647 void
648 UDFCheckSpaceAllocation_(
649 IN PVCB Vcb,
650 IN PEXTENT_MAP Map,
651 IN uint32 asXXX
652 #ifdef UDF_TRACK_ONDISK_ALLOCATION
653 ,IN uint32 FE_lba,
654 IN uint32 BugCheckId,
655 IN uint32 Line
656 #endif //UDF_TRACK_ONDISK_ALLOCATION
657 )
658 {
659 uint32 i=0;
660 uint32 lba, j, len, BS, BSh;
661 BOOLEAN asUsed = (asXXX == AS_USED);
662
663 if(!Map) return;
664
665 BS = Vcb->BlockSize;
666 BSh = Vcb->BlockSizeBits;
667
668 UDFAcquireResourceShared(&(Vcb->BitMapResource1),TRUE);
669 // walk through all frags in data area specified
670 #ifdef UDF_TRACK_ONDISK_ALLOCATION
671 AdPrint(("ChkAlloc:Map:%x:File:%x:Line:%d\n",
672 Map,
673 BugCheckId,
674 Line
675 ));
676 #endif //UDF_TRACK_ONDISK_ALLOCATION
677 while(Map[i].extLength & UDF_EXTENT_LENGTH_MASK) {
678
679 #ifdef UDF_TRACK_ONDISK_ALLOCATION
680 AdPrint(("ChkAlloc:%x:%s:%x:@:%x:(%x):File:%x:Line:%d\n",
681 FE_lba,
682 asUsed ? "U" : "F",
683 (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh,
684 Map[i].extLocation,
685 (Map[i].extLength >> 30),
686 BugCheckId,
687 Line
688 ));
689 #endif //UDF_TRACK_ONDISK_ALLOCATION
690 if(asUsed) {
691 UDFCheckUsedBitOwner(Vcb, (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh, FE_lba);
692 } else {
693 UDFCheckFreeBitOwner(Vcb, (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
694 }
695
696 if((Map[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
697 // skip unallocated frags
698 // ASSERT(!(Map[i].extLength & UDF_EXTENT_LENGTH_MASK));
699 ASSERT(!Map[i].extLocation);
700 i++;
701 continue;
702 } else {
703 // ASSERT(!(Map[i].extLength & UDF_EXTENT_LENGTH_MASK));
704 ASSERT(Map[i].extLocation);
705 }
706
707 #ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
708 ASSERT(!(Map[i].extLength & (BS-1)));
709 #endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
710 len = ((Map[i].extLength & UDF_EXTENT_LENGTH_MASK)+BS-1) >> BSh;
711 lba = Map[i].extLocation;
712 if((lba+len) > Vcb->LastPossibleLBA) {
713 // skip blocks beyond media boundary
714 if(lba > Vcb->LastPossibleLBA) {
715 ASSERT(FALSE);
716 i++;
717 continue;
718 }
719 len = Vcb->LastPossibleLBA - lba;
720 }
721
722 // mark frag as XXX (see asUsed parameter)
723 if(asUsed) {
724
725 ASSERT(len);
726 for(j=0;j<len;j++) {
727 if(lba+j > Vcb->LastPossibleLBA) {
728 BrutePoint();
729 AdPrint(("USED Mapping covers block(s) beyond media @%x\n",lba+j));
730 break;
731 }
732 if(!UDFGetUsedBit(Vcb->FSBM_Bitmap, lba+j)) {
733 BrutePoint();
734 AdPrint(("USED Mapping covers FREE block(s) @%x\n",lba+j));
735 break;
736 }
737 }
738
739 } else {
740
741 ASSERT(len);
742 for(j=0;j<len;j++) {
743 if(lba+j > Vcb->LastPossibleLBA) {
744 BrutePoint();
745 AdPrint(("FREE Mapping covers block(s) beyond media @%x\n",lba+j));
746 break;
747 }
748 if(!UDFGetFreeBit(Vcb->FSBM_Bitmap, lba+j)) {
749 BrutePoint();
750 AdPrint(("FREE Mapping covers USED block(s) @%x\n",lba+j));
751 break;
752 }
753 }
754 }
755
756 i++;
757 }
758 UDFReleaseResource(&(Vcb->BitMapResource1));
759 } // end UDFCheckSpaceAllocation_()
760 #endif //UDF_CHECK_DISK_ALLOCATION
761
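/*
    This routine clears the free-space bits (FSBM) in every 32-bit word overlapping
    the range [lba, lba+len) wherever the corresponding bits are set in the
    bad-space bitmap (BSBM), so known-bad blocks are never handed out as free space.
    Note that it works with uint32-word granularity rather than individual blocks.
*/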
762 void
763 UDFMarkBadSpaceAsUsed(
764 IN PVCB Vcb,
765 IN lba_t lba,
766 IN ULONG len
767 )
768 {
769 uint32 j;
770 #define BIT_C (sizeof(Vcb->BSBM_Bitmap[0])*8)
771 len = (lba+len+BIT_C-1)/BIT_C;
772 if(Vcb->BSBM_Bitmap) {
773 for(j=lba/BIT_C; j<len; j++) {
774 Vcb->FSBM_Bitmap[j] &= ~Vcb->BSBM_Bitmap[j];
775 }
776 }
777 #undef BIT_C
778 } // UDFMarkBadSpaceAsUsed()
779
780 /*
781 This routine marks the space described by Mapping as Used/Freed (optionally)
782 */
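// As interpreted by the code below, asXXX selects the target state of each mapped
// frag: AS_USED (or any value with AS_BAD set) marks the blocks as allocated in the
// free-space bitmap, anything else returns them to free space; when freeing, the
// AS_DISCARDED flag additionally unmaps the range, discards it from the write cache
// and marks it as zero-filled.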
783 void
784 UDFMarkSpaceAsXXXNoProtect_(
785 IN PVCB Vcb,
786 IN PEXTENT_MAP Map,
787 IN uint32 asXXX
788 #ifdef UDF_TRACK_ONDISK_ALLOCATION
789 ,IN uint32 FE_lba,
790 IN uint32 BugCheckId,
791 IN uint32 Line
792 #endif //UDF_TRACK_ONDISK_ALLOCATION
793 )
794 {
795 uint32 i=0;
796 uint32 lba, j, len, BS, BSh;
797 uint32 root;
798 BOOLEAN asUsed = (asXXX == AS_USED || (asXXX & AS_BAD));
799 #ifdef UDF_TRACK_ONDISK_ALLOCATION
800 BOOLEAN bit_before, bit_after;
801 #endif //UDF_TRACK_ONDISK_ALLOCATION
802
803 UDF_CHECK_BITMAP_RESOURCE(Vcb);
804
805 if(!Map) return;
806
807 BS = Vcb->BlockSize;
808 BSh = Vcb->BlockSizeBits;
809 Vcb->BitmapModified = TRUE;
810 UDFSetModified(Vcb);
811 // walk through all frags in data area specified
812 while(Map[i].extLength & UDF_EXTENT_LENGTH_MASK) {
813 if((Map[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
814 // skip unallocated frags
815 i++;
816 continue;
817 }
818 ASSERT(Map[i].extLocation);
819
820 #ifdef UDF_TRACK_ONDISK_ALLOCATION
821 AdPrint(("Alloc:%x:%s:%x:@:%x:File:%x:Line:%d\n",
822 FE_lba,
823 asUsed ? ((asXXX & AS_BAD) ? "B" : "U") : "F",
824 (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> Vcb->BlockSizeBits,
825 Map[i].extLocation,
826 BugCheckId,
827 Line
828 ));
829 #endif //UDF_TRACK_ONDISK_ALLOCATION
830
831 #ifdef UDF_DBG
832 #ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
833 ASSERT(!(Map[i].extLength & (BS-1)));
834 #endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
835 // len = ((Map[i].extLength & UDF_EXTENT_LENGTH_MASK)+BS-1) >> BSh;
836 #else // UDF_DBG
837 // len = (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh;
838 #endif // UDF_DBG
839 len = ((Map[i].extLength & UDF_EXTENT_LENGTH_MASK)+BS-1) >> BSh;
840 lba = Map[i].extLocation;
841 if((lba+len) > Vcb->LastPossibleLBA) {
842 // skip blocks beyond media boundary
843 if(lba > Vcb->LastPossibleLBA) {
844 ASSERT(FALSE);
845 i++;
846 continue;
847 }
848 len = Vcb->LastPossibleLBA - lba;
849 }
850
851 #ifdef UDF_TRACK_ONDISK_ALLOCATION
852 if(lba)
853 bit_before = UDFGetBit(Vcb->FSBM_Bitmap, lba-1);
854 bit_after = UDFGetBit(Vcb->FSBM_Bitmap, lba+len);
855 #endif //UDF_TRACK_ONDISK_ALLOCATION
856
857 // mark frag as XXX (see asUsed parameter)
858 if(asUsed) {
859 /* for(j=0;j<len;j++) {
860 UDFSetUsedBit(Vcb->FSBM_Bitmap, lba+j);
861 }*/
862 ASSERT(len);
863 UDFSetUsedBits(Vcb->FSBM_Bitmap, lba, len);
864 #ifdef UDF_TRACK_ONDISK_ALLOCATION
865 for(j=0;j<len;j++) {
866 ASSERT(UDFGetUsedBit(Vcb->FSBM_Bitmap, lba+j));
867 }
868 #endif //UDF_TRACK_ONDISK_ALLOCATION
869
870 if(Vcb->Vat) {
871 // mark logical blocks in VAT as used
872 for(j=0;j<len;j++) {
873 root = UDFPartStart(Vcb, UDFGetPartNumByPhysLba(Vcb, lba));
874 if((Vcb->Vat[lba-root+j] == UDF_VAT_FREE_ENTRY) &&
875 (lba > Vcb->LastLBA)) {
876 Vcb->Vat[lba-root+j] = 0x7fffffff;
877 }
878 }
879 }
880 } else {
881 /* for(j=0;j<len;j++) {
882 UDFSetFreeBit(Vcb->FSBM_Bitmap, lba+j);
883 }*/
884 ASSERT(len);
885 UDFSetFreeBits(Vcb->FSBM_Bitmap, lba, len);
886 #ifdef UDF_TRACK_ONDISK_ALLOCATION
887 for(j=0;j<len;j++) {
888 ASSERT(UDFGetFreeBit(Vcb->FSBM_Bitmap, lba+j));
889 }
890 #endif //UDF_TRACK_ONDISK_ALLOCATION
891 if(asXXX & AS_BAD) {
892 UDFSetBits(Vcb->BSBM_Bitmap, lba, len);
893 }
894 UDFMarkBadSpaceAsUsed(Vcb, lba, len);
895
896 if(asXXX & AS_DISCARDED) {
897 UDFUnmapRange(Vcb, lba, len);
898 WCacheDiscardBlocks__(&(Vcb->FastCache), Vcb, lba, len);
899 UDFSetZeroBits(Vcb->ZSBM_Bitmap, lba, len);
900 }
901 if(Vcb->Vat) {
902 // mark logical blocks in VAT as free
903 // this operation can decrease resulting VAT size
904 for(j=0;j<len;j++) {
905 root = UDFPartStart(Vcb, UDFGetPartNumByPhysLba(Vcb, lba));
906 Vcb->Vat[lba-root+j] = UDF_VAT_FREE_ENTRY;
907 }
908 }
909 // mark discarded extent as Not-Alloc-Not-Rec to
910 // prevent writes there
911 Map[i].extLength = (len << BSh) | (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
912 Map[i].extLocation = 0;
913 }
914
915 #ifdef UDF_TRACK_ONDISK_ALLOCATION
916 if(lba)
917 ASSERT(bit_before == UDFGetBit(Vcb->FSBM_Bitmap, lba-1));
918 ASSERT(bit_after == UDFGetBit(Vcb->FSBM_Bitmap, lba+len));
919 #endif //UDF_TRACK_ONDISK_ALLOCATION
920
921 i++;
922 }
923 } // end UDFMarkSpaceAsXXXNoProtect_()
924
925 /*
926 This routine marks the space described by Mapping as Used/Freed (optionally).
927 It protects the bitmap with a synchronization Resource.
928 */
929 void
930 UDFMarkSpaceAsXXX_(
931 IN PVCB Vcb,
932 IN PEXTENT_MAP Map,
933 IN uint32 asXXX
934 #ifdef UDF_TRACK_ONDISK_ALLOCATION
935 ,IN uint32 FE_lba,
936 IN uint32 BugCheckId,
937 IN uint32 Line
938 #endif //UDF_TRACK_ONDISK_ALLOCATION
939 )
940 {
941 if(!Map) return;
942 if(!Map[0].extLength) {
943 #ifdef UDF_DBG
944 ASSERT(!Map[0].extLocation);
945 #endif // UDF_DBG
946 return;
947 }
948
949 UDFAcquireResourceExclusive(&(Vcb->BitMapResource1),TRUE);
950 #ifdef UDF_TRACK_ONDISK_ALLOCATION
951 UDFMarkSpaceAsXXXNoProtect_(Vcb, Map, asXXX, FE_lba, BugCheckId, Line);
952 #else //UDF_TRACK_ONDISK_ALLOCATION
953 UDFMarkSpaceAsXXXNoProtect_(Vcb, Map, asXXX);
954 #endif //UDF_TRACK_ONDISK_ALLOCATION
955 UDFReleaseResource(&(Vcb->BitMapResource1));
956
957 } // end UDFMarkSpaceAsXXX_()
958
959 #ifndef UDF_READ_ONLY_BUILD
960 /*
961 This routine builds a mapping for Length bytes taken from the free space.
962 It should be used when the IN_ICB method is unavailable.
963 */
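/*
    Sketch of the allocation loop below: the byte Length is rounded up to the logical
    block size and converted to a block count, then UDFFindMinSuitableExtent() is
    called repeatedly; each frag found is verified when EXTENT_FLAG_VERIFY is set,
    marked as used and appended to ExtInfo->Mapping via UDFMergeMappings() until the
    requested length is covered. If free space runs out, everything allocated so far
    is released again and STATUS_DISK_FULL is returned.
*/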
964 OSSTATUS
965 UDFAllocFreeExtent_(
966 IN PVCB Vcb,
967 IN int64 Length,
968 IN uint32 SearchStart,
969 IN uint32 SearchLim, // NOT included
970 OUT PEXTENT_INFO ExtInfo,
971 IN uint8 AllocFlags
972 #ifdef UDF_TRACK_ALLOC_FREE_EXTENT
973 ,IN uint32 src,
974 IN uint32 line
975 #endif //UDF_TRACK_ALLOC_FREE_EXTENT
976 )
977 {
978 EXTENT_AD Ext;
979 PEXTENT_MAP Map = NULL;
980 uint32 len, LBS, BSh, blen;
981
982 LBS = Vcb->LBlockSize;
983 BSh = Vcb->BlockSizeBits;
984 blen = (uint32)(((Length+LBS-1) & ~((int64)LBS-1)) >> BSh);
985 ExtInfo->Mapping = NULL;
986 ExtInfo->Offset = 0;
987
988 ASSERT(blen <= (uint32)(UDF_MAX_EXTENT_LENGTH >> BSh));
989
990 UDFAcquireResourceExclusive(&(Vcb->BitMapResource1),TRUE);
991
992 if(blen > (SearchLim - SearchStart)) {
993 goto no_free_space_err;
994 }
995 // walk through the free space bitmap & find a single extent or a set of
996 // frags whose total size equals the Length specified
997 while(blen) {
998 Ext.extLocation = UDFFindMinSuitableExtent(Vcb, blen, SearchStart,
999 SearchLim, &len, AllocFlags);
1000
1001 // ASSERT(len <= (uint32)(UDF_MAX_EXTENT_LENGTH >> BSh));
1002 if(len >= blen) {
1003 // complete search
1004 Ext.extLength = blen<<BSh;
1005 blen = 0;
1006 } else if(len) {
1007 // we still need some frags to complete the request &
1008 // probably we have the opportunity to do it
1009 Ext.extLength = len<<BSh;
1010 blen -= len;
1011 } else {
1012 no_free_space_err:
1013 // no more free space. abort
1014 if(ExtInfo->Mapping) {
1015 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, ExtInfo->Mapping, AS_DISCARDED); // free
1016 MyFreePool__(ExtInfo->Mapping);
1017 ExtInfo->Mapping = NULL;
1018 }
1019 UDFReleaseResource(&(Vcb->BitMapResource1));
1020 ExtInfo->Length = 0;//UDFGetExtentLength(ExtInfo->Mapping);
1021 AdPrint((" DISK_FULL\n"));
1022 return STATUS_DISK_FULL;
1023 }
1024 // append the frag found to mapping
1025 ASSERT(!(Ext.extLength >> 30));
1026 ASSERT(Ext.extLocation);
1027
1028 // mark newly allocated blocks as zero-filled
1029 UDFSetZeroBits(Vcb->ZSBM_Bitmap, Ext.extLocation, (Ext.extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
1030
1031 if(AllocFlags & EXTENT_FLAG_VERIFY) {
1032 if(!UDFCheckArea(Vcb, Ext.extLocation, Ext.extLength >> BSh)) {
1033 AdPrint(("newly allocated extent contains BB\n"));
1034 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, ExtInfo->Mapping, AS_DISCARDED); // free
1035 UDFMarkBadSpaceAsUsed(Vcb, Ext.extLocation, Ext.extLength >> BSh); // bad -> bad+used
1036 // roll back
1037 blen += Ext.extLength>>BSh;
1038 continue;
1039 }
1040 }
1041
1042 Ext.extLength |= EXTENT_NOT_RECORDED_ALLOCATED << 30;
1043 if(!(ExtInfo->Mapping)) {
1044 // create new
1045 #ifdef UDF_TRACK_ALLOC_FREE_EXTENT
1046 ExtInfo->Mapping = UDFExtentToMapping_(&Ext, src, line);
1047 #else // UDF_TRACK_ALLOC_FREE_EXTENT
1048 ExtInfo->Mapping = UDFExtentToMapping(&Ext);
1049 #endif // UDF_TRACK_ALLOC_FREE_EXTENT
1050 if(!ExtInfo->Mapping) {
1051 BrutePoint();
1052 UDFReleaseResource(&(Vcb->BitMapResource1));
1053 ExtInfo->Length = 0;
1054 return STATUS_INSUFFICIENT_RESOURCES;
1055 }
1056 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, ExtInfo->Mapping, AS_USED); // used
1057 } else {
1058 // update existing
1059 Map = UDFExtentToMapping(&Ext);
1060 if(!Map) {
1061 BrutePoint();
1062 UDFReleaseResource(&(Vcb->BitMapResource1));
1063 ExtInfo->Length = UDFGetExtentLength(ExtInfo->Mapping);
1064 return STATUS_INSUFFICIENT_RESOURCES;
1065 }
1066 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, Map, AS_USED); // used
1067 ExtInfo->Mapping = UDFMergeMappings(ExtInfo->Mapping, Map);
1068 MyFreePool__(Map);
1069 }
1070 if(!ExtInfo->Mapping) {
1071 BrutePoint();
1072 UDFReleaseResource(&(Vcb->BitMapResource1));
1073 ExtInfo->Length = 0;
1074 return STATUS_INSUFFICIENT_RESOURCES;
1075 }
1076 }
1077 UDFReleaseResource(&(Vcb->BitMapResource1));
1078 ExtInfo->Length = Length;
1079 return STATUS_SUCCESS;
1080 } // end UDFAllocFreeExtent_()
1081 #endif //UDF_READ_ONLY_BUILD
1082
1083 /*
1084 Returns block-count
1085 */
1086 uint32
1087 __fastcall
1088 UDFGetPartFreeSpace(
1089 IN PVCB Vcb,
1090 IN uint32 partNum
1091 )
1092 {
1093 uint32 lim/*, len=1*/;
1094 uint32 s=0;
1095 uint32 j;
1096 PUCHAR cur = (PUCHAR)(Vcb->FSBM_Bitmap);
1097
1098 lim = (UDFPartEnd(Vcb,partNum)+7)/8;
1099 for(j=(UDFPartStart(Vcb,partNum)+7)/8; j<lim/* && len*/; j++) {
1100 s+=bit_count_tab[cur[j]];
1101 }
1102 return s;
1103 } // end UDFGetPartFreeSpace()
1104
1105 int64
1106 __fastcall
1107 UDFGetFreeSpace(
1108 IN PVCB Vcb
1109 )
1110 {
1111 int64 s=0;
1112 uint32 i;
1113 // uint32* cur = (uint32*)(Vcb->FSBM_Bitmap);
1114
1115 if(!Vcb->CDR_Mode &&
1116 !(Vcb->VCBFlags & UDF_VCB_FLAGS_RAW_DISK)) {
1117 for(i=0;i<Vcb->PartitionMaps;i++) {
1118 /* lim = UDFPartEnd(Vcb,i);
1119 for(j=UDFPartStart(Vcb,i); j<lim && len; ) {
1120 len = UDFGetBitmapLen(cur, j, lim);
1121 if(UDFGetFreeBit(cur, j)) // is the extent found free or used ?
1122 s+=len;
1123 j+=len;
1124 }*/
1125 s += UDFGetPartFreeSpace(Vcb, i);
1126 }
1127 } else {
1128 ASSERT(Vcb->LastPossibleLBA >= max(Vcb->NWA, Vcb->LastLBA));
1129 s = Vcb->LastPossibleLBA - max(Vcb->NWA, Vcb->LastLBA);
1130 //if(s & ((int64)1 << 64)) s=0;
1131 }
1132 return s >> Vcb->LB2B_Bits;
1133 } // end UDFGetFreeSpace()
1134
1135 /*
1136 Returns block-count
1137 */
1138 int64
1139 __fastcall
1140 UDFGetTotalSpace(
1141 IN PVCB Vcb
1142 )
1143 {
1144 int64 s=0;
1145 uint32 i;
1146
1147 if(Vcb->VCBFlags & UDF_VCB_FLAGS_RAW_DISK) {
1148 s= Vcb->LastPossibleLBA;
1149 } else if(!Vcb->CDR_Mode) {
1150 for(i=0;i<Vcb->PartitionMaps;i++) {
1151 s+=Vcb->Partitions[i].PartitionLen;
1152 }
1153 } else {
1154 if(s & ((int64)1 << 63)) s=0; /* FIXME ReactOS this shift value was 64, which is undefined behavior. */
1155 s= Vcb->LastPossibleLBA - Vcb->Partitions[0].PartitionRoot;
1156 }
1157 return s >> Vcb->LB2B_Bits;
1158 } // end UDFGetTotalSpace()
1159
1160 /*
1161 Callback for WCache.
1162 Returns Allocated and Zero-filled flags for the given block.
1163 Any data in 'unallocated' blocks may be changed during the flush process.
1164 */
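// The value returned below is a combination of WCACHE_BLOCK_USED (the block is marked
// allocated in the free-space bitmap) and WCACHE_BLOCK_ZERO (the block is known to be
// zero-filled), or just WCACHE_BLOCK_USED when UDF_VCB_ASSUME_ALL_USED is set.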
1165 uint32
1166 UDFIsBlockAllocated(
1167 IN void* _Vcb,
1168 IN uint32 Lba
1169 )
1170 {
1171 ULONG ret_val = 0;
1172 uint32* bm;
1173 // return TRUE;
1174 if(!(((PVCB)_Vcb)->VCBFlags & UDF_VCB_ASSUME_ALL_USED)) {
1175 // check used
1176 if((bm = (uint32*)(((PVCB)_Vcb)->FSBM_Bitmap)))
1177 ret_val = (UDFGetUsedBit(bm, Lba) ? WCACHE_BLOCK_USED : 0);
1178 // check zero-filled
1179 if((bm = (uint32*)(((PVCB)_Vcb)->ZSBM_Bitmap)))
1180 ret_val |= (UDFGetZeroBit(bm, Lba) ? WCACHE_BLOCK_ZERO : 0);
1181 } else {
1182 ret_val = WCACHE_BLOCK_USED;
1183 }
1184 // check bad block
1185
1186 // WCache works with LOGICAL addresses, not PHYSICAL, BB check must be performed UNDER cache
1187 /*
1188 if(bm = (uint32*)(((PVCB)_Vcb)->BSBM_Bitmap)) {
1189 ret_val |= (UDFGetBadBit(bm, Lba) ? WCACHE_BLOCK_BAD : 0);
1190 if(ret_val & WCACHE_BLOCK_BAD) {
1191 KdPrint(("Marked BB @ %#x\n", Lba));
1192 }
1193 }
1194 */
1195 return ret_val;
1196 } // end UDFIsBlockAllocated()
1197
1198 #ifdef _X86_
1199
1200 #ifdef _MSC_VER
1201 #pragma warning(disable:4035) // re-enable below
1202 #endif
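/*
    Bit-array helpers. In all routines below bit N of the array lives in uint32 word
    (N >> 5) at bit position (N & 31); the MSVC builds use hand-written x86 assembler,
    while the generic C fallbacks implement the same operations.
*/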
1203
1204 #ifdef _MSC_VER
1205 __declspec (naked)
1206 #endif
1207 BOOLEAN
1208 __fastcall
1209 UDFGetBit__(
1210 IN uint32* arr, // ECX
1211 IN uint32 bit // EDX
1212 )
1213 {
1214 // CheckAddr(arr);
1215 // ASSERT(bit < 300000);
1216 #ifdef _MSC_VER
1217 __asm {
1218 push ebx
1219 push ecx
1220 // mov eax,bit
1221 mov eax,edx
1222 shr eax,3
1223 and al,0fch
1224 add eax,ecx // eax+arr
1225 mov eax,[eax]
1226 mov cl,dl
1227 ror eax,cl
1228 and eax,1
1229
1230 pop ecx
1231 pop ebx
1232 ret
1233 }
1234 #else
1235 /* FIXME ReactOS */
1236 return ((BOOLEAN)(((((uint32*)(arr))[(bit)>>5]) >> ((bit)&31)) &1));
1237 #endif
1238 } // end UDFGetBit__()
1239
1240 #ifdef _MSC_VER
1241 __declspec (naked)
1242 #endif
1243 void
1244 __fastcall
1245 UDFSetBit__(
1246 IN uint32* arr, // ECX
1247 IN uint32 bit // EDX
1248 )
1249 {
1250 // CheckAddr(arr);
1251 // ASSERT(bit < 300000);
1252 #ifdef _MSC_VER
1253 __asm {
1254 push eax
1255 push ebx
1256 push ecx
1257 // mov eax,bit
1258 mov eax,edx
1259 shr eax,3
1260 and al,0fch
1261 add eax,ecx // eax+arr
1262 mov ebx,1
1263 mov cl,dl
1264 rol ebx,cl
1265 or [eax],ebx
1266
1267 pop ecx
1268 pop ebx
1269 pop eax
1270 ret
1271 }
1272 #else
1273 /* FIXME ReactOS */
1274 (((uint32*)(arr))[(bit)>>5]) |= (((uint32)1) << ((bit)&31));
1275 #endif
1276 } // end UDFSetBit__()
1277
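/*
    This routine sets bc consecutive bits starting at bit 'bit'. The assembler version
    fast-paths fully covered uint32 words by storing 0xffffffff directly; the generic C
    version simply calls UDFSetBit() in a loop. UDFClrBits__() below is its counterpart
    for clearing bits.
*/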
1278 void
1279 UDFSetBits__(
1280 IN uint32* arr,
1281 IN uint32 bit,
1282 IN uint32 bc
1283 )
1284 {
1285 #ifdef _MSC_VER
1286 __asm {
1287 push eax
1288 push ebx
1289 push ecx
1290 push edx
1291 push esi
1292
1293 mov edx,bc
1294 or edx,edx
1295 jz short EO_sb_loop
1296
1297 mov ecx,bit
1298 mov esi,arr
1299
1300 mov ebx,1
1301 rol ebx,cl
1302
1303 mov eax,ecx
1304 shr eax,3
1305 and al,0fch
1306
1307 test cl, 0x1f
1308 jnz short sb_loop_cont
1309 sb_loop_2:
1310 cmp edx,0x20
1311 jb short sb_loop_cont
1312
1313 mov [dword ptr esi+eax],0xffffffff
1314 sub edx,0x20
1315 jz short EO_sb_loop
1316 add eax,4
1317 add ecx,0x20
1318 jmp short sb_loop_2
1319
1320 sb_loop_cont:
1321 or [esi+eax],ebx
1322
1323 rol ebx,1
1324 inc ecx
1325 dec edx
1326 jz short EO_sb_loop
1327
1328 test cl, 0x1f
1329 jnz short sb_loop_cont
1330 add eax,4
1331 jmp short sb_loop_2
1332 EO_sb_loop:
1333 pop esi
1334 pop edx
1335 pop ecx
1336 pop ebx
1337 pop eax
1338 }
1339 #else
1340 /* FIXME ReactOS */
1341 uint32 j;
1342 for(j=0;j<bc;j++) {
1343 UDFSetBit(arr, bit+j);
1344 }
1345 #endif
1346 } // end UDFSetBits__()
1347
1348 #ifdef _MSC_VER
1349 __declspec (naked)
1350 #endif
1351 void
1352 __fastcall
1353 UDFClrBit__(
1354 IN uint32* arr, // ECX
1355 IN uint32 bit // EDX
1356 )
1357 {
1358 // CheckAddr(arr);
1359 // ASSERT(bit < 300000);
1360 #ifdef _MSC_VER
1361 __asm {
1362 push eax
1363 push ebx
1364 push ecx
1365 // mov eax,bit
1366 mov eax,edx
1367 shr eax,3
1368 and al,0fch
1369 add eax,ecx // eax+arr
1370 mov ebx,0fffffffeh
1371 mov cl,dl
1372 rol ebx,cl
1373 and [eax],ebx
1374
1375 pop ecx
1376 pop ebx
1377 pop eax
1378 ret
1379 }
1380 #else
1381 /* FIXME ReactOS */
1382 (((uint32*)(arr))[(bit)>>5]) &= (~(((uint32)1) << ((bit)&31)));
1383 #endif
1384 } // end UDFClrBit__()
1385
1386 void
1387 UDFClrBits__(
1388 IN uint32* arr,
1389 IN uint32 bit,
1390 IN uint32 bc
1391 )
1392 {
1393 #ifdef _MSC_VER
1394 __asm {
1395 push eax
1396 push ebx
1397 push ecx
1398 push edx
1399 push esi
1400
1401 mov edx,bc
1402 or edx,edx
1403 jz short EO_cp_loop
1404
1405 mov ecx,bit
1406 mov esi,arr
1407
1408 mov ebx,0xfffffffe
1409 rol ebx,cl
1410
1411 mov eax,ecx
1412 shr eax,3
1413 and al,0fch
1414
1415 test cl, 0x1f
1416 jnz short cp_loop_cont
1417 cp_loop_2:
1418 cmp edx,0x20
1419 jb short cp_loop_cont
1420
1421 mov [dword ptr esi+eax],0x00000000
1422 sub edx,0x20
1423 jz short EO_cp_loop
1424 add eax,4
1425 add ecx,0x20
1426 jmp short cp_loop_2
1427
1428 cp_loop_cont:
1429 and [esi+eax],ebx
1430
1431 rol ebx,1
1432 inc ecx
1433 dec edx
1434 jz short EO_cp_loop
1435
1436 test cl, 0x1f
1437 jnz short cp_loop_cont
1438 add eax,4
1439 jmp short cp_loop_2
1440 EO_cp_loop:
1441 pop esi
1442 pop edx
1443 pop ecx
1444 pop ebx
1445 pop eax
1446 }
1447 #else
1448 /* FIXME ReactOS */
1449 uint32 j;
1450 for(j=0;j<bc;j++) {
1451 UDFClrBit(arr, bit+j);
1452 }
1453 #endif
1454 } // end UDFClrBits__()
1455
1456 #ifdef _MSC_VER
1457 #pragma warning(default:4035)
1458 #endif
1459 #endif // _X86_