1 ////////////////////////////////////////////////////////////////////
2 // Copyright (C) Alexander Telyatnikov, Ivan Keliukh, Yegor Anchishkin, SKIF Software, 1999-2013. Kiev, Ukraine
3 // All rights reserved
4 // This file was released under the GPLv2 on June 2015.
5 ////////////////////////////////////////////////////////////////////
6 /*
7 Module name:
8
9 extent.cpp
10
11 Abstract:
12
13 This file contains filesystem-specific routines
14 responsible for extent & mapping management
15
16 */
17
18 #include "udf.h"
19
20 #define UDF_BUG_CHECK_ID UDF_FILE_UDF_INFO_EXTENT
21
22 /*
23 This routine converts an offset in an extent to an Lba & returns the offset in the 1st
24 sector & the number of bytes left before the end of the block.
25 Here we assume no references to AllocDescs
26 */
27 uint32
28 UDFExtentOffsetToLba(
29 IN PVCB Vcb,
30 IN PEXTENT_MAP Extent, // Extent array
31 IN int64 Offset, // offset in extent
32 OUT uint32* SectorOffset,
33 OUT uint32* AvailLength, // available data in this block
34 OUT uint32* Flags,
35 OUT uint32* Index
36 )
37 {
38 uint32 j=0, l, d, BSh = Vcb->BlockSizeBits;
39 uint32 Offs;
40 uint32 i=0, BOffset; // block nums
41
42 BOffset = (uint32)(Offset >> BSh);
43 // scan extent table for suitable range (frag)
44 ExtPrint(("ExtLen %x\n", Extent->extLength));
45 while(i+(d = (l = (Extent->extLength & UDF_EXTENT_LENGTH_MASK)) >> BSh) <= BOffset) {
46
47 if(!l) {
48 if(Index) (*Index) = j-1;
49 if(Flags) {
50 Extent--;
51 (*Flags) = (Extent->extLength >> 30);
52 }
53 return LBA_OUT_OF_EXTENT;
54 }
55 if(!d)
56 break;
57 i += d; //frag offset
58 j++; // frag index
59 Extent++;
60 }
61 BOffset -= i;
62 Offs = (*((uint32*)&Offset)) - (i << BSh); // offset in frag
63
64 if(SectorOffset)
65 (*SectorOffset) = Offs & (Vcb->BlockSize-1);// offset in 1st Lba
66 if(AvailLength)
67 (*AvailLength) = l - Offs;// bytes to EO frag
68 if(Flags)
69 (*Flags) = (Extent->extLength >> 30);
70 if(Index)
71 (*Index) = j;
72
73 ASSERT(((Extent->extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) || Extent->extLocation);
74
75 return Extent->extLocation + BOffset;// 1st Lba
76 } // end UDFExtentOffsetToLba()
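/*
    Usage sketch (illustrative only, not part of the driver): a caller that
    needs to translate a byte range into physical blocks would typically loop
    over UDFExtentOffsetToLba(), advancing by AvailLength until the range is
    covered. 'StartOffset', 'Length', 'ExtInfo' and 'ProcessChunk' are
    placeholders, not real driver symbols.

        int64  Offs = StartOffset;
        uint32 Remaining = Length;
        while(Remaining) {
            uint32 SecOffs, Avail, Flags, Index;
            uint32 Lba = UDFExtentOffsetToLba(Vcb, ExtInfo->Mapping, Offs,
                                              &SecOffs, &Avail, &Flags, &Index);
            if(Lba == LBA_OUT_OF_EXTENT)
                break;                               // Offs is beyond the mapped extent
            uint32 chunk = (Avail < Remaining) ? Avail : Remaining;
            ProcessChunk(Lba, SecOffs, chunk, Flags); // hypothetical callback
            Offs      += chunk;
            Remaining -= chunk;
        }
*/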
77
78 uint32
79 UDFNextExtentToLba(
80 IN PVCB Vcb,
81 IN PEXTENT_MAP Extent, // Extent array
82 OUT uint32* AvailLength, // available data in this block
83 OUT uint32* Flags,
84 OUT uint32* Index
85 )
86 {
87 // uint32 Lba;
88
89 uint32 l;
90 // uint32 d;
91
92 // scan extent table for suitable range (frag)
93 // d = (l = (Extent->extLength & UDF_EXTENT_LENGTH_MASK));
94 l = (Extent->extLength & UDF_EXTENT_LENGTH_MASK);
95
96 if(!l) {
97 (*Index) = -1;
98 Extent--;
99 (*Flags) = (Extent->extLength >> 30);
100 return LBA_OUT_OF_EXTENT;
101 }
102
103 (*Index) = 0;
104 (*AvailLength) = l;// bytes to EO frag
105 (*Flags) = (Extent->extLength >> 30);
106
107 ASSERT(((*Flags) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) || Extent->extLocation);
108
109 return Extent->extLocation;// 1st Lba
110 } // end UDFNextExtentToLba()
111
112 /*
113 This routine locates the frag containing the specified Lba in the given extent
114 */
115 ULONG
116 UDFLocateLbaInExtent(
117 IN PVCB Vcb,
118 IN PEXTENT_MAP Extent, // Extent array
119 IN lba_t lba
120 )
121 {
122 uint32 l, BSh = Vcb->BlockSizeBits;
123 uint32 i=0;
124
125 while((l = ((Extent->extLength & UDF_EXTENT_LENGTH_MASK) >> BSh))) {
126
127 if(Extent->extLocation <= lba &&
128 Extent->extLocation+l > lba) {
129 return i;
130 }
131 i++; //frag offset
132 Extent++;
133 }
134 return LBA_OUT_OF_EXTENT; // the specified Lba is not covered by any frag of this extent
135 } // end UDFLocateLbaInExtent()
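/*
    Usage sketch (illustrative): the return value is an index into the mapping
    array, or LBA_OUT_OF_EXTENT if no frag covers the block. 'ExtInfo' and
    'Lba' below are placeholders.

        uint32 idx = UDFLocateLbaInExtent(Vcb, ExtInfo->Mapping, Lba);
        if(idx != LBA_OUT_OF_EXTENT) {
            // Lba belongs to frag ExtInfo->Mapping[idx]
        }
*/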
136
137 /*
138 This routine calculates total length of specified extent.
139 Here we assume no references to AllocDescs
140 */
141 int64
142 UDFGetExtentLength(
143 IN PEXTENT_MAP Extent // Extent array
144 )
145 {
146 if(!Extent) return 0;
147 int64 i=0;
148
149 //#ifdef _X86_
150 #ifdef _MSC_VER
151
152 __asm push ebx
153 __asm push ecx
154 __asm push esi
155
156 __asm lea ebx,i
157 __asm mov esi,Extent
158 __asm xor ecx,ecx
159 While_1:
160 __asm mov eax,[esi+ecx*8] // Extent[j].extLength
161 __asm and eax,UDF_EXTENT_LENGTH_MASK
162 __asm jz EO_While
163 __asm add [ebx],eax
164 __asm adc [ebx+4],0
165 __asm inc ecx
166 __asm jmp While_1
167 EO_While:;
168 __asm pop esi
169 __asm pop ecx
170 __asm pop ebx
171
172 #else // NO X86 optimization , use generic C/C++
173
174 while(Extent->extLength) {
175 i += (Extent->extLength & UDF_EXTENT_LENGTH_MASK);
176 Extent++;
177 }
178
179 #endif // _MSC_VER
180
181 return i;
182 } // UDFGetExtentLength()
183
184 /*
185 This routine appends a zero-terminator to a single Extent entry.
186 This makes it compatible with the other internal routines
187 */
188 PEXTENT_MAP
189 __fastcall
190 UDFExtentToMapping_(
191 IN PEXTENT_AD Extent
192 #ifdef UDF_TRACK_EXTENT_TO_MAPPING
193 ,IN ULONG src,
194 IN ULONG line
195 #endif //UDF_TRACK_EXTENT_TO_MAPPING
196 )
197 {
198 PEXTENT_MAP Map;
199
200 #ifdef UDF_TRACK_EXTENT_TO_MAPPING
201 #define UDF_EXT_MAP_MULT 4
202 #else //UDF_TRACK_EXTENT_TO_MAPPING
203 #define UDF_EXT_MAP_MULT 2
204 #endif //UDF_TRACK_EXTENT_TO_MAPPING
205
206 Map = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , UDF_EXT_MAP_MULT *
207 sizeof(EXTENT_MAP), MEM_EXTMAP_TAG);
208 if(!Map) return NULL;
209 RtlZeroMemory((int8*)(Map+1), sizeof(EXTENT_MAP));
210 Map[0].extLength = Extent->extLength;
211 Map[0].extLocation = Extent->extLocation;
212 #ifdef UDF_TRACK_EXTENT_TO_MAPPING
213 Map[2].extLength = src;
214 Map[2].extLocation = line;
215 #endif //UDF_TRACK_EXTENT_TO_MAPPING
216 return Map;
217 } // end UDFExtentToMapping()
218
219 /*
220 This routine calculates file mapping length (in bytes) including
221 ZERO-terminator
222 */
223 uint32
224 UDFGetMappingLength(
225 IN PEXTENT_MAP Extent
226 )
227 {
228 if(!Extent) return 0;
229 uint32 i=0;
230
231 //#ifdef _X86_
232 #ifdef _MSC_VER
233 __asm push ebx
234
235 __asm mov ebx,Extent
236 __asm xor eax,eax
237 While_1:
238 __asm mov ecx,[ebx+eax*8]
239 __asm jecxz EO_While
240 __asm inc eax
241 __asm jmp While_1
242 EO_While:
243 __asm inc eax
244 __asm shl eax,3
245 __asm mov i,eax
246
247 __asm pop ebx
248
249 #else // NO X86 optimization , use generic C/C++
250
251 while(Extent->extLength) {
252 i++;
253 Extent++;
254 }
255 i++;
256 i*=sizeof(EXTENT_MAP);
257
258 #endif // _MSC_VER
259
260 return i; // mapping length in bytes, including the zero-terminator
261 } // end UDFGetMappingLength()
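/*
    Layout sketch (illustrative): a mapping is an array of EXTENT_MAP entries
    terminated by an entry whose extLength is 0. For the hypothetical
    2-fragment mapping below (all values are made up):

        EXTENT_MAP M[3];
        M[0].extLength = 0x0800; M[0].extLocation = 0x120;  // 0x800 bytes @ LBA 0x120
        M[1].extLength = 0x1000; M[1].extLocation = 0x200;  // 0x1000 bytes @ LBA 0x200
        M[2].extLength = 0;      M[2].extLocation = 0;      // zero-terminator

    UDFGetExtentLength(M) returns 0x1800 (total data bytes, terminator excluded),
    while UDFGetMappingLength(M) returns 3*sizeof(EXTENT_MAP) (buffer size in
    bytes, terminator included).
*/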
262
263 /*
264 This routine merges 2 sequential file mappings
265 */
266 PEXTENT_MAP
267 __fastcall
268 UDFMergeMappings(
269 IN PEXTENT_MAP Extent,
270 IN PEXTENT_MAP Extent2
271 )
272 {
273 PEXTENT_MAP NewExt;
274 uint32 len, len2;
275
276 len = UDFGetMappingLength(Extent);
277 len2 = UDFGetMappingLength(Extent2);
278 ASSERT(len2 && len);
279 if(!len2) {
280 return Extent;
281 }
282 if(MyReallocPool__((int8*)Extent, len, (int8**)(&NewExt), len+len2-sizeof(EXTENT_MAP))) {
283 RtlCopyMemory(((int8*)NewExt)+len-sizeof(EXTENT_MAP), (int8*)Extent2, len2);
284 } else {
285 ExtPrint(("UDFMergeMappings failed\n"));
286 BrutePoint();
287 }
288 return NewExt;
289 } // end UDFMergeMappings()
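/*
    Usage sketch (illustrative): UDFMergeMappings() overwrites the zero-terminator
    of the 1st mapping and appends the 2nd one (including its terminator) after it.
    The 1st argument may be reallocated, so callers must use the returned pointer;
    the 2nd mapping is not consumed and still has to be freed by the caller:

        Extent = UDFMergeMappings(Extent, Extent2);  // Extent may move
        MyFreePool__(Extent2);                       // Extent2 is not consumed
*/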
290
291 /*
292 This routine builds file mapping according to ShortAllocDesc (SHORT_AD)
293 array
294 */
295 PEXTENT_MAP
296 UDFShortAllocDescToMapping(
297 IN PVCB Vcb,
298 IN uint32 PartNum,
299 IN PSHORT_AD AllocDesc,
300 IN uint32 AllocDescLength,
301 IN uint32 SubCallCount,
302 OUT PEXTENT_INFO AllocLoc
303 )
304 {
305 uint32 i, lim, l, len, type;
306 // uint32 BSh;
307 PEXTENT_MAP Extent, Extent2, AllocMap;
308 EXTENT_AD AllocExt;
309 PALLOC_EXT_DESC NextAllocDesc;
310 lb_addr locAddr;
311 uint32 ReadBytes;
312 EXTENT_INFO NextAllocLoc;
313 BOOLEAN w2k_compat = FALSE;
314
315 ExtPrint(("UDFShortAllocDescToMapping: len=%x\n", AllocDescLength));
316
317 if(SubCallCount > ALLOC_DESC_MAX_RECURSE) return NULL;
318
319 locAddr.partitionReferenceNum = (uint16)PartNum;
320 // BSh = Vcb->BlockSizeBits;
321 l = ((lim = (AllocDescLength/sizeof(SHORT_AD))) + 1 ) * sizeof(EXTENT_AD);
322 Extent = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool, l, MEM_EXTMAP_TAG);
323 if(!Extent) return NULL;
324
325 NextAllocLoc.Offset = 0;
326
327 for(i=0;i<lim;i++) {
328 type = AllocDesc[i].extLength >> 30;
329 len = AllocDesc[i].extLength & UDF_EXTENT_LENGTH_MASK;
330 ExtPrint(("ShExt: type %x, loc %x, len %x\n", type, AllocDesc[i].extPosition, len));
331 if(type == EXTENT_NEXT_EXTENT_ALLOCDESC) {
332 // read next frag of allocation descriptors if encountered
333 if(len < sizeof(ALLOC_EXT_DESC)) {
334 MyFreePool__(Extent);
335 return NULL;
336 }
337 NextAllocDesc = (PALLOC_EXT_DESC)MyAllocatePoolTag__(NonPagedPool, len, MEM_ALLOCDESC_TAG);
338 if(!NextAllocDesc) {
339 MyFreePool__(Extent);
340 return NULL;
341 }
342 // record information about this frag
343 locAddr.logicalBlockNum = AllocDesc[i].extPosition;
344 AllocExt.extLength = len;
345 AllocExt.extLocation = UDFPartLbaToPhys(Vcb, &locAddr);
346 if(AllocExt.extLocation == LBA_OUT_OF_EXTENT) {
347 KdPrint(("bad address\n"));
348 MyFreePool__(NextAllocDesc);
349 MyFreePool__(Extent);
350 return NULL;
351 }
352 NextAllocLoc.Mapping =
353 AllocMap = UDFExtentToMapping(&AllocExt);
354 NextAllocLoc.Length = len;
355 if(!AllocMap) {
356 MyFreePool__(NextAllocDesc);
357 MyFreePool__(Extent);
358 return NULL;
359 }
360 AllocLoc->Mapping = UDFMergeMappings(AllocLoc->Mapping, AllocMap);
361 if(!AllocLoc->Mapping ||
362 // read this frag
363 !OS_SUCCESS(UDFReadExtent(Vcb, &NextAllocLoc,
364 0, len, FALSE, (int8*)NextAllocDesc, &ReadBytes)))
365 {
366 MyFreePool__(AllocMap);
367 MyFreePool__(NextAllocDesc);
368 MyFreePool__(Extent);
369 return NULL;
370 }
371 MyFreePool__(AllocMap);
372 // check integrity
373 if((NextAllocDesc->descTag.tagIdent != TID_ALLOC_EXTENT_DESC) ||
374 (NextAllocDesc->lengthAllocDescs > (len - sizeof(ALLOC_EXT_DESC))) ) {
375 KdPrint(("Integrity check failed\n"));
376 KdPrint(("NextAllocDesc->descTag.tagIdent = %x\n", NextAllocDesc->descTag.tagIdent));
377 KdPrint(("NextAllocDesc->lengthAllocDescs = %x\n", NextAllocDesc->lengthAllocDescs));
378 KdPrint(("len = %x\n", len));
379 MyFreePool__(NextAllocDesc);
380 MyFreePool__(Extent);
381 return NULL;
382 }
383 // perform recursive call to obtain mapping
384 NextAllocLoc.Flags = 0;
385 Extent2 = UDFShortAllocDescToMapping(Vcb, PartNum, (PSHORT_AD)(NextAllocDesc+1),
386 NextAllocDesc->lengthAllocDescs, SubCallCount+1, AllocLoc);
387 if(!Extent2) {
388 MyFreePool__(NextAllocDesc);
389 MyFreePool__(Extent);
390 return NULL;
391 }
392 UDFCheckSpaceAllocation(Vcb, 0, Extent2, AS_USED); // check if used
393 // and merge these 2 mappings into 1
394 Extent[i].extLength = 0;
395 Extent[i].extLocation = 0;
396 Extent = UDFMergeMappings(Extent, Extent2);
397
398 if(NextAllocLoc.Flags & EXTENT_FLAG_2K_COMPAT) {
399 ExtPrint(("w2k-compat\n"));
400 AllocLoc->Flags |= EXTENT_FLAG_2K_COMPAT;
401 }
402
403 MyFreePool__(Extent2);
404 return Extent;
405 }
406 //
407 #ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
408 ASSERT(!(len & (Vcb->LBlockSize-1) ));
409 #endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
410 if(len & (Vcb->LBlockSize-1)) {
411 w2k_compat = TRUE;
412 }
413 Extent[i].extLength = (len+Vcb->LBlockSize-1) & ~(Vcb->LBlockSize-1);
414 locAddr.logicalBlockNum = AllocDesc[i].extPosition;
415 // Note: for compatibility with Adaptec DirectCD we check 'len' here.
416 // That strange implementation records bogus extLocation values in terminal entries
417 if(type != EXTENT_NOT_RECORDED_NOT_ALLOCATED && len) {
418 Extent[i].extLocation = UDFPartLbaToPhys(Vcb, &locAddr);
419 if(Extent[i].extLocation == LBA_OUT_OF_EXTENT) {
420 KdPrint(("bad address (2)\n"));
421 MyFreePool__(Extent);
422 return NULL;
423 }
424 } else {
425 Extent[i].extLocation = 0;
426 }
427 if(!len) {
428 // some UDF implementations set a strange AllocDesc sequence length,
429 // but terminate it with zeros in the proper place, so handle
430 // this case
431 ASSERT(i>=(lim-1));
432 ASSERT(!Extent[i].extLength);
433 Extent[i].extLocation = 0;
434 if(/*!SubCallCount &&*/ w2k_compat) {
435 ExtPrint(("w2k-compat\n"));
436 AllocLoc->Flags |= EXTENT_FLAG_2K_COMPAT;
437 }
438 return Extent;
439 }
440 Extent[i].extLength |= (type << 30);
441 }
442 // set terminator
443 Extent[i].extLength = 0;
444 Extent[i].extLocation = 0;
445
446 if(/*!SubCallCount &&*/ w2k_compat) {
447 ExtPrint(("w2k-compat\n"));
448 AllocLoc->Flags |= EXTENT_FLAG_2K_COMPAT;
449 }
450
451 return Extent;
452 } // end UDFShortAllocDescToMapping()
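/*
    Chain sketch (illustrative): a SHORT_AD whose type bits equal
    EXTENT_NEXT_EXTENT_ALLOCDESC does not describe file data; it points to the
    next block of allocation descriptors (an ALLOC_EXT_DESC header followed by
    further SHORT_ADs). The routine above follows such links recursively, up to
    ALLOC_DESC_MAX_RECURSE levels deep:

        in-ICB ADs : [AD0][AD1][AD2: type=NEXT, pos=N] --------+
                                                               |
        block N    : [ALLOC_EXT_DESC][AD3][AD4][terminator] <--+
*/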
453
454 /*
455 This routine builds file mapping according to LongAllocDesc (LONG_AD)
456 array
457 */
458 PEXTENT_MAP
459 UDFLongAllocDescToMapping(
460 IN PVCB Vcb,
461 IN PLONG_AD AllocDesc,
462 IN uint32 AllocDescLength,
463 IN uint32 SubCallCount,
464 OUT PEXTENT_INFO AllocLoc // .Mapping must be initialized (non-Zero)
465 )
466 {
467 uint32 i, lim, l, len, type;
468 // uint32 BSh;
469 PEXTENT_MAP Extent, Extent2, AllocMap;
470 EXTENT_AD AllocExt;
471 PALLOC_EXT_DESC NextAllocDesc;
472 uint32 ReadBytes;
473 EXTENT_INFO NextAllocLoc;
474
475 ExtPrint(("UDFLongAllocDescToMapping: len=%x\n", AllocDescLength));
476
477 if(SubCallCount > ALLOC_DESC_MAX_RECURSE) return NULL;
478
479 // BSh = Vcb->BlockSizeBits;
480 l = ((lim = (AllocDescLength/sizeof(LONG_AD))) + 1 ) * sizeof(EXTENT_AD);
481 Extent = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool, l, MEM_EXTMAP_TAG);
482 if(!Extent) return NULL;
483
484 NextAllocLoc.Offset = 0;
485
486 for(i=0;i<lim;i++) {
487 type = AllocDesc[i].extLength >> 30;
488 len = AllocDesc[i].extLength & UDF_EXTENT_LENGTH_MASK;
489 ExtPrint(("LnExt: type %x, loc %x (%x:%x), len %x\n", type, UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation)),
490 AllocDesc[i].extLocation.partitionReferenceNum, AllocDesc[i].extLocation.logicalBlockNum,
491 len));
492 if(type == EXTENT_NEXT_EXTENT_ALLOCDESC) {
493 // read next frag of allocation descriptors if encountered
494 if(len < sizeof(ALLOC_EXT_DESC)) {
495 MyFreePool__(Extent);
496 return NULL;
497 }
498 NextAllocDesc = (PALLOC_EXT_DESC)MyAllocatePoolTag__(NonPagedPool, len, MEM_ALLOCDESC_TAG);
499 if(!NextAllocDesc) {
500 MyFreePool__(Extent);
501 return NULL;
502 }
503 // record information about this frag
504 AllocExt.extLength = len;
505 AllocExt.extLocation = UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation));
506 if(AllocExt.extLocation == LBA_OUT_OF_EXTENT) {
507 KdPrint(("bad address\n"));
508 MyFreePool__(NextAllocDesc);
509 MyFreePool__(Extent);
510 return NULL;
511 }
512 NextAllocLoc.Mapping =
513 AllocMap = UDFExtentToMapping(&AllocExt);
514 NextAllocLoc.Length = len;
515 if(!AllocMap) {
516 MyFreePool__(NextAllocDesc);
517 MyFreePool__(Extent);
518 return NULL;
519 }
520 AllocLoc->Mapping = UDFMergeMappings(AllocLoc->Mapping, AllocMap);
521 if(!AllocLoc->Mapping ||
522 // read this frag
523 !OS_SUCCESS(UDFReadExtent(Vcb, &NextAllocLoc,
524 0, len, FALSE, (int8*)NextAllocDesc, &ReadBytes)))
525 {
526 MyFreePool__(AllocMap);
527 MyFreePool__(NextAllocDesc);
528 MyFreePool__(Extent);
529 return NULL;
530 }
531 MyFreePool__(AllocMap);
532 // check integrity
533 if((NextAllocDesc->descTag.tagIdent != TID_ALLOC_EXTENT_DESC) ||
534 (NextAllocDesc->lengthAllocDescs > (len - sizeof(ALLOC_EXT_DESC))) ) {
535 KdPrint(("Integrity check failed\n"));
536 KdPrint(("NextAllocDesc->descTag.tagIdent = %x\n", NextAllocDesc->descTag.tagIdent));
537 KdPrint(("NextAllocDesc->lengthAllocDescs = %x\n", NextAllocDesc->lengthAllocDescs));
538 KdPrint(("len = %x\n", len));
539 MyFreePool__(NextAllocDesc);
540 MyFreePool__(Extent);
541 return NULL;
542 }
543 // perform recursive call to obtain mapping
544 Extent2 = UDFLongAllocDescToMapping(Vcb, (PLONG_AD)(NextAllocDesc+1),
545 NextAllocDesc->lengthAllocDescs, SubCallCount+1, AllocLoc);
546 if(!Extent2) {
547 MyFreePool__(NextAllocDesc);
548 MyFreePool__(Extent);
549 return NULL;
550 }
551 // and merge these 2 mappings into 1
552 Extent[i].extLength = 0;
553 Extent[i].extLocation = 0;
554 Extent = UDFMergeMappings(Extent, Extent2);
555 MyFreePool__(Extent2);
556 return Extent;
557 }
558 //
559 Extent[i].extLength = len;
560 #ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
561 ASSERT(!(len & (Vcb->LBlockSize-1) ));
562 #endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
563 Extent[i].extLength = (len+Vcb->LBlockSize-1) & ~(Vcb->LBlockSize-1);
564 // Note: for compatibility with Adaptec DirectCD we check 'len' here.
565 // That strange implementation records bogus extLocation values in terminal entries
566 if(type != EXTENT_NOT_RECORDED_NOT_ALLOCATED && len) {
567 Extent[i].extLocation = UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation));
568 if(Extent[i].extLocation == LBA_OUT_OF_EXTENT) {
569 KdPrint(("bad address (2)\n"));
570 MyFreePool__(Extent);
571 return NULL;
572 }
573 } else {
574 Extent[i].extLocation = 0;
575 }
576 if(!len) {
577 // some UDF implementations set a strange AllocDesc sequence length,
578 // but terminate it with zeros in the proper place, so handle
579 // this case
580 Extent[i].extLocation = 0;
581 return Extent;
582 }
583 Extent[i].extLength |= (type << 30);
584 }
585 // set terminator
586 Extent[i].extLength = 0;
587 Extent[i].extLocation = 0;
588
589 return Extent;
590 } // end UDFLongAllocDescToMapping()
591
592 /*
593 This routine builds file mapping according to ExtendedAllocDesc (EXT_AD)
594 array
595 */
596 PEXTENT_MAP
597 UDFExtAllocDescToMapping(
598 IN PVCB Vcb,
599 IN PEXT_AD AllocDesc,
600 IN uint32 AllocDescLength,
601 IN uint32 SubCallCount,
602 OUT PEXTENT_INFO AllocLoc // .Mapping must be initialized (non-Zero)
603 )
604 {
605 uint32 i, lim, l, len, type;
606 // uint32 BSh;
607 PEXTENT_MAP Extent, Extent2, AllocMap;
608 EXTENT_AD AllocExt;
609 PALLOC_EXT_DESC NextAllocDesc;
610 uint32 ReadBytes;
611 EXTENT_INFO NextAllocLoc;
612
613 ExtPrint(("UDFExtAllocDescToMapping: len=%x\n", AllocDescLength));
614
615 if(SubCallCount > ALLOC_DESC_MAX_RECURSE) return NULL;
616
617 // BSh = Vcb->BlockSizeBits;
618 l = ((lim = (AllocDescLength/sizeof(EXT_AD))) + 1 ) * sizeof(EXTENT_AD);
619 Extent = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool, l, MEM_EXTMAP_TAG);
620 if(!Extent) return NULL;
621
622 NextAllocLoc.Offset = 0;
623
624 for(i=0;i<lim;i++) {
625 type = AllocDesc[i].extLength >> 30;
626 len = AllocDesc[i].extLength & UDF_EXTENT_LENGTH_MASK;
627 ExtPrint(("ExExt: type %x, loc %x, len %x\n", type, UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation)), len));
628 if(type == EXTENT_NEXT_EXTENT_ALLOCDESC) {
629 // read next frag of allocation descriptors if encountered
630 if(len < sizeof(ALLOC_EXT_DESC)) {
631 MyFreePool__(Extent);
632 return NULL;
633 }
634 NextAllocDesc = (PALLOC_EXT_DESC)MyAllocatePoolTag__(NonPagedPool, len, MEM_ALLOCDESC_TAG);
635 if(!NextAllocDesc) {
636 MyFreePool__(Extent);
637 return NULL;
638 }
639 // record information about this frag
640 AllocExt.extLength = len;
641 AllocExt.extLocation = UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation));
642 if(AllocExt.extLocation == LBA_OUT_OF_EXTENT) {
643 KdPrint(("bad address\n"));
644 MyFreePool__(NextAllocDesc);
645 MyFreePool__(Extent);
646 return NULL;
647 }
648 NextAllocLoc.Mapping =
649 AllocMap = UDFExtentToMapping(&AllocExt);
650 NextAllocLoc.Length = len;
651 if(!AllocMap) {
652 MyFreePool__(NextAllocDesc);
653 MyFreePool__(Extent);
654 return NULL;
655 }
656 AllocLoc->Mapping = UDFMergeMappings(AllocLoc->Mapping, AllocMap);
657 if(!AllocLoc->Mapping ||
658 // read this frag
659 !OS_SUCCESS(UDFReadExtent(Vcb, &NextAllocLoc,
660 0, len, FALSE, (int8*)NextAllocDesc, &ReadBytes)))
661 {
662 MyFreePool__(AllocMap);
663 MyFreePool__(NextAllocDesc);
664 MyFreePool__(Extent);
665 return NULL;
666 }
667 MyFreePool__(AllocMap);
668 // check integrity
669 if((NextAllocDesc->descTag.tagIdent != TID_ALLOC_EXTENT_DESC) ||
670 (NextAllocDesc->lengthAllocDescs > (len - sizeof(ALLOC_EXT_DESC))) ) {
671 KdPrint(("Integrity check failed\n"));
672 MyFreePool__(NextAllocDesc);
673 MyFreePool__(Extent);
674 return NULL;
675 }
676 // perform recursive call to obtain mapping
677 Extent2 = UDFExtAllocDescToMapping(Vcb, (PEXT_AD)(NextAllocDesc+1),
678 NextAllocDesc->lengthAllocDescs, SubCallCount+1, AllocLoc);
679 if(!Extent2) {
680 MyFreePool__(NextAllocDesc);
681 MyFreePool__(Extent);
682 return NULL;
683 }
684 // and merge these 2 mappings into 1
685 Extent[i].extLength = 0;
686 Extent[i].extLocation = 0;
687 Extent = UDFMergeMappings(Extent, Extent2);
688 MyFreePool__(Extent2);
689 return Extent;
690 }
691 /* if((AllocDesc[i].extLength & UDF_EXTENT_LENGTH_MASK) > // Uncomment!!!
692 (AllocDesc[i].recordedLength & UDF_EXTENT_LENGTH_MASK)) {
693 Extent[i].extLength = AllocDesc[i].recordedLength;
694 Extent[i].extLocation = UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation));
695 }*/
696 Extent[i].extLength = len;
697 #ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
698 ASSERT(!(len & (Vcb->LBlockSize-1) ));
699 #endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
700 // Note: for compatibility with Adaptec DirectCD we check 'len' here.
701 // That strange implementation records bogus extLocation values in terminal entries
702 if(type != EXTENT_NOT_RECORDED_NOT_ALLOCATED && len) {
703 Extent[i].extLocation = UDFPartLbaToPhys(Vcb,&(AllocDesc[i].extLocation));
704 if(Extent[i].extLocation == LBA_OUT_OF_EXTENT) {
705 KdPrint(("bad address (2)\n"));
706 MyFreePool__(Extent);
707 return NULL;
708 }
709 } else {
710 Extent[i].extLocation = 0;
711 }
712 if(!len) {
713 // some UDF implementations set a strange AllocDesc sequence length,
714 // but terminate it with zeros in the proper place, so handle
715 // this case
716 Extent[i].extLocation = 0;
717 return Extent;
718 }
719 Extent[i].extLength |= (type << 30);
720 }
721 // set terminator
722 Extent[i].extLength = 0;
723 Extent[i].extLocation = 0;
724
725 return Extent;
726 } // end UDFExtAllocDescToMapping()
727
728
729 /*
730 This routine builds the FileMapping according to the given FileEntry
731 Return: pointer to EXTENT_MAP array,
732 or NULL with (*Offset) set to the data offset inside the FileEntry
733 when ICB_FLAG_AD_IN_ICB is encountered,
734 or NULL (with zero *Offset) if an error occurred
735 */
736 PEXTENT_MAP
737 UDFReadMappingFromXEntry(
738 IN PVCB Vcb,
739 IN uint32 PartNum,
740 IN tag* XEntry,
741 IN OUT uint32* Offset,
742 OUT PEXTENT_INFO AllocLoc // .Mapping must be initialized (non-Zero)
743 )
744 {
745 PEXTENT_AD Extent;
746 uint16 AllocMode;
747 int8* AllocDescs;
748 uint32 len;
749 // EntityID* eID; // for compatibility with Adaptec DirectCD
750
751 Extent = NULL;
752 (*Offset) = 0;
753
754
755 if(XEntry->tagIdent == TID_FILE_ENTRY) {
756 // KdPrint(("Standard FileEntry\n"));
757 PFILE_ENTRY FileEntry = (PFILE_ENTRY)XEntry;
758 ExtPrint(("Standard FileEntry\n"));
759
760 AllocDescs = (int8*)(((int8*)(FileEntry+1))+(FileEntry->lengthExtendedAttr));
761 len = FileEntry->lengthAllocDescs;
762 AllocLoc->Offset = sizeof(FILE_ENTRY) + FileEntry->lengthExtendedAttr;
763 // eID = &(FileEntry->impIdent);
764
765 AllocMode = FileEntry->icbTag.flags & ICB_FLAG_ALLOC_MASK;
766
767 } else if(XEntry->tagIdent == TID_EXTENDED_FILE_ENTRY) {
768 // KdPrint(("Extended FileEntry\n"));
769 ExtPrint(("Extended FileEntry\n"));
770 PEXTENDED_FILE_ENTRY ExFileEntry = (PEXTENDED_FILE_ENTRY)XEntry;
771
772 AllocDescs = (((int8*)(ExFileEntry+1))+(ExFileEntry->lengthExtendedAttr));
773 len = ExFileEntry->lengthAllocDescs;
774 AllocLoc->Offset = sizeof(EXTENDED_FILE_ENTRY) + ExFileEntry->lengthExtendedAttr;
775 // eID = &(FileEntry->impIdent);
776
777 AllocMode = ExFileEntry->icbTag.flags & ICB_FLAG_ALLOC_MASK;
778
779 } else {
780 return NULL;
781 }
782
783 // for compatibility with Adaptec DirectCD
784 // if(!(Vcb->UDF_VCB_IC_ADAPTEC_NONALLOC_COMPAT))
785
786 AllocLoc->Length=len;
787 AllocLoc->Flags |= EXTENT_FLAG_VERIFY; // for metadata
788
789 switch (AllocMode) {
790 case ICB_FLAG_AD_SHORT: {
791 Extent = UDFShortAllocDescToMapping(Vcb, PartNum, (PSHORT_AD)AllocDescs, len, 0, AllocLoc);
792 break;
793 }
794 case ICB_FLAG_AD_LONG: {
795 Extent = UDFLongAllocDescToMapping(Vcb, (PLONG_AD)AllocDescs, len, 0, AllocLoc);
796 break;
797 }
798 case ICB_FLAG_AD_EXTENDED: {
799 Extent = UDFExtAllocDescToMapping(Vcb, (PEXT_AD)AllocDescs, len, 0, AllocLoc);
800 break;
801 }
802 default : { // case ICB_FLAG_AD_IN_ICB
803 Extent = NULL;
804 *Offset = (uint32)AllocDescs - (uint32)XEntry;
805 AllocLoc->Offset=0;
806 AllocLoc->Length=0;
807 if(AllocLoc->Mapping) MyFreePool__(AllocLoc->Mapping);
808 AllocLoc->Mapping=NULL;
809 break;
810 }
811 }
812
813 ExtPrint(("UDFReadMappingFromXEntry: mode %x, loc %x, len %x\n", AllocMode,
814 AllocLoc->Mapping ? AllocLoc->Mapping[0].extLocation : -1, len));
815
816 UDFCheckSpaceAllocation(Vcb, 0, Extent, AS_USED); // check if used
817
818 return Extent;
819 }// end UDFReadMappingFromXEntry()
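/*
    Usage sketch (illustrative): a caller reads the (Extended)FileEntry into
    memory first and then asks this routine for the data mapping. A NULL return
    with a non-zero (*Offset) means the data is embedded in the ICB itself
    (ICB_FLAG_AD_IN_ICB) at that offset. 'FeBuf' stands for the buffer holding
    the FileEntry and 'AllocLoc' for a PEXTENT_INFO whose .Mapping is already
    initialized; both are placeholders.

        uint32 Offs = 0;
        PEXTENT_MAP Map = UDFReadMappingFromXEntry(Vcb, PartNum, (tag*)FeBuf,
                                                   &Offs, AllocLoc);
        if(!Map && Offs) {
            // in-ICB data: the file contents start at ((int8*)FeBuf) + Offs
        }
*/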
820
821 #ifndef UDF_READ_ONLY_BUILD
822 /*
823 This routine builds data for AllocDesc sequence for specified
824 extent
825 */
826 OSSTATUS
827 UDFBuildShortAllocDescs(
828 IN PVCB Vcb,
829 IN uint32 PartNum,
830 OUT int8** Buff, // data for AllocLoc
831 IN uint32 InitSz,
832 IN OUT PUDF_FILE_INFO FileInfo
833 )
834 {
835 uint32 i, j;
836 uint32 len=0;
837 PEXTENT_MAP Extent = FileInfo->Dloc->DataLoc.Mapping;
838 PEXTENT_INFO AllocExtent = &(FileInfo->Dloc->AllocLoc);
839 PSHORT_AD Alloc;
840 uint32 NewLen;
841 OSSTATUS status;
842 uint32 ph_len=0; // in general, this should be uint64,
843 // but we need its lower part only
844 #ifdef UDF_ALLOW_FRAG_AD
845 uint32 ts, ac, len2;
846 uint32 LBS = Vcb->LBlockSize;
847 uint32 LBSh = Vcb->BlockSizeBits;
848 uint32 TagLen = 0;
849 tag* Tag = NULL;
850 PSHORT_AD saved_Alloc;
851 uint32 TagLoc, prevTagLoc;
852 uint32 BufOffs;
853 uint32 ExtOffs;
854 uint32 saved_NewLen;
855 #endif //UDF_ALLOW_FRAG_AD
856
857 ValidateFileInfo(FileInfo);
858 ExtPrint(("UDFBuildShortAllocDescs: FE %x\n", FileInfo->Dloc->FELoc.Mapping[0].extLocation));
859 // calculate length
860 for(len=0; (i=(Extent[len].extLength & UDF_EXTENT_LENGTH_MASK)); len++, ph_len+=i) {
861 ExtPrint(("bShExt: type %x, loc %x, len %x\n",
862 Extent[len].extLength >> 30, Extent[len].extLocation, Extent[len].extLength & UDF_EXTENT_LENGTH_MASK));
863 }
864 Alloc = (PSHORT_AD)MyAllocatePoolTag__(NonPagedPool, (len+1)*sizeof(SHORT_AD), MEM_SHAD_TAG);
865 if(!Alloc) {
866 BrutePoint();
867 return STATUS_INSUFFICIENT_RESOURCES;
868 }
869 // fill contiguous AllocDesc buffer (describing UserData)
870 for(i=0;i<len;i++) {
871 Alloc[i].extLength = Extent[i].extLength;
872 Alloc[i].extPosition = UDFPhysLbaToPart(Vcb, PartNum, Extent[i].extLocation);
873 }
874 if((Vcb->CompatFlags & UDF_VCB_IC_W2K_COMPAT_ALLOC_DESCS) && i) {
875 Alloc[i-1].extLength -= (ph_len - (ULONG)(FileInfo->Dloc->DataLoc.Length)) &
876 (Vcb->LBlockSize-1);
877 ExtPrint(("bShExt: cut tail -> %x\n",
878 Alloc[i-1].extLength & UDF_EXTENT_LENGTH_MASK));
879 }
880 Alloc[i].extLength =
881 Alloc[i].extPosition = 0;
882 j = len*sizeof(SHORT_AD); // required space
883 len = (InitSz & ~(sizeof(SHORT_AD)-1)); // space available in 1st block
884 ASSERT(len == InitSz);
885
886 // Ok. Let's init AllocLoc
887 if(!(FileInfo->Dloc->AllocLoc.Mapping)) {
888 FileInfo->Dloc->AllocLoc.Mapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool, 2 * sizeof(EXTENT_MAP), MEM_EXTMAP_TAG);
889 if(!(FileInfo->Dloc->AllocLoc.Mapping)) {
890 BrutePoint();
891 MyFreePool__(Alloc);
892 return STATUS_INSUFFICIENT_RESOURCES;
893 }
894 // allocation descriptors are located in the same sector as FileEntry
895 // (at least their 1st part), just after it
896 FileInfo->Dloc->AllocLoc.Mapping[0] = FileInfo->Dloc->FELoc.Mapping[0];
897 FileInfo->Dloc->AllocLoc.Offset = FileInfo->Dloc->FileEntryLen;
898 FileInfo->Dloc->AllocLoc.Length = 0;
899 // set terminator
900 FileInfo->Dloc->AllocLoc.Mapping[1].extLength =
901 FileInfo->Dloc->AllocLoc.Mapping[1].extLocation = 0;
902 }
903
904 if(j <= len) {
905 // we don't need to allocate additional blocks to store AllocDescs
906 AdPrint(("in-ICB AllocDescs, j=%x\n",j));
907 RtlCopyMemory(*Buff, (int8*)Alloc, j);
908 NewLen = j;
909 MyFreePool__(Alloc);
910 } else {
911 #ifndef UDF_ALLOW_FRAG_AD
912 AdPrint((" DISK_FULL\n"));
913 return STATUS_DISK_FULL;
914 #else //UDF_ALLOW_FRAG_AD
915 AdPrint(("multi-block AllocDescs, j=%x\n",j));
916 BufOffs = 0;
917 TagLoc = prevTagLoc = 0;
918 // calculate the space available for SHORT_ADs in each block
919 ac = (LBS - (sizeof(ALLOC_EXT_DESC) + sizeof(SHORT_AD))) & ~(sizeof(SHORT_AD)-1);
920 len2 = len;
921 // tail size
922 ts = InitSz - len2;
923 len -= sizeof(SHORT_AD);
924 // calculate actual AllocSequence length (in bytes)
925 NewLen = ( ((j - len + ac - 1) / ac) << LBSh) + InitSz + sizeof(SHORT_AD);
926 MyFreePool__(*Buff);
927 (*Buff) = (int8*)MyAllocatePoolTag__(NonPagedPool, NewLen, MEM_SHAD_TAG);
928 if(!(*Buff)) {
929 status = STATUS_INSUFFICIENT_RESOURCES;
930 KdPrint(("UDFResizeExtent() failed (%x)\n",status));
931 BrutePoint();
932 goto sh_alloc_err;
933 }
934 if(UDFGetExtentLength(AllocExtent->Mapping) < NewLen) {
935 status = UDFResizeExtent(Vcb, PartNum, NewLen, TRUE, AllocExtent);
936 if(!OS_SUCCESS(status)) {
937 KdPrint(("UDFResizeExtent(2) failed (%x)\n",status));
938 BrutePoint();
939 sh_alloc_err:
940 MyFreePool__(Alloc);
941 return status;
942 }
943 }
944 ExtOffs = AllocExtent->Offset;
945 RtlZeroMemory(*Buff, NewLen);
946 saved_NewLen = NewLen;
947 NewLen = 0; // recorded length
948 saved_Alloc = Alloc;
949 // fill buffer sector by sector (adding links at the end of each one)
950 while(TRUE) {
951
952 // j - remaining AllocDescs length (in bytes)
953 // len - bytes available for AllocDescs in current block
954 // ac - bytes available for AllocDescs in each block
955
956 // leave space for terminator or pointer to next part of sequence
957 if(j == len2) {
958 // if we have only 1 SHORT_AD that we can fit in last sector
959 // we shall do it instead of recording link & allocating new block
960 len =
961 TagLen = len2;
962 }
963 ASSERT(saved_NewLen >= (BufOffs + len));
964 RtlCopyMemory( (*Buff)+BufOffs, (int8*)Alloc, len);
965 Alloc = (PSHORT_AD)((int8*)Alloc + len);
966 j -= len;
967 BufOffs += len;
968 if(Tag) {
969 // Set up Tag for AllocDesc
970 Tag->tagIdent = TID_ALLOC_EXTENT_DESC;
971 UDFSetUpTag(Vcb, Tag, (uint16)TagLen, TagLoc);
972 prevTagLoc = TagLoc;
973 }
974 if(!j) {
975 // terminate loop
976 NewLen = BufOffs;
977 break;
978 }
979 len = ac;
980 if(j <= (len + sizeof(SHORT_AD)))
981 len = j - sizeof(SHORT_AD);
982 len2 = len + sizeof(SHORT_AD);
983 // we have more than 1 SHORT_AD that we can't fit in current block
984 // so we shall set up pointer to the next block
985 ((PSHORT_AD)((*Buff)+BufOffs))->extLength = /*LBS*/ len2 |
986 (((uint32)EXTENT_NEXT_EXTENT_ALLOCDESC) << 30) ;
987 ((PSHORT_AD)((*Buff)+BufOffs))->extPosition = TagLoc =
988 UDFPhysLbaToPart(Vcb, PartNum,
989 UDFExtentOffsetToLba(Vcb, AllocExtent->Mapping,
990 ExtOffs+BufOffs+sizeof(SHORT_AD)+ts,
991 NULL, NULL, NULL, NULL) );
992 // reflect additional (link) block & LBlock tail (if any)
993 BufOffs += ts+sizeof(SHORT_AD);
994 // init AllocDesc
995 ( (PALLOC_EXT_DESC) ((*Buff)+BufOffs))->lengthAllocDescs = len2;
996 ( (PALLOC_EXT_DESC) ((*Buff)+BufOffs))->previousAllocExtLocation = prevTagLoc;
997 Tag = (tag*)((*Buff)+BufOffs);
998 TagLen = len2;
999 ts = LBS-len2-sizeof(ALLOC_EXT_DESC);
1000 BufOffs += sizeof(ALLOC_EXT_DESC);
1001 }
1002 MyFreePool__(saved_Alloc);
1003 #endif //UDF_ALLOW_FRAG_AD
1004 }
1005 status = UDFResizeExtent(Vcb, PartNum, NewLen, TRUE, AllocExtent);
1006 return status;
1007 } // end UDFBuildShortAllocDescs()
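/*
    Worked example (illustrative, assuming 2048-byte (logical) blocks, an
    8-byte SHORT_AD and a 24-byte ALLOC_EXT_DESC): the NewLen formula above
    reserves room for the in-ICB part plus one continuation block per 'ac'
    bytes of remaining descriptors.

        ac     = (2048 - (24 + 8)) & ~7 = 2016   // SHORT_AD bytes per continuation block
        len    = InitSz - 8             = 1016   // SHORT_AD bytes in the 1st (in-ICB) part, with InitSz = 1024
        j      = 5000                            // total SHORT_AD bytes to record
        NewLen = ceil((5000 - 1016) / 2016) * 2048 + 1024 + 8
               = 2*2048 + 1032 = 5128 bytes
*/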
1008
1009 /*
1010 This routine builds data for AllocDesc sequence for specified
1011 extent
1012 */
1013 OSSTATUS
1014 UDFBuildLongAllocDescs(
1015 IN PVCB Vcb,
1016 IN uint32 PartNum,
1017 OUT int8** Buff, // data for AllocLoc
1018 IN uint32 InitSz,
1019 IN OUT PUDF_FILE_INFO FileInfo
1020 )
1021 {
1022 uint32 i, j;
1023 uint32 len=0;
1024 PEXTENT_MAP Extent = FileInfo->Dloc->DataLoc.Mapping;
1025 PEXTENT_INFO AllocExtent = &(FileInfo->Dloc->AllocLoc);
1026 PLONG_AD Alloc;
1027 uint32 NewLen;
1028 OSSTATUS status;
1029 uint32 ph_len=0; // in general, this should be uint64,
1030 // but we need its lower part only
1031 #ifdef UDF_ALLOW_FRAG_AD
1032 uint32 ac, len2, ts;
1033 uint32 TagLoc, prevTagLoc;
1034 uint32 LBS = Vcb->LBlockSize;
1035 uint32 LBSh = Vcb->BlockSizeBits;
1036 uint32 BufOffs;
1037 uint32 ExtOffs = AllocExtent->Offset;
1038 PLONG_AD saved_Alloc;
1039 uint32 TagLen = 0;
1040 tag* Tag = NULL;
1041 #endif //UDF_ALLOW_FRAG_AD
1042
1043 ValidateFileInfo(FileInfo);
1044 ExtPrint(("UDFBuildLongAllocDescs: FE %x\n", FileInfo->Dloc->FELoc.Mapping[0].extLocation));
1045 // calculate length
1046 //for(len=0; i=(Extent[len].extLength & UDF_EXTENT_LENGTH_MASK); len++, ph_len+=i);
1047 for(len=0; (i=(Extent[len].extLength & UDF_EXTENT_LENGTH_MASK)); len++, ph_len+=i) {
1048 ExtPrint(("bLnExt: type %x, loc %x, len %x\n",
1049 Extent[len].extLength >> 30, Extent[len].extLocation, Extent[len].extLength & UDF_EXTENT_LENGTH_MASK));
1050 }
1051 Alloc = (PLONG_AD)MyAllocatePoolTag__(NonPagedPool, (len+1)*sizeof(LONG_AD), MEM_LNGAD_TAG);
1052 if(!Alloc) return STATUS_INSUFFICIENT_RESOURCES;
1053 // fill contiguous AllocDesc buffer (describing UserData)
1054 for(i=0;i<len;i++) {
1055 Alloc[i].extLength = Extent[i].extLength;
1056 Alloc[i].extLocation.logicalBlockNum = UDFPhysLbaToPart(Vcb, PartNum, Extent[i].extLocation);
1057 Alloc[i].extLocation.partitionReferenceNum = (uint16)PartNum;
1058 RtlZeroMemory(&(Alloc[i].impUse), sizeof(Alloc[i].impUse));
1059 }
1060 if((Vcb->CompatFlags & UDF_VCB_IC_W2K_COMPAT_ALLOC_DESCS) && i) {
1061 Alloc[i-1].extLength -= (ph_len - (ULONG)(FileInfo->Dloc->DataLoc.Length)) &
1062 (Vcb->LBlockSize-1);
1063 ExtPrint(("bLnExt: cut tail -> %x\n",
1064 Alloc[i-1].extLength & UDF_EXTENT_LENGTH_MASK));
1065 }
1066 RtlZeroMemory(&(Alloc[i]), sizeof(LONG_AD));
1067 j = len*sizeof(LONG_AD); // required space
1068 len = (InitSz & ~(sizeof(LONG_AD)-1)); // space available in 1st block
1069 ASSERT(len == InitSz);
1070
1071 // Ok. Let's init AllocLoc
1072 if(!(FileInfo->Dloc->AllocLoc.Mapping)) {
1073 FileInfo->Dloc->AllocLoc.Mapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool, 2 * sizeof(EXTENT_MAP), MEM_EXTMAP_TAG);
1074 if(!(FileInfo->Dloc->AllocLoc.Mapping)) {
1075 MyFreePool__(Alloc);
1076 return STATUS_INSUFFICIENT_RESOURCES;
1077 }
1078 // allocation descriptors are located in the same sector as FileEntry
1079 // (at least their 1st part), just after it
1080 FileInfo->Dloc->AllocLoc.Mapping[0] = FileInfo->Dloc->FELoc.Mapping[0];
1081 FileInfo->Dloc->AllocLoc.Offset = FileInfo->Dloc->FileEntryLen;
1082 FileInfo->Dloc->AllocLoc.Length = 0;
1083 // set terminator
1084 FileInfo->Dloc->AllocLoc.Mapping[1].extLength =
1085 FileInfo->Dloc->AllocLoc.Mapping[1].extLocation = 0;
1086 }
1087
1088 if(j <= len) {
1089 // we don't need to allocate additional blocks to store AllocDescs
1090 RtlCopyMemory(*Buff, (int8*)Alloc, j);
1091 NewLen = j;
1092 MyFreePool__(Alloc);
1093 } else {
1094 #ifndef UDF_ALLOW_FRAG_AD
1095 AdPrint((" DISK_FULL\n"));
1096 return STATUS_DISK_FULL;
1097 #else //UDF_ALLOW_FRAG_AD
1098 BufOffs = 0;
1099 TagLoc = prevTagLoc = 0;
1100 // calculate the space available for LONG_ADs in each block
1101 ac = (LBS - (sizeof(ALLOC_EXT_DESC) + sizeof(LONG_AD))) & ~(sizeof(LONG_AD)-1);
1102 len2 = len;
1103 // tail size
1104 ts = InitSz - len2;
1105 len -= sizeof(LONG_AD);
1106 // calculate actual AllocSequence length (in bytes)
1107 NewLen = ( ((j - len + ac - 1) / ac) << LBSh) + InitSz + sizeof(LONG_AD);
1108 MyFreePool__(*Buff);
1109 (*Buff) = (int8*)MyAllocatePoolTag__(NonPagedPool, NewLen, MEM_LNGAD_TAG);
1110 if(!(*Buff)) {
1111 status = STATUS_INSUFFICIENT_RESOURCES;
1112 goto lad_alloc_err;
1113 }
1114 if(UDFGetExtentLength(AllocExtent->Mapping) < NewLen) {
1115 status = UDFResizeExtent(Vcb, PartNum, NewLen, TRUE, AllocExtent);
1116 if(!OS_SUCCESS(status)) {
1117 lad_alloc_err:
1118 MyFreePool__(Alloc);
1119 return status;
1120 }
1121 }
1122 ExtOffs = AllocExtent->Offset;
1123 RtlZeroMemory(*Buff, NewLen);
1124 NewLen = 0; // recorded length
1125 saved_Alloc = Alloc;
1126 len2 = len+sizeof(LONG_AD);
1127 // fill buffer sector by sector (adding links at the end of each one)
1128 while(TRUE) {
1129
1130 // j - remaining AllocDescs length (in bytes)
1131 // len - bytes available for AllocDescs in the current block
1132
1133 // leave space for terminator or pointer to next part of sequence
1134 if(j == len2) {
1135 // if we have only 1 LONG_AD that we can fit in last sector
1136 // we shall do it instead of recording link & allocating new block
1137 len =
1138 TagLen = len2;
1139 }
1140 RtlCopyMemory( (*Buff)+BufOffs, (int8*)Alloc, len);
1141 Alloc = (PLONG_AD)((int8*)Alloc + len);
1142 j -= len;
1143 BufOffs += len;
1144 if(Tag) {
1145 // Set up Tag for AllocDesc
1146 Tag->tagIdent = TID_ALLOC_EXTENT_DESC;
1147 UDFSetUpTag(Vcb, Tag, (uint16)TagLen, TagLoc);
1148 prevTagLoc = TagLoc;
1149 }
1150 if(!j) {
1151 // terminate loop
1152 NewLen = BufOffs;
1153 break;
1154 }
1155 len = ac;
1156 if(j <= (len + sizeof(LONG_AD)))
1157 len = j - sizeof(LONG_AD);
1158 len2 = len+sizeof(LONG_AD);
1159 // we have more than 1 LONG_AD that we can't fit in current block
1160 // so we shall set up pointer to the next block
1161 ((PLONG_AD)((*Buff)+BufOffs))->extLength = /*LBS*/ len2 |
1162 (((uint32)EXTENT_NEXT_EXTENT_ALLOCDESC) << 30) ;
1163 ((PLONG_AD)((*Buff)+BufOffs))->extLocation.logicalBlockNum = TagLoc =
1164 UDFPhysLbaToPart(Vcb, PartNum,
1165 UDFExtentOffsetToLba(Vcb, AllocExtent->Mapping,
1166 ExtOffs+BufOffs+sizeof(LONG_AD)+ts,
1167 NULL, NULL, NULL, NULL) );
1168 ((PLONG_AD)((*Buff)+BufOffs))->extLocation.partitionReferenceNum = (uint16)PartNum;
1169 // reflect additional (link) block & LBlock tail (if any)
1170 BufOffs += ts+sizeof(LONG_AD);
1171 // init AllocDesc
1172 ( (PALLOC_EXT_DESC) ((*Buff)+BufOffs))->lengthAllocDescs = len2;
1173 ( (PALLOC_EXT_DESC) ((*Buff)+BufOffs))->previousAllocExtLocation = prevTagLoc;
1174 Tag = (tag*)((*Buff)+BufOffs);
1175 TagLen = len2;
1176 ts = LBS-len2-sizeof(ALLOC_EXT_DESC);
1177 BufOffs += sizeof(ALLOC_EXT_DESC);
1178 }
1179 MyFreePool__(saved_Alloc);
1180 #endif //UDF_ALLOW_FRAG_AD
1181 }
1182 status = UDFResizeExtent(Vcb, PartNum, NewLen, TRUE, AllocExtent);
1183 return status;
1184 } // end UDFBuildLongAllocDescs()
1185
1186 /*
1187 This routine builds data for AllocDesc sequence for specified
1188 extent
1189 */
1190 /*OSSTATUS
1191 UDFBuildExtAllocDescs(
1192 IN PVCB Vcb,
1193 IN uint32 PartNum,
1194 OUT int8** Buff, // data for AllocLoc
1195 IN uint32 InitSz,
1196 IN OUT PUDF_FILE_INFO FileInfo
1197 )
1198 {
1199 uint32 i, j;
1200 uint32 len=0, ac, len2;
1201 uint32 TagLoc, prevTagLoc;
1202 uint32 LBS = Vcb->LBlockSize;
1203 uint32 LBSh = Vcb->BlockSizeBits;
1204 PEXTENT_MAP Extent = FileInfo->Dloc->DataLoc.Mapping;
1205 PEXTENT_INFO AllocExtent = &(FileInfo->Dloc->AllocLoc);
1206 PEXT_AD Alloc, saved_Alloc;
1207 uint32 BufOffs;
1208 uint32 ExtOffs = AllocExtent->Offset;
1209 uint32 NewLen;
1210 OSSTATUS status;
1211 uint32 TagLen = 0;
1212 tag* Tag = NULL;
1213
1214 ValidateFileInfo(FileInfo);
1215 // calculate length
1216 for(len=0; Extent[len].extLength; len++);
1217 Alloc = (PEXT_AD)MyAllocatePool__(NonPagedPool, (len+1)*sizeof(EXT_AD));
1218 if(!Alloc) return STATUS_INSUFFICIENT_RESOURCES;
1219 // fill contiguous AllocDesc buffer (decribing UserData)
1220 for(i=0;i<len;i++) {
1221 Alloc[i].extLength =
1222 Alloc[i].recordedLength =
1223 Alloc[i].informationLength = Extent[i].extLength;
1224 Alloc[i].extLocation.logicalBlockNum = UDFPhysLbaToPart(Vcb, PartNum, Extent[i].extLocation);
1225 Alloc[i].extLocation.partitionReferenceNum = (uint16)PartNum;
1226 }
1227 RtlZeroMemory(&(Alloc[i]), sizeof(EXT_AD));
1228 j = len*sizeof(EXT_AD); // required space
1229 len = InitSz; // space available in 1st block
1230
1231 // Ok. Let's init AllocLoc
1232 if(!(FileInfo->Dloc->AllocLoc.Mapping)) {
1233 FileInfo->Dloc->AllocLoc.Mapping = (PEXTENT_MAP)MyAllocatePool__(NonPagedPool, 2 * sizeof(EXTENT_MAP));
1234 if(!(FileInfo->Dloc->AllocLoc.Mapping)) {
1235 MyFreePool__(Alloc);
1236 return STATUS_INSUFFICIENT_RESOURCES;
1237 }
1238 // allocation descriptors are located in the same sector as FileEntry
1239 // (at least their 1st part), just after it
1240 FileInfo->Dloc->AllocLoc.Mapping[0] = FileInfo->Dloc->FELoc.Mapping[0];
1241 FileInfo->Dloc->AllocLoc.Offset = FileInfo->Dloc->FileEntryLen;
1242 FileInfo->Dloc->AllocLoc.Length = 0;
1243 // set terminator
1244 FileInfo->Dloc->AllocLoc.Mapping[1].extLength =
1245 FileInfo->Dloc->AllocLoc.Mapping[1].extLocation = 0;
1246 }
1247
1248 if(j <= len) {
1249 // we needn't allocating additional blocks to store AllocDescs
1250 RtlCopyMemory(*Buff, (int8*)Alloc, j);
1251 NewLen = j;
1252 MyFreePool__(Alloc);
1253 } else {
1254 BufOffs = 0;
1255 TagLoc = prevTagLoc = 0;
1256 // calculate the space available for EXT_ADs in each block
1257 ac = (LBS - (sizeof(ALLOC_EXT_DESC) + sizeof(EXT_AD))) & ~(sizeof(EXT_AD)-1);
1258 // calculate actual AllocSequence length (in LBlocks)
1259 len -= sizeof(EXT_AD);
1260 NewLen = ( ((j - len + ac - 1) / ac) << LBSh) + len + sizeof(EXT_AD);
1261 MyFreePool__(*Buff);
1262 (*Buff) = (int8*)MyAllocatePool__(NonPagedPool, NewLen);
1263 if(UDFGetExtentLength(AllocExtent->Mapping) < NewLen) {
1264 status = UDFResizeExtent(Vcb, PartNum, NewLen, TRUE, AllocExtent);
1265 if(!OS_SUCCESS(status)) {
1266 MyFreePool__(Alloc);
1267 return status;
1268 }
1269 }
1270 RtlZeroMemory(*Buff, NewLen);
1271 NewLen = 0; // recorded length
1272 saved_Alloc = Alloc;
1273 len2 = len + sizeof(EXT_AD);
1274 // fill buffer sector by sector (adding links at the end of each one)
1275 while(TRUE) {
1276
1277 // j - remained AllocDescs length (in bytes)
1278 // len - bytes available for in AllocDescs each block
1279
1280 // leave space for terminator or pointer to next part of sequence
1281 if(j == len2) {
1282 // if we have only 1 EXT_AD that we can fit in last sector
1283 // we shall do it instead of recording link & allocating new block
1284 len =
1285 TagLen = len2;
1286 }
1287 RtlCopyMemory( (*Buff)+BufOffs, (int8*)Alloc, len);
1288 Alloc = (PEXT_AD)((int8*)Alloc + len);
1289 j -= len;
1290 BufOffs += len;
1291 if(Tag) {
1292 // Set up Tag for AllocDesc
1293 Tag->tagIdent = TID_ALLOC_EXTENT_DESC;
1294 UDFSetUpTag(Vcb, Tag, (uint16)TagLen, TagLoc);
1295 prevTagLoc = TagLoc;
1296 }
1297 if(!j) {
1298 // terminate loop
1299 NewLen = BufOffs;
1300 break;
1301 }
1302 len = ac;
1303 if(j <= (len + sizeof(EXT_AD)))
1304 len = j - sizeof(EXT_AD);
1305 len2 = len + sizeof(EXT_AD);
1306 // we have more than 1 EXT_AD that we can't fit in current block
1307 // so we shall set up pointer to the next block
1308 ((PEXT_AD)((*Buff)+BufOffs))->extLength =
1309 ((PEXT_AD)((*Buff)+BufOffs))->recordedLength = LBS;
1310 ((PEXT_AD)((*Buff)+BufOffs))->informationLength = len2 |
1311 (((uint32)EXTENT_NEXT_EXTENT_ALLOCDESC) << 30) ;
1312 ((PEXT_AD)((*Buff)+BufOffs))->extLocation.logicalBlockNum = TagLoc =
1313 UDFPhysLbaToPart(Vcb, PartNum,
1314 UDFExtentOffsetToLba(Vcb, AllocExtent->Mapping, ExtOffs + BufOffs + 2*sizeof(EXT_AD)-1, NULL, NULL, NULL, NULL) );
1315 ((PEXT_AD)((*Buff)+BufOffs))->extLocation.partitionReferenceNum = (uint16)PartNum;
1316 BufOffs = (BufOffs + 2*sizeof(EXT_AD) - 1) & ~(sizeof(EXT_AD)-1) ;
1317 // init AllocDesc
1318 ( (PALLOC_EXT_DESC) ((*Buff)+BufOffs))->lengthAllocDescs = len2;
1319 ( (PALLOC_EXT_DESC) ((*Buff)+BufOffs))->previousAllocExtLocation = prevTagLoc;
1320 Tag = (tag*)((*Buff)+BufOffs);
1321 TagLen = len2;
1322 BufOffs += sizeof(ALLOC_EXT_DESC);
1323 }
1324 MyFreePool__(saved_Alloc);
1325 }
1326 status = UDFResizeExtent(Vcb, PartNum, NewLen, TRUE, AllocExtent);
1327 return status;
1328 } // end UDFBuildExtAllocDescs()*/
1329
1330 void
1331 UDFDiscardFESpace(
1332 IN PVCB Vcb,
1333 IN PEXTENT_MAP Mapping,
1334 IN uint32 lim
1335 )
1336 {
1337 #ifdef UDF_FE_ALLOCATION_CHARGE // UDF_FE_ALLOCATION_CHARGE
1338 PEXTENT_MAP Mapping2;
1339 uint32 i;
1340
1341 KdPrint((" DiscardFESpace\n"));
1342 Mapping2 = Mapping;
1343 for(i=0;i<lim;i++, Mapping++) {
1344 // we should not discard allocated FEs
1345 if( (Mapping->extLength >> 30) == EXTENT_RECORDED_ALLOCATED) {
1346 KdPrint((" used @ %x\n", Mapping->extLocation));
1347 Mapping->extLength = Vcb->LBlockSize | (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
1348 Mapping->extLocation = 0;
1349 } else {
1350 KdPrint((" free @ %x\n", Mapping->extLocation));
1351 }
1352 }
1353 UDFMarkSpaceAsXXX(Vcb, 0, Mapping2, AS_DISCARDED);
1354
1355 MyFreePool__(Mapping2);
1356 #else // UDF_FE_ALLOCATION_CHARGE
1357 ASSERT(!Mapping);
1358 return;
1359 #endif // UDF_FE_ALLOCATION_CHARGE
1360 } // end UDFDiscardFESpace()
1361
1362 OSSTATUS
1363 UDFInitAllocationCache(
1364 IN PVCB Vcb,
1365 IN uint32 AllocClass,
1366 OUT PUDF_ALLOCATION_CACHE_ITEM* _AllocCache,
1367 OUT uint32* _lim,
1368 IN BOOLEAN Init
1369 )
1370 {
1371 PUDF_ALLOCATION_CACHE_ITEM AllocCache;
1372 PUDF_ALLOCATION_CACHE_ITEM* pAllocCache;
1373 uint32 i, lim;
1374 uint32* plim;
1375
1376 switch(AllocClass) {
1377 case UDF_PREALLOC_CLASS_FE:
1378 KdPrint(("AllocationCache FE:\n"));
1379 pAllocCache = &(Vcb->FEChargeCache);
1380 plim = &(Vcb->FEChargeCacheMaxSize);
1381 lim = 32;
1382 break;
1383 case UDF_PREALLOC_CLASS_DIR:
1384 KdPrint(("AllocationCache DIR:\n"));
1385 pAllocCache = &(Vcb->PreallocCache);
1386 plim = &(Vcb->PreallocCacheMaxSize);
1387 lim = 32;
1388 break;
1389 default:
1390 return STATUS_INVALID_PARAMETER;
1391 }
1392 if(!(*plim)) {
1393 if(!Init) {
1394 return STATUS_UNSUCCESSFUL;
1395 }
1396 (*pAllocCache) = AllocCache =
1397 (PUDF_ALLOCATION_CACHE_ITEM)
1398 MyAllocatePoolTag__(NonPagedPool , sizeof(UDF_ALLOCATION_CACHE_ITEM)*lim,
1399 MEM_ALLOC_CACHE_TAG);
1400 if(!AllocCache) {
1401 return STATUS_INSUFFICIENT_RESOURCES;
1402 }
1403 RtlZeroMemory(AllocCache, sizeof(UDF_ALLOCATION_CACHE_ITEM)*lim);
1404 for(i=0; i<lim; i++) {
1405 AllocCache[i].ParentLocation = LBA_NOT_ALLOCATED;
1406 }
1407 (*plim) = lim;
1408 } else {
1409 lim = (*plim);
1410 AllocCache = (*pAllocCache);
1411 }
1412 (*_lim) = lim;
1413 (*_AllocCache) = AllocCache;
1414
1415 return STATUS_SUCCESS;
1416 } // end UDFInitAllocationCache()
1417
1418 OSSTATUS
1419 UDFGetCachedAllocation(
1420 IN PVCB Vcb,
1421 IN uint32 ParentLocation,
1422 OUT PEXTENT_INFO Ext,
1423 OUT uint32* Items, // optional
1424 IN uint32 AllocClass
1425 )
1426 {
1427 PUDF_ALLOCATION_CACHE_ITEM AllocCache;
1428 uint32 i, lim;
1429 OSSTATUS status;
1430
1431 UDFAcquireResourceExclusive(&(Vcb->PreallocResource),TRUE);
1432
1433 status = UDFInitAllocationCache(Vcb, AllocClass, &AllocCache, &lim, FALSE);
1434 if(!OS_SUCCESS(status)) {
1435 UDFReleaseResource(&(Vcb->PreallocResource));
1436 return status;
1437 }
1438 KdPrint(("Get AllocationCache for %x\n", ParentLocation));
1439
1440 for(i=0; i<lim; i++) {
1441 if(AllocCache[i].ParentLocation == ParentLocation) {
1442 (*Ext) = AllocCache[i].Ext;
1443 AdPrint((" map %x (%x)\n", Ext->Mapping, i));
1444 if(Items) {
1445 (*Items) = AllocCache[i].Items;
1446 }
1447 RtlZeroMemory(&(AllocCache[i]), sizeof(AllocCache[i]));
1448 AllocCache[i].ParentLocation = LBA_NOT_ALLOCATED;
1449 UDFReleaseResource(&(Vcb->PreallocResource));
1450 return STATUS_SUCCESS;
1451 }
1452 }
1453 AdPrint((" no map\n"));
1454 UDFReleaseResource(&(Vcb->PreallocResource));
1455 return STATUS_UNSUCCESSFUL;
1456 } // end UDFGetCachedAllocation()
1457
1458 OSSTATUS
1459 UDFStoreCachedAllocation(
1460 IN PVCB Vcb,
1461 IN uint32 ParentLocation,
1462 IN PEXTENT_INFO Ext,
1463 IN uint32 Items,
1464 IN uint32 AllocClass
1465 )
1466 {
1467 PUDF_ALLOCATION_CACHE_ITEM AllocCache;
1468 uint32 i, lim;
1469 OSSTATUS status;
1470
1471 UDFAcquireResourceExclusive(&(Vcb->PreallocResource),TRUE);
1472
1473 status = UDFInitAllocationCache(Vcb, AllocClass, &AllocCache, &lim, TRUE);
1474 if(!OS_SUCCESS(status)) {
1475 UDFReleaseResource(&(Vcb->PreallocResource));
1476 return status;
1477 }
1478 KdPrint(("Store AllocationCache for %x, map %x\n", ParentLocation, Ext->Mapping));
1479
1480 for(i=0; i<lim; i++) {
1481 if(AllocCache[i].ParentLocation == LBA_NOT_ALLOCATED) {
1482 AdPrint((" stored in %x\n", i));
1483 AllocCache[i].Ext = (*Ext);
1484 AllocCache[i].Items = Items;
1485 AllocCache[i].ParentLocation = ParentLocation;
1486 UDFReleaseResource(&(Vcb->PreallocResource));
1487 return STATUS_SUCCESS;
1488 }
1489 }
1490 //
1491 AdPrint((" drop map %x (%x)\n", AllocCache[lim-1].Ext.Mapping, lim-1));
1492 switch(AllocClass) {
1493 case UDF_PREALLOC_CLASS_FE:
1494 UDFDiscardFESpace(Vcb, AllocCache[lim-1].Ext.Mapping, AllocCache[lim-1].Items);
1495 break;
1496 case UDF_PREALLOC_CLASS_DIR:
1497 UDFMarkSpaceAsXXX(Vcb, 0, AllocCache[lim-1].Ext.Mapping, AS_DISCARDED);
1498 break;
1499 }
1500 RtlMoveMemory(&(AllocCache[1]), &(AllocCache[0]), sizeof(UDF_ALLOCATION_CACHE_ITEM)*(lim-1));
1501 AllocCache[0].Ext = (*Ext);
1502 AllocCache[0].Items = Items;
1503 AllocCache[0].ParentLocation = ParentLocation;
1504 AdPrint((" stored in 0\n"));
1505 UDFReleaseResource(&(Vcb->PreallocResource));
1506 return STATUS_SUCCESS;
1507 } // end UDFStoreCachedAllocation()
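/*
    Usage sketch (illustrative): UDFGetCachedAllocation()/UDFStoreCachedAllocation()
    form a small cache keyed by the parent FE location. UDFAllocateFESpace() and
    UDFFlushFESpace() below use it roughly like this ('fe_loc', 'Ext', 'Dloc' and
    'lim' are the locals of those routines):

        // try to reuse a previously charged extent for this directory
        status = UDFGetCachedAllocation(Vcb, fe_loc, Ext, NULL, UDF_PREALLOC_CLASS_FE);
        if(!OS_SUCCESS(status)) {
            // no cached charge -> allocate a fresh FE-charge extent
        }
        ...
        // on directory cleanup, park the unused charge for later reuse
        UDFStoreCachedAllocation(Vcb, Dloc->FELoc.Mapping[0].extLocation,
                                 &Dloc->DirIndex->FECharge, lim, UDF_PREALLOC_CLASS_FE);
*/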
1508
1509 OSSTATUS
1510 UDFFlushAllCachedAllocations(
1511 IN PVCB Vcb,
1512 IN uint32 AllocClass
1513 )
1514 {
1515 PUDF_ALLOCATION_CACHE_ITEM AllocCache;
1516 uint32 i, lim;
1517 OSSTATUS status;
1518
1519 KdPrint(("Flush AllocationCache\n"));
1520 UDFAcquireResourceExclusive(&(Vcb->PreallocResource),TRUE);
1521
1522 status = UDFInitAllocationCache(Vcb, AllocClass, &AllocCache, &lim, FALSE);
1523 if(!OS_SUCCESS(status)) {
1524 UDFReleaseResource(&(Vcb->PreallocResource));
1525 return status;
1526 }
1527
1528 for(i=0; i<lim; i++) {
1529 if(AllocCache[i].ParentLocation != LBA_NOT_ALLOCATED) {
1530 switch(AllocClass) {
1531 case UDF_PREALLOC_CLASS_FE:
1532 UDFDiscardFESpace(Vcb, AllocCache[i].Ext.Mapping, AllocCache[i].Items);
1533 break;
1534 case UDF_PREALLOC_CLASS_DIR:
1535 UDFMarkSpaceAsXXX(Vcb, 0, AllocCache[i].Ext.Mapping, AS_DISCARDED);
1536 break;
1537 }
1538 }
1539 }
1540 MyFreePool__(AllocCache);
1541 switch(AllocClass) {
1542 case UDF_PREALLOC_CLASS_FE:
1543 Vcb->FEChargeCache = NULL;
1544 Vcb->FEChargeCacheMaxSize = 0;
1545 break;
1546 case UDF_PREALLOC_CLASS_DIR:
1547 Vcb->PreallocCache = NULL;
1548 Vcb->PreallocCacheMaxSize = 0;
1549 break;
1550 }
1551 UDFReleaseResource(&(Vcb->PreallocResource));
1552 //
1553 return STATUS_SUCCESS;
1554 } // end UDFFlushAllCachedAllocations()
1555
1556 /*
1557 This routine allocates space for the FE of the file being created.
1558 If FE-Charge is enabled it reserves an extent & allocates
1559 space in it. This works much faster than the usual way, both when
1560 allocating & when accessing the disk.
1561 If FE-Charge is disabled the FE may be allocated at any suitable
1562 location
1563 */
1564 OSSTATUS
1565 UDFAllocateFESpace(
1566 IN PVCB Vcb,
1567 IN PUDF_FILE_INFO DirInfo,
1568 IN uint32 PartNum,
1569 IN PEXTENT_INFO FEExtInfo,
1570 IN uint32 Len
1571 )
1572 {
1573 #ifdef UDF_FE_ALLOCATION_CHARGE // UDF_FE_ALLOCATION_CHARGE
1574 OSSTATUS status;
1575 PEXTENT_INFO Ext;
1576 EXTENT_AD Extent;
1577 BOOLEAN retry = FALSE;
1578 uint32 i, lim;
1579
1580 /*
1581 1. #Dir1#->*File* -> Dir1's FECharge
1582 2. #Dir1#->*Dir* -> Dir1's FECharge
1583 3. #Dir1#->*SDir* -> Dir1's FECharge
1584 4. Dir1->#SDir#->*Stream* -> Dir1's FEChargeSDir
1585 5. Dir1->#File#->*SDir* -> Dir1's FEChargeSDir
1586 6. Dir1->#Dir#->*SDir* -> (see p.2)
1587 7. Dir1->File->#SDir#->*Stream* -> Dir1's FEChargeSDir
1588 8. Dir1->Dir->#SDir#->*Stream* -> (see p.4)
1589
1590 ## ~ DirInfo
1591 ** ~ Object to be created
1592
1593 */
1594
1595 // ASSERT(!FEExtInfo->Mapping);
1596 // check if DirInfo we are called with is a Directory
1597 // (it can be a file with SDir)
1598 if(!DirInfo || !DirInfo->Dloc->DirIndex ||
1599 ((lim = ((DirInfo->Dloc->FE_Flags & UDF_FE_FLAG_IS_SDIR) ? Vcb->FEChargeSDir : Vcb->FECharge)) <= 1))
1600 #endif // UDF_FE_ALLOCATION_CHARGE
1601 return UDFAllocFreeExtent(Vcb, Len,
1602 UDFPartStart(Vcb, PartNum), UDFPartEnd(Vcb, PartNum), FEExtInfo, EXTENT_FLAG_VERIFY);
1603 #ifdef UDF_FE_ALLOCATION_CHARGE // UDF_FE_ALLOCATION_CHARGE
1604
1605 Ext = &(DirInfo->Dloc->DirIndex->FECharge);
1606
1607 while(TRUE) {
1608
1609 if(!Ext->Mapping) {
1610 ULONG p_start;
1611 ULONG p_end;
1612 ULONG fe_loc;
1613 ULONG l1, l2;
1614
1615 p_start = UDFPartStart(Vcb, PartNum);
1616 p_end = UDFPartEnd(Vcb, PartNum);
1617 fe_loc = DirInfo->Dloc->FELoc.Mapping[0].extLocation;
1618
1619 status = UDFGetCachedAllocation(Vcb, fe_loc, Ext, NULL, UDF_PREALLOC_CLASS_FE);
1620 if(OS_SUCCESS(status)) {
1621 // do nothing, even do not unpack
1622 } else
1623 if(Vcb->LowFreeSpace) {
1624 status = UDFAllocFreeExtent(Vcb, Len << Vcb->LBlockSizeBits,p_start, p_end, FEExtInfo, EXTENT_FLAG_VERIFY);
1625 if(OS_SUCCESS(status)) {
1626 KdPrint(("FE @ %x (1)\n", FEExtInfo->Mapping[0].extLocation ));
1627 }
1628 return status;
1629 } else {
1630 if(fe_loc > p_start + 512*16) {
1631 l1 = fe_loc - 512*16;
1632 } else {
1633 l1 = p_start;
1634 }
1635 if(fe_loc + 512*16 < p_end) {
1636 l2 = fe_loc + 512*16;
1637 } else {
1638 l2 = p_end;
1639 }
1640 status = UDFAllocFreeExtent(Vcb, lim << Vcb->LBlockSizeBits, l1, l2, Ext, EXTENT_FLAG_VERIFY);
1641 if(!OS_SUCCESS(status)) {
1642 status = UDFAllocFreeExtent(Vcb, lim << Vcb->LBlockSizeBits, (p_start+fe_loc)/2, (fe_loc+p_end)/2, Ext, EXTENT_FLAG_VERIFY);
1643 }
1644 if(!OS_SUCCESS(status)) {
1645 status = UDFAllocFreeExtent(Vcb, lim << Vcb->LBlockSizeBits, p_start, p_end, Ext, EXTENT_FLAG_VERIFY);
1646 }
1647 if(!OS_SUCCESS(status)) {
1648 status = UDFAllocFreeExtent(Vcb, lim << Vcb->LBlockSizeBits, p_start+1024, p_end-1024, Ext, EXTENT_FLAG_VERIFY);
1649 }
1650 if(!OS_SUCCESS(status = UDFAllocFreeExtent(Vcb, lim << Vcb->LBlockSizeBits, p_start, p_end, Ext, EXTENT_FLAG_VERIFY) )) {
1651 // can't pre-allocate space for multiple FEs. Try single FE
1652 KdPrint(("allocate single FE entry\n"));
1653 status = UDFAllocFreeExtent(Vcb, Len,
1654 p_start, p_end, FEExtInfo, EXTENT_FLAG_VERIFY);
1655 if(OS_SUCCESS(status)) {
1656 KdPrint(("FE @ %x (2)\n", FEExtInfo->Mapping[0].extLocation ));
1657 }
1658 return status;
1659 }
1660 status = UDFUnPackMapping(Vcb, Ext);
1661 if(!OS_SUCCESS(status)) {
1662 MyFreePool__(Ext->Mapping);
1663 Ext->Mapping = NULL;
1664 return status;
1665 }
1666 }
1667 }
1668
1669 for(i=0;i<lim;i++) {
1670 if( (Ext->Mapping[i].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED ) {
1671 Ext->Mapping[i].extLength &= UDF_EXTENT_LENGTH_MASK; // EXTENT_RECORDED_ALLOCATED
1672
1673 Extent.extLength = Vcb->LBlockSize | (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1674 Extent.extLocation = Ext->Mapping[i].extLocation;
1675
1676 if(Vcb->BSBM_Bitmap) {
1677 uint32 lba = Ext->Mapping[i].extLocation;
1678 if(UDFGetBadBit((uint32*)(Vcb->BSBM_Bitmap), lba)) {
1679 KdPrint(("Remove BB @ %x from FE charge\n", lba));
1680 Ext->Mapping[i].extLength |= (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
1681 Ext->Mapping[i].extLocation = 0;
1682 continue;
1683 }
1684 }
1685
1686 FEExtInfo->Mapping = UDFExtentToMapping(&Extent);
1687 if(!FEExtInfo->Mapping) {
1688 ASSERT(!(Ext->Mapping[i].extLength >> 30));
1689 Ext->Mapping[i].extLength |= (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1690 return STATUS_INSUFFICIENT_RESOURCES;
1691 }
1692 KdPrint(("FE @ %x (3)\n", FEExtInfo->Mapping[0].extLocation ));
1693 FEExtInfo->Length = Len;
1694 FEExtInfo->Offset = 0;
1695 FEExtInfo->Modified = TRUE;
1696 return STATUS_SUCCESS;
1697 }
1698 }
1699
1700 if(Vcb->LowFreeSpace) {
1701 status = UDFAllocFreeExtent(Vcb, Len,
1702 UDFPartStart(Vcb, PartNum), UDFPartEnd(Vcb, PartNum), FEExtInfo, EXTENT_FLAG_VERIFY);
1703 if(OS_SUCCESS(status)) {
1704 KdPrint(("FE @ %x (4)\n", FEExtInfo->Mapping[0].extLocation ));
1705 }
1706 return status;
1707 }
1708 if(retry)
1709 return STATUS_INSUFFICIENT_RESOURCES;
1710
1711 // we can get here if there are no free slots in
1712 // preallocated FE charge. So, we should release
1713 // memory and try to allocate space for new FE charge.
1714 MyFreePool__(Ext->Mapping);
1715 Ext->Mapping = NULL;
1716 retry = TRUE;
1717 }
1718 return STATUS_INSUFFICIENT_RESOURCES;
1719 #endif // UDF_FE_ALLOCATION_CHARGE
1720
1721 } // end UDFAllocateFESpace()
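/*
    FE-charge sketch (illustrative): instead of allocating one block per new
    FileEntry, the routine above reserves a run of 'lim' blocks (the "charge")
    on the first request and then hands them out one at a time by flipping the
    slot type from NOT_RECORDED_ALLOCATED to RECORDED_ALLOCATED:

        charge mapping : [NR_ALLOC][NR_ALLOC][NR_ALLOC]...
        after 1st FE   : [RECORDED][NR_ALLOC][NR_ALLOC]...
        after 2nd FE   : [RECORDED][RECORDED][NR_ALLOC]...

    Unused slots are later either cached (UDFStoreCachedAllocation) or released
    by UDFDiscardFESpace() when the directory goes away.
*/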
1722
1723 /*
1724 This routine frees space allocated for FE.
1725 */
1726 void
1727 UDFFreeFESpace(
1728 IN PVCB Vcb,
1729 IN PUDF_FILE_INFO DirInfo,
1730 IN PEXTENT_INFO FEExtInfo
1731 )
1732 {
1733 #ifdef UDF_FE_ALLOCATION_CHARGE // UDF_FE_ALLOCATION_CHARGE
1734 PEXTENT_INFO Ext;
1735 uint32 i, lim, j=-1;
1736 uint32 Lba;
1737
1738 // check if the DirInfo we are called with is a Directory
1739 // (it can be a file with SDir)
1740 if(DirInfo && DirInfo->Dloc->DirIndex &&
1741 (Ext = &(DirInfo->Dloc->DirIndex->FECharge))->Mapping) {
1742 if(!FEExtInfo->Mapping)
1743 return;
1744 Lba = FEExtInfo->Mapping[0].extLocation;
1745
1746 lim = (DirInfo->Dloc->FE_Flags & UDF_FE_FLAG_IS_SDIR) ? Vcb->FEChargeSDir : Vcb->FECharge;
1747 for(i=0;i<lim;i++) {
1748 if(Ext->Mapping[i].extLocation == Lba) {
1749 ASSERT(!(Ext->Mapping[i].extLength >> 30));
1750 Ext->Mapping[i].extLength |= (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1751 goto clean_caller;
1752 }
1753 if(!Ext->Mapping[i].extLocation) {
1754 j = i;
1755 }
1756 }
1757 if(j != (ULONG)-1) {
1758 i = j;
1759 Ext->Mapping[i].extLocation = Lba;
1760 Ext->Mapping[i].extLength = Vcb->LBlockSize | (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1761 goto clean_caller;
1762 }
1763 }
1764 #endif // UDF_FE_ALLOCATION_CHARGE
1765 UDFMarkSpaceAsXXX(Vcb, 0, FEExtInfo->Mapping, AS_DISCARDED); // free
1766 clean_caller:
1767 FEExtInfo->Mapping[0].extLocation = 0;
1768 FEExtInfo->Mapping[0].extLength = (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
1769 return;
1770 } // end UDFFreeFESpace()
1771 #endif //UDF_READ_ONLY_BUILD
1772
1773 /*
1774 This routine flushes the FE-Charge buffer, marks unused blocks as free
1775 in the bitmap & releases the memory allocated for FE-Charge management
1776 */
1777 void
1778 UDFFlushFESpace(
1779 IN PVCB Vcb,
1780 IN PUDF_DATALOC_INFO Dloc,
1781 IN BOOLEAN Discard
1782 )
1783 {
1784 #ifdef UDF_FE_ALLOCATION_CHARGE // UDF_FE_ALLOCATION_CHARGE
1785 PEXTENT_MAP Mapping;
1786 uint32 lim;
1787
1788 if(!(Mapping = Dloc->DirIndex->FECharge.Mapping))
1789 return;
1790
1791 lim = (Dloc->FE_Flags & UDF_FE_FLAG_IS_SDIR) ? Vcb->FEChargeSDir : Vcb->FECharge;
1792
1793 if(!Discard) {
1794 // cache it!
1795 if(OS_SUCCESS(UDFStoreCachedAllocation(Vcb,
1796 Dloc->FELoc.Mapping[0].extLocation,
1797 &Dloc->DirIndex->FECharge, lim, UDF_PREALLOC_CLASS_FE))) {
1798 Dloc->DirIndex->FECharge.Mapping = NULL;
1799 return;
1800 }
1801 }
1802 Dloc->DirIndex->FECharge.Mapping = NULL;
1803 UDFDiscardFESpace(Vcb, Mapping, lim);
1804 #else // UDF_FE_ALLOCATION_CHARGE
1805 ASSERT(!Dloc->DirIndex->FECharge.Mapping);
1806 return;
1807 #endif // UDF_FE_ALLOCATION_CHARGE
1808 } // end UDFFlushFESpace()
1809
1810 #ifndef UDF_READ_ONLY_BUILD
1811 /*
1812 This routine rebuilds the mapping on write attempts to an Alloc-Not-Rec area.
1813 Here we assume that the required area lies within a single frag.
1814 */
1815 OSSTATUS
1816 UDFMarkAllocatedAsRecorded(
1817 IN PVCB Vcb,
1818 IN int64 Offset,
1819 IN uint32 Length,
1820 IN PEXTENT_INFO ExtInfo // Extent array
1821 )
1822 {
1823 uint32 i, len, lba, sLen;
1824 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
1825 PEXTENT_MAP NewExtent;
1826 uint32 BS = Vcb->BlockSize;
1827 uint32 LBS = Vcb->LBlockSize;
1828 uint32 BSh = Vcb->BlockSizeBits;
1829 BOOLEAN TryPack = TRUE;
1830 #ifdef UDF_DBG
1831 int64 check_size;
1832 #endif //UDF_DBG
1833 // I don't know what other comment could be added here.
1834 // Just believe that it works
1835 lba = UDFExtentOffsetToLba(Vcb, ExtInfo->Mapping, (Offset & ~((int64)LBS-1)), NULL, NULL, NULL, &i);
1836 if(i == (ULONG)-1) return STATUS_INVALID_PARAMETER;
1837 #ifdef UDF_DBG
1838 check_size = UDFGetExtentLength(ExtInfo->Mapping);
1839 ASSERT(!(check_size & (LBS-1)));
1840 #endif //UDF_DBG
1841 AdPrint(("Alloc->Rec ExtInfo %x, Extent %x\n", ExtInfo, Extent));
1842 if((Extent[i].extLength >> 30) == EXTENT_RECORDED_ALLOCATED) return STATUS_SUCCESS;
1843 if((Extent[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) return STATUS_INVALID_PARAMETER;
1844 ASSERT((((uint32)Offset) & (LBS-1)) + Length <= (Extent[i].extLength & UDF_EXTENT_LENGTH_MASK));
1845 sLen = (( (((uint32)Offset) & (LBS-1)) + Length+LBS-1) & ~(LBS-1)) >> BSh;
1846 if((Extent[i].extLocation == lba) && (((Extent[i].extLength & UDF_EXTENT_LENGTH_MASK ) >> BSh) == sLen)) {
1847 // xxxxxx -> RRRRRR
1848 Extent[i].extLength &= UDF_EXTENT_LENGTH_MASK;
1849 // Extent[i].extLength |= (EXTENT_RECORDED_ALLOCATED << 30); // = 0;
1850 ExtInfo->Modified = TRUE;
1851 if(i &&
1852 ((Extent[i-1].extLength >> 30) == EXTENT_RECORDED_ALLOCATED) &&
1853 (lba == (Extent[i-1].extLocation + ((len = Extent[i-1].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh))) &&
1854 ((len + (Extent[i].extLength & UDF_EXTENT_LENGTH_MASK)) <= UDF_MAX_EXTENT_LENGTH) &&
1855 (i == ((UDFGetMappingLength(Extent) / sizeof(EXTENT_MAP)) - 2)) &&
1856 TRUE) {
1857 // make optimization for sequentially written files
1858 Extent[i-1].extLength += Extent[i].extLength;
1859 Extent[i].extLocation = 0;
1860 Extent[i].extLength = 0;
1861 } else {
1862 UDFPackMapping(Vcb, ExtInfo);
1863 }
1864 AdPrint(("Alloc->Rec (1) new %x\n", ExtInfo->Mapping));
1865 ASSERT(check_size == UDFGetExtentLength(ExtInfo->Mapping));
1866 AdPrint(("Alloc->Rec: ExtInfo %x, Extent %x\n", ExtInfo, ExtInfo->Mapping));
1867 return STATUS_SUCCESS;
1868 }
1869 if(Extent[i].extLocation < lba) {
1870 if( (((Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh) - (lba - Extent[i].extLocation))
1871 > sLen ) {
1872 // xxxxxx -> xxRRxx
1873 NewExtent = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , UDFGetMappingLength(Extent) + sizeof(EXTENT_MAP)*2,
1874 MEM_EXTMAP_TAG);
1875 if(!NewExtent) return STATUS_INSUFFICIENT_RESOURCES;
1876 Extent[i].extLength &= UDF_EXTENT_LENGTH_MASK;
1877 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
1878 RtlCopyMemory((int8*)&(NewExtent[i+3]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
1879 NewExtent[i].extLocation = Extent[i].extLocation;
1880 NewExtent[i].extLength = (lba - Extent[i].extLocation) << BSh;
1881 NewExtent[i+1].extLength = (Length+BS-1) & ~(BS-1);
1882 NewExtent[i+1].extLocation = lba;
1883 NewExtent[i+2].extLength = Extent[i].extLength - NewExtent[i].extLength - NewExtent[i+1].extLength;
1884 NewExtent[i+2].extLocation = lba + ((Length+BS-1) >> BSh);
1885 ASSERT(!(NewExtent[i].extLength >> 30));
1886 ASSERT(!(NewExtent[i+2].extLength >> 30));
1887 NewExtent[i].extLength |= (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1888 NewExtent[i+2].extLength |= (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1889 TryPack = FALSE;
1890 AdPrint(("Alloc->Rec (2) new %x\n", NewExtent));
1891 } else {
1892 // xxxxxx -> xxRRRR
1893 NewExtent = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , UDFGetMappingLength(Extent) + sizeof(EXTENT_MAP),
1894 MEM_EXTMAP_TAG);
1895 if(!NewExtent) return STATUS_INSUFFICIENT_RESOURCES;
1896 Extent[i].extLength &= UDF_EXTENT_LENGTH_MASK;
1897 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
1898 RtlCopyMemory((int8*)&(NewExtent[i+2]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
1899 NewExtent[i].extLocation = Extent[i].extLocation;
1900 NewExtent[i].extLength = (lba - Extent[i].extLocation) << BSh;
1901 NewExtent[i+1].extLength = Extent[i].extLength - NewExtent[i].extLength;
1902 NewExtent[i+1].extLocation = lba;
1903 ASSERT(!(NewExtent[i].extLength >> 30));
1904 NewExtent[i].extLength |= (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1905 AdPrint(("Alloc->Rec (3) new %x\n", NewExtent));
1906 }
1907 } else {
1908 // xxxxxx -> RRRRxx
1909 NewExtent = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , UDFGetMappingLength(Extent) + sizeof(EXTENT_MAP),
1910 MEM_EXTMAP_TAG);
1911 if(!NewExtent) return STATUS_INSUFFICIENT_RESOURCES;
1912 Extent[i].extLength &= UDF_EXTENT_LENGTH_MASK;
1913 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
1914 RtlCopyMemory((int8*)&(NewExtent[i+2]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
1915 NewExtent[i].extLocation = Extent[i].extLocation;
1916 NewExtent[i].extLength = (Length+BS-1) & ~(BS-1);
1917 NewExtent[i+1].extLength = Extent[i].extLength - NewExtent[i].extLength;
1918 NewExtent[i+1].extLocation = Extent[i].extLocation + (NewExtent[i].extLength >> BSh);
1919 ASSERT(!(NewExtent[i+1].extLength >> 30));
1920 NewExtent[i+1].extLength |= (EXTENT_NOT_RECORDED_ALLOCATED << 30);
1921 AdPrint(("Alloc->Rec (4) new %x\n", NewExtent));
1922 }
1923
1924 //ASSERT(check_size == UDFGetExtentLength(Extent));
1925 //ASSERT(!(check_size & (LBS-1)));
1926
1927 AdPrint(("Free Extent %x (new %x)\n", Extent, NewExtent));
1928 MyFreePool__(Extent);
1929 ExtInfo->Modified = TRUE;
1930 ExtInfo->Mapping = NewExtent;
1931 if(TryPack)
1932 UDFPackMapping(Vcb, ExtInfo);
1933 ASSERT(check_size == UDFGetExtentLength(ExtInfo->Mapping));
1934 ASSERT(!(check_size & (LBS-1)));
1935
1936 AdPrint(("Alloc->Rec: ExtInfo %x, Extent %x\n", ExtInfo, ExtInfo->Mapping));
1937
1938 return STATUS_SUCCESS;
1939 } // end UDFMarkAllocatedAsRecorded()
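/*
    Editorial sketch of the xxxxxx -> xxRRxx case handled above: one
    Not-Rec-Allocated frag is split into a head that stays Not-Rec-Allocated,
    a middle part that becomes Recorded, and a tail that stays
    Not-Rec-Allocated. The SKETCH_FRAG type and names are hypothetical; the
    real code performs the same arithmetic directly on EXTENT_MAP entries.
*/
#if 0
typedef struct _SKETCH_FRAG {
    uint32 Lba;        // first block of the frag
    uint32 Blocks;     // frag length in blocks
    uint32 Type;       // one of the EXTENT_xxx values
} SKETCH_FRAG;

static void
SketchSplitFrag(
    IN  SKETCH_FRAG* Src,   // Not-Rec-Allocated frag being written to
    IN  uint32 RecStart,    // first block to mark as recorded (relative to Src)
    IN  uint32 RecBlocks,   // number of blocks to mark as recorded
    OUT SKETCH_FRAG Out[3]  // head / recorded middle / tail
    )
{
    Out[0].Lba    = Src->Lba;
    Out[0].Blocks = RecStart;
    Out[0].Type   = EXTENT_NOT_RECORDED_ALLOCATED;

    Out[1].Lba    = Src->Lba + RecStart;
    Out[1].Blocks = RecBlocks;
    Out[1].Type   = EXTENT_RECORDED_ALLOCATED;

    Out[2].Lba    = Src->Lba + RecStart + RecBlocks;
    Out[2].Blocks = Src->Blocks - RecStart - RecBlocks;
    Out[2].Type   = EXTENT_NOT_RECORDED_ALLOCATED;
}
#endif //0 -- illustration only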
1940
1941 /*
1942 This routine rebuilds the mapping on write attempts to a Not-Alloc-Not-Rec area.
1943 Here we assume that the required area lies within a single frag.
1944 */
1945 OSSTATUS
1946 UDFMarkNotAllocatedAsAllocated(
1947 IN PVCB Vcb,
1948 IN int64 Offset,
1949 IN uint32 Length,
1950 IN PEXTENT_INFO ExtInfo // Extent array
1951 )
1952 {
1953 uint32 i, len, /*lba,*/ d, l, BOffs, j;
1954 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
1955 PEXTENT_MAP NewExtent;
1956 // uint32 BS = Vcb->BlockSize;
1957 uint32 BSh = Vcb->BlockSizeBits;
1958 OSSTATUS status;
1959 EXTENT_INFO TmpExtInf;
1960 uint32 aLen, sLen;
1961 uint32 LBS = Vcb->LBlockSize;
1962 // I don't know what other comment could be added here.
1963 // Just believe that it works
1964 /*lba = */
1965 #ifndef ALLOW_SPARSE
1966 BrutePoint();
1967 #endif
1968 AdPrint(("Not->Alloc ExtInfo %x, Extent %x\n", ExtInfo, Extent));
1969 UDFExtentOffsetToLba(Vcb, ExtInfo->Mapping, Offset, NULL, NULL, NULL, &i);
1970 if(i == (ULONG)-1) return STATUS_INVALID_PARAMETER;
1971 if((Extent[i].extLength >> 30) != EXTENT_NOT_RECORDED_NOT_ALLOCATED) return STATUS_SUCCESS;
1972
1973 uint32 PartNum = UDFGetPartNumByPhysLba(Vcb, Extent[0].extLocation);
1974 BOffs = (uint32)(Offset >> BSh);
1975 // length of the existing Not-Alloc-Not-Rec frag (in sectors)
1976 sLen = (( (((uint32)Offset) & (LBS-1)) + Length+LBS-1) & ~(LBS-1)) >> BSh;
1977 // required allocation length increment (in bytes)
1978 aLen = (uint32)( ((Offset+Length+LBS-1) & ~(LBS-1)) - (Offset & ~(LBS-1)));
1979
1980 // try to extend previous frag or allocate space _after_ it to
1981 // avoid backward seeks, if previous frag is not Not-Rec-Not-Alloc
1982 if(i && ((Extent[i-1].extLength >> 30) != EXTENT_NOT_RECORDED_NOT_ALLOCATED) ) {
1983 status = UDFAllocFreeExtent(Vcb, aLen,
1984 Extent[i-1].extLocation + ((Extent[i-1].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh),
1985 min(UDFPartEnd(Vcb, PartNum), Extent[i-1].extLocation + ((Extent[i-1].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh) + sLen ),
1986 &TmpExtInf, ExtInfo->Flags /*& EXTENT_FLAG_ALLOC_MASK*/);
1987 if(status == STATUS_DISK_FULL)
1988 // if there are not enough free blocks after that frag...
1989 goto try_alloc_anywhere;
1990 } else {
1991 try_alloc_anywhere:
1992 // ... try to alloc required disk space anywhere
1993 status = UDFAllocFreeExtent(Vcb, aLen,
1994 UDFPartStart(Vcb, PartNum),
1995 UDFPartEnd(Vcb, PartNum),
1996 &TmpExtInf, ExtInfo->Flags /*& EXTENT_FLAG_ALLOC_MASK*/);
1997 }
1998 // check for successful allocation
1999 if(!OS_SUCCESS(status)) {
2000 AdPrint(("Not->Alloc no free\n"));
2001 return status;
2002 }
2003 // get number of frags in allocated block
2004 d = (UDFGetMappingLength(TmpExtInf.Mapping) / sizeof(EXTENT_MAP)) - 1;
2005 // calculate number of existing blocks before the frag to be changed
2006 l=0;
2007 for(j=0; j<i; j++) {
2008 l += (uint32)((Extent[j].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
2009 }
2010 // and now just update mapping...
2011 if( (l == BOffs) && (((Extent[j].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh) == sLen) ) {
2012 // xxxxxx -> RRRRRR
2013 // (d-1) - since we have to replace the last frag of Extent with 1 or more frags of TmpExtInf.Mapping
2014 NewExtent = (PEXTENT_AD)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + (d-1)*sizeof(EXTENT_MAP) );
2015 if(!NewExtent) {
2016 MyFreePool__(TmpExtInf.Mapping);
2017 return STATUS_INSUFFICIENT_RESOURCES;
2018 }
2019 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2020 RtlCopyMemory((int8*)&(NewExtent[i]), (int8*)(TmpExtInf.Mapping), d*sizeof(EXTENT_MAP) );
2021 RtlCopyMemory((int8*)&(NewExtent[i+d]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2022 AdPrint(("Not->Alloc (1) new %x\n", NewExtent));
2023 } else
2024 if(l < BOffs) {
2025 // .ExtLength, BOffs & l are already aligned...
2026 if( (((Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh) - (BOffs-l)) > sLen ) {
2027 // xxxxxx -> xxRRxx
2028 NewExtent = (PEXTENT_AD)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + (d+1)*sizeof(EXTENT_MAP) );
2029 if(!NewExtent) {
2030 MyFreePool__(TmpExtInf.Mapping);
2031 return STATUS_INSUFFICIENT_RESOURCES;
2032 }
2033 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2034 RtlCopyMemory((int8*)&(NewExtent[i+1]), (int8*)(TmpExtInf.Mapping), d*sizeof(EXTENT_MAP) );
2035 RtlCopyMemory((int8*)&(NewExtent[i+d+2]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2036 NewExtent[i].extLocation = 0;
2037 NewExtent[i].extLength = (BOffs - l) << BSh;
2038 NewExtent[i+d+1].extLength = Extent[i].extLength - NewExtent[i].extLength - aLen;
2039 NewExtent[i+d+1].extLocation = 0;
2040 NewExtent[i].extLength |= (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
2041 NewExtent[i+d+1].extLength |= (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
2042 AdPrint(("Not->Alloc (2) new %x\n", NewExtent));
2043 } else {
2044 // xxxxxx -> xxRRRR
2045 NewExtent = (PEXTENT_AD)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + d*sizeof(EXTENT_MAP) );
2046 if(!NewExtent) {
2047 MyFreePool__(TmpExtInf.Mapping);
2048 return STATUS_INSUFFICIENT_RESOURCES;
2049 }
2050 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2051 RtlCopyMemory((int8*)&(NewExtent[i+1]), (int8*)(TmpExtInf.Mapping), d*sizeof(EXTENT_MAP) );
2052 RtlCopyMemory((int8*)&(NewExtent[i+d+1]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2053 NewExtent[i].extLocation = 0;
2054 NewExtent[i].extLength = (BOffs - l) << BSh;
2055 NewExtent[i].extLength |= (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
2056 AdPrint(("Not->Alloc (3) new %x\n", NewExtent));
2057 }
2058 } else {
2059 // xxxxxx -> RRRRxx
2060 NewExtent = (PEXTENT_AD)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + d*sizeof(EXTENT_MAP) );
2061 if(!NewExtent) {
2062 MyFreePool__(TmpExtInf.Mapping);
2063 return STATUS_INSUFFICIENT_RESOURCES;
2064 }
2065 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2066 RtlCopyMemory((int8*)&(NewExtent[i]), (int8*)(TmpExtInf.Mapping), d*sizeof(EXTENT_MAP) );
2067 RtlCopyMemory((int8*)&(NewExtent[i+d+1]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2068 NewExtent[i+d].extLength = (Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) - aLen;
2069 NewExtent[i+d].extLocation = 0;
2070 NewExtent[i+d].extLength |= (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
2071 AdPrint(("Not->Alloc (4) new %x\n", NewExtent));
2072 }
2073
2074 AdPrint(("Free Extent %x, TmpExtInf.Mapping %x, (new %x)\n", Extent, TmpExtInf.Mapping, NewExtent));
2075 MyFreePool__(Extent);
2076 MyFreePool__(TmpExtInf.Mapping);
2077 ExtInfo->Modified = TRUE;
2078 ExtInfo->Mapping = NewExtent;
2079
2080 AdPrint(("Not->Alloc: ExtInfo %x, Extent %x\n", ExtInfo, ExtInfo->Mapping));
2081
2082 return STATUS_SUCCESS;
2083 } // end UDFMarkNotAllocatedAsAllocated()
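/*
    Editorial sketch of the sLen/aLen rounding used above: the byte range
    [Offset, Offset+Length) is expanded to logical-block boundaries, giving
    the covered length in blocks (sLen) and the allocation increment in
    bytes (aLen). For simplicity the sketch assumes LBS == (1 << BSh); all
    names are local to the sketch.
*/
#if 0
static void
SketchRoundToLBlocks(
    IN  int64  Offset,
    IN  uint32 Length,
    IN  uint32 LBS,     // logical block size, power of two
    IN  uint32 BSh,     // log2(LBS)
    OUT uint32* sLen,   // covered length in blocks
    OUT uint32* aLen    // covered length in bytes
    )
{
    uint32 offInBlock = ((uint32)Offset) & (LBS-1);
    (*sLen) = ((offInBlock + Length + LBS - 1) & ~(LBS-1)) >> BSh;
    (*aLen) = (uint32)( ((Offset + Length + LBS - 1) & ~((int64)LBS-1)) -
                        (Offset & ~((int64)LBS-1)) );
    // e.g. LBS=2048: Offset=1000, Length=3000 covers blocks 0..1,
    // so sLen == 2 and aLen == 4096
}
#endif //0 -- illustration only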
2084
2085 //#if 0
2086 /*
2087 This routine rebuilds the mapping on zero-write attempts to an
2088 Alloc-Not-Rec area.
2089 Here we assume that the required area lies within a single frag.
2090 */
2091 OSSTATUS
2092 UDFMarkAllocatedAsNotXXX(
2093 IN PVCB Vcb,
2094 IN int64 Offset,
2095 IN uint32 Length,
2096 IN PEXTENT_INFO ExtInfo, // Extent array
2097 IN BOOLEAN Deallocate
2098 )
2099 {
2100 uint32 i, len, /*lba, d,*/ l, BOffs, j;
2101 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
2102 PEXTENT_MAP NewExtent;
2103 // EXTENT_MAP TmpExtent;
2104 // uint32 BS = Vcb->BlockSize;
2105 uint32 BSh = Vcb->BlockSizeBits;
2106 // OSSTATUS status;
2107 EXTENT_INFO TmpExtInf;
2108 uint32 aLen, sLen;
2109 uint32 flags;
2110 uint32 target_flags = Deallocate ?
2111 EXTENT_NOT_RECORDED_NOT_ALLOCATED :
2112 EXTENT_NOT_RECORDED_ALLOCATED;
2113 uint32 LBS = Vcb->LBlockSize;
2114 EXTENT_MAP DeadMapping[2];
2115 // I don't know what other comment could be added here.
2116 // Just believe that it works
2117 /*lba = */
2118 #ifndef ALLOW_SPARSE
2119 if(Deallocate) {
2120 BrutePoint();
2121 }
2122 #endif
2123
2124 AdPrint(("Alloc->Not ExtInfo %x, Extent %x\n", ExtInfo, Extent));
2125
2126 DeadMapping[0].extLocation =
2127 UDFExtentOffsetToLba(Vcb, ExtInfo->Mapping, Offset, NULL, NULL, NULL, &i);
2128 if(i == (ULONG)-1) {
2129 BrutePoint();
2130 return STATUS_INVALID_PARAMETER;
2131 }
2132 DeadMapping[0].extLength = Extent[i].extLength;
2133 DeadMapping[1].extLocation =
2134 DeadMapping[1].extLength = 0;
2135 TmpExtInf.Mapping = (PEXTENT_MAP)&DeadMapping;
2136 TmpExtInf.Offset = 0;
2137 TmpExtInf.Length = Extent[i].extLength & UDF_EXTENT_LENGTH_MASK;
2138
2139 flags = Extent[i].extLength >> 30;
2140 if(flags == target_flags) return STATUS_SUCCESS;
2141
2142 // uint32 PartNum = UDFGetPartNumByPhysLba(Vcb, Extent[0].extLocation);
2143 BOffs = (uint32)(Offset >> BSh);
2144 // length of existing Alloc-(Not-)Rec frag (in sectors)
2145 sLen = (( (((uint32)Offset) & (LBS-1)) + Length+LBS-1) & ~(LBS-1)) >> BSh;
2146 // required deallocation length increment (in bytes)
2147 aLen = (uint32)( ((Offset+Length+LBS-1) & ~(LBS-1)) - (Offset & ~(LBS-1)) );
2148
2149 l=0;
2150 for(j=0; j<i; j++) {
2151 l += (uint32)((Extent[j].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
2152 }
2153 flags <<= 30;
2154 if( (l == BOffs) && (((Extent[j].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh) == sLen) ) {
2155 // xxxxxx -> RRRRRR
2156 Extent[i].extLocation = 0;
2157 Extent[i].extLength = (Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) | flags;
2158 NewExtent = Extent;
2159 AdPrint(("Alloc->Not (1) NewExtent = Extent = %x\n", NewExtent));
2160 } else
2161 if(l < BOffs) {
2162 // .ExtLength, BOffs & l are already aligned...
2163 if( (((Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh) - (BOffs-l)) > sLen ) {
2164 // xxxxxx -> xxRRxx
2165 NewExtent = (PEXTENT_MAP)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + 2*sizeof(EXTENT_MAP) );
2166 if(!NewExtent) {
2167 return STATUS_INSUFFICIENT_RESOURCES;
2168 }
2169 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2170 RtlCopyMemory((int8*)&(NewExtent[i+3]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2171 NewExtent[i].extLength = (BOffs - l) << BSh;
2172 NewExtent[i].extLength |= flags;
2173 NewExtent[i+1].extLocation = 0;
2174 NewExtent[i+1].extLength = aLen | (target_flags << 30);
2175 NewExtent[i+2].extLength = (Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) -
2176 (NewExtent[i].extLength & UDF_EXTENT_LENGTH_MASK) - aLen ;
2177 NewExtent[i+2].extLocation = Extent[i].extLocation +
2178 (NewExtent[i+2].extLength >> BSh);
2179 NewExtent[i+2].extLength |= flags;
2180 AdPrint(("Alloc->Not (2) new %x\n", NewExtent));
2181 } else {
2182 // xxxxxx -> xxRRRR
2183 NewExtent = (PEXTENT_MAP)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + sizeof(EXTENT_MAP) );
2184 if(!NewExtent) {
2185 return STATUS_INSUFFICIENT_RESOURCES;
2186 }
2187 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2188 RtlCopyMemory((int8*)&(NewExtent[i+2]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2189 NewExtent[i].extLength = ((BOffs - l) << BSh) | flags;
2190 NewExtent[i+1].extLocation = 0;
2191 NewExtent[i+1].extLength = aLen | (target_flags << 30);
2192 AdPrint(("Alloc->Not (3) new %x\n", NewExtent));
2193 }
2194 } else {
2195 // xxxxxx -> RRRRxx
2196 NewExtent = (PEXTENT_MAP)MyAllocatePool__(NonPagedPool, UDFGetMappingLength(Extent) + sizeof(EXTENT_MAP) );
2197 if(!NewExtent) {
2198 return STATUS_INSUFFICIENT_RESOURCES;
2199 }
2200 RtlCopyMemory((int8*)NewExtent, (int8*)Extent, i*sizeof(EXTENT_MAP));
2201 RtlCopyMemory((int8*)&(NewExtent[i+2]), (int8*)&(Extent[i+1]), len = UDFGetMappingLength(&(Extent[i+1])) );
2202 NewExtent[i+1].extLength = (Extent[i].extLength & UDF_EXTENT_LENGTH_MASK) - aLen;
2203 NewExtent[i+1].extLength |= flags;
2204 NewExtent[i].extLocation = 0;
2205 NewExtent[i].extLength = aLen | (target_flags << 30);
2206 AdPrint(("Alloc->Not (4) new %x\n", NewExtent));
2207 }
2208
2209 if(Deallocate)
2210 UDFMarkSpaceAsXXX(Vcb, (-1), TmpExtInf.Mapping, AS_DISCARDED); // mark as free
2211
2212 if(Extent) {
2213 AdPrint(("Alloc->Not kill %x\n", Extent));
2214 MyFreePool__(Extent);
2215 } else {
2216 AdPrint(("Alloc->Not keep %x\n", Extent));
2217 }
2218 ExtInfo->Modified = TRUE;
2219 ExtInfo->Mapping = NewExtent;
2220 AdPrint(("Alloc->Not: ExtInfo %x, Extent %x\n", ExtInfo, ExtInfo->Mapping));
2221
2222 return STATUS_SUCCESS;
2223 } // end UDFMarkAllocatedAsNotXXX()
2224 //#endif //0
2225
2226 /*
2227 This routine resizes extent & updates associated mapping
2228 */
2229 OSSTATUS
2230 UDFResizeExtent(
2231 IN PVCB Vcb,
2232 IN uint32 PartNum,
2233 IN int64 Length, // Required Length
2234 IN BOOLEAN AlwaysInIcb, // must be TRUE for AllocDescs
2235 OUT PEXTENT_INFO ExtInfo
2236 )
2237 {
2238 uint32 i, flags, lba, lim;
2239 int64 l;
2240 OSSTATUS status;
2241 EXTENT_INFO TmpExtInf;
2242 EXTENT_MAP TmpMapping[2];
2243 uint32 s, req_s, pe, BSh, LBS, PS;
2244 LBS = Vcb->LBlockSize;
2245 BSh = Vcb->BlockSizeBits;
2246 PS = Vcb->WriteBlockSize >> Vcb->BlockSizeBits;
2247 uint32 MaxGrow = (UDF_MAX_EXTENT_LENGTH & ~(LBS-1));
2248 BOOLEAN Sequential = FALSE;
2249
2250 ASSERT(PartNum < 3);
2251
2252 ExtPrint(("Resize ExtInfo %x, %I64x -> %I64x\n", ExtInfo, ExtInfo->Length, Length));
2253
2254 if(ExtInfo->Flags & EXTENT_FLAG_CUT_PREALLOCATED) {
2255 AdPrint((" cut preallocated\n"));
2256 } else
2257 if(ExtInfo->Length == Length) {
2258 return STATUS_SUCCESS;
2259 }
2260 if((ExtInfo->Flags & EXTENT_FLAG_ALLOC_MASK) == EXTENT_FLAG_ALLOC_SEQUENTIAL) {
2261 MaxGrow &= ~(Vcb->WriteBlockSize-1);
2262 Sequential = TRUE;
2263 }
2264
2265 UDFCheckSpaceAllocation(Vcb, 0, ExtInfo->Mapping, AS_USED); // check if used
2266 if(ExtInfo->Offset) {
2267 if(ExtInfo->Offset + Length <= LBS) {
2268 ExtPrint(("Resize IN-ICB\n"));
2269 ExtInfo->Length = Length;
2270 return STATUS_SUCCESS;
2271 }
2272 if(!AlwaysInIcb) // simulate unused 1st sector in extent
2273 ExtInfo->Offset = LBS; // it'll be truncated later
2274 Length += ExtInfo->Offset; // convert to real offset in extent
2275 }
2276 lba = UDFExtentOffsetToLba(Vcb, ExtInfo->Mapping, Length, NULL, NULL, &flags, &i);
2277 if(ExtInfo->Length < Length) {
2278 // increase extent
2279 if(OS_SUCCESS(UDFGetCachedAllocation(Vcb, ExtInfo->Mapping[0].extLocation,
2280 &TmpExtInf, NULL, UDF_PREALLOC_CLASS_DIR))) {
2281 AdPrint(("Resize found cached(1)\n"));
2282 ExtInfo->Mapping = UDFMergeMappings(ExtInfo->Mapping, TmpExtInf.Mapping);
2283 MyFreePool__(TmpExtInf.Mapping);
2284 }
2285 if((l = UDFGetExtentLength(ExtInfo->Mapping)) >= Length) {
2286 // we have enough space inside extent
2287 ExtInfo->Length = Length;
2288 AdPrint(("Resize do nothing (1)\n"));
2289 } else /*if(lba == LBA_OUT_OF_EXTENT)*/ {
2290
2291 Length -= ExtInfo->Offset;
2292 if(/*Length && l &&*/ (l % MaxGrow) &&
2293 (Length-1)/MaxGrow != (l-1)/MaxGrow) {
2294 AdPrint(("Crossing MAX_FRAG boundary...\n"));
2295 int64 l2 = ((l-1)/MaxGrow + 1)*MaxGrow;
2296 status = UDFResizeExtent(Vcb, PartNum, l2, AlwaysInIcb, ExtInfo);
2297 if(!OS_SUCCESS(status)) {
2298 KdPrint(("Sub-call to UDFResizeExtent() failed (%x)\n", status));
2299 return status;
2300 }
2301 l = ExtInfo->Length;
2302 ASSERT(l == l2);
2303 }
2304 while((Length - l) > MaxGrow) {
2305 status = UDFResizeExtent(Vcb, PartNum, l+MaxGrow, AlwaysInIcb, ExtInfo);
2306 if(!OS_SUCCESS(status)) {
2307 KdPrint(("Sub-call (2) to UDFResizeExtent() failed (%x)\n", status));
2308 return status;
2309 }
2310 l = ExtInfo->Length;
2311 }
2312 Length += ExtInfo->Offset;
2313 // at first, try to resize existing frag
2314 #ifndef UDF_ALLOW_FRAG_AD
2315 i = UDFGetMappingLength(ExtInfo->Mapping);
2316 if(i > (LBS-sizeof(EXTENDED_FILE_ENTRY))) {
2317 // this is a very important check, since we will not
2318 // be able to _record_ a too-long AllocDesc because of
2319 // some DEMO limitations in UDFBuildXXXAllocDescs()
2320 AdPrint((" DISK_FULL\n"));
2321 return STATUS_DISK_FULL;
2322 }
2323 i /= sizeof(EXTENT_MAP);
2324 #else //UDF_ALLOW_FRAG_AD
2325 i = UDFGetMappingLength(ExtInfo->Mapping) / sizeof(EXTENT_MAP);
2326 #endif //UDF_ALLOW_FRAG_AD
2327 #ifdef ALLOW_SPARSE
2328 if(!AlwaysInIcb && !(ExtInfo->Offset) &&
2329 (Length - l >= (Vcb->SparseThreshold << BSh))) {
2330 // last frag will be Not-Alloc-Not-Rec...
2331 AdPrint(("Resize sparse (2)\n"));
2332 RtlZeroMemory(&TmpExtInf, sizeof(EXTENT_INFO));
2333 TmpExtInf.Mapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , sizeof(EXTENT_MAP)*2,
2334 MEM_EXTMAP_TAG);
2335 if(!TmpExtInf.Mapping) return STATUS_INSUFFICIENT_RESOURCES;
2336 TmpExtInf.Mapping[0].extLength = (((uint32)(Length - l) + LBS-1) & ~(LBS-1)) | (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
2337 TmpExtInf.Mapping[0].extLocation =// 0;
2338 TmpExtInf.Mapping[1].extLength =
2339 TmpExtInf.Mapping[1].extLocation = 0;
2340 l = Length;
2341 ExtInfo->Mapping = UDFMergeMappings(ExtInfo->Mapping, TmpExtInf.Mapping);
2342 MyFreePool__(TmpExtInf.Mapping);
2343 } else
2344 #endif //ALLOW_SPARSE
2345 // allocate some sectors
2346 if(i>1 && !(ExtInfo->Offset)) {
2347 i-=2;
2348 // check if Not-Alloc-Not-Rec at the end of mapping
2349 if((uint32)Length - (uint32)l + (ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK) > MaxGrow) {
2350 // do nothing, but jump directly to allocator
2351 } else
2352 if((ExtInfo->Mapping[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
2353 AdPrint(("Resize grow sparse (3)\n"));
2354 ExtInfo->Mapping[i].extLength +=
2355 (((uint32)Length-(uint32)l+LBS-1) & ~(LBS-1)) ;
2356 l = Length;
2357 // check if Alloc-Not-Rec at the end of mapping
2358 } else if((ExtInfo->Mapping[i].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED) {
2359 AdPrint(("Resize grow Not-Rec (3)\n"));
2360 // current length of last frag
2361 s = ((ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
2362 // preferred location of the next frag
2363 lba = ExtInfo->Mapping[i].extLocation + s;
2364 pe=UDFPartEnd(Vcb,PartNum);
2365 // maximum frag length
2366 if(Sequential) {
2367 lim = (((uint32)UDF_MAX_EXTENT_LENGTH) >> BSh) & ~(PS-1);
2368 } else {
2369 lim = (((uint32)UDF_MAX_EXTENT_LENGTH) >> BSh) & ~(LBS-1);
2370 }
2371 // required last extent length
2372 req_s = s + (uint32)( (((Length + LBS - 1) & ~(LBS-1)) -
2373 ((l + LBS - 1) & ~(LBS-1)) ) >> BSh);
2374 if(lim > req_s) {
2375 lim = req_s;
2376 }
2377 UDFAcquireResourceExclusive(&(Vcb->BitMapResource1),TRUE);
2378 /* if((ExtInfo->Flags & EXTENT_FLAG_SEQUENTIAL) &&
2379 ((Length & ~(PS-1)) > (l & ~(PS-1))) &&
2380 TRUE) {
2381 status = UDFResizeExtent(Vcb, PartNum, l+MaxGrow, AlwaysInIcb, ExtInfo);
2382 }*/
2383 // how many sectors we should add
2384 req_s = lim - s;
2385 ASSERT(req_s);
2386 if((lba < pe) && UDFGetFreeBit(Vcb->FSBM_Bitmap, lba)) {
2387 s += UDFGetBitmapLen((uint32*)(Vcb->FSBM_Bitmap), lba, min(pe, lba+req_s-1));
2388 }
2389 /* for(s1=lba; (s<lim) && (s1<pe) && UDFGetFreeBit(Vcb->FSBM_Bitmap, s1); s1++) {
2390 s++;
2391 }*/
2392 if(s==lim) {
2393 // we can just increase the last frag
2394 AdPrint(("Resize grow last Not-Rec (4)\n"));
2395 ExtInfo->Mapping[i].extLength = (lim << BSh) | (EXTENT_NOT_RECORDED_ALLOCATED << 30);
2396 l = Length;
2397 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, &(ExtInfo->Mapping[i]), AS_USED); // mark as used
2398 } else {
2399 // we get here if simply increasing the last frag failed;
2400 // it is worth truncating the last frag and trying to allocate
2401 // all the required data as a single frag
2402
2403 /* if(Sequential && s>=PS) {
2404 s &= ~(PS-1);
2405 AdPrint(("Resize grow last Not-Rec (4/2)\n"));
2406 ExtInfo->Mapping[i].extLength = (s << BSh) | (EXTENT_NOT_RECORDED_ALLOCATED << 30);
2407 l += (s << BSh);
2408 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, &(ExtInfo->Mapping[i]), AS_USED); // mark as used
2409 }*/
2410 AdPrint(("Resize reloc last Not-Rec (5)\n"));
2411 TmpExtInf.Mapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , (i+1)*sizeof(EXTENT_MAP),
2412 MEM_EXTMAP_TAG);
2413 if(!TmpExtInf.Mapping) {
2414 KdPrint(("UDFResizeExtent: !TmpExtInf.Mapping\n"));
2415 UDFReleaseResource(&(Vcb->BitMapResource1));
2416 return STATUS_INSUFFICIENT_RESOURCES;
2417 }
2418 RtlCopyMemory(TmpExtInf.Mapping, ExtInfo->Mapping, i*sizeof(EXTENT_MAP));
2419 TmpExtInf.Mapping[i].extLength =
2420 TmpExtInf.Mapping[i].extLocation = 0;
2421 TmpExtInf.Offset = ExtInfo->Offset;
2422 l -= (ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK);
2423 TmpExtInf.Length = l;
2424 ASSERT(i || !ExtInfo->Offset);
2425 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, &(ExtInfo->Mapping[i]), AS_DISCARDED); // mark as free
2426 MyFreePool__(ExtInfo->Mapping);
2427 (*ExtInfo) = TmpExtInf;
2428 }
2429 UDFCheckSpaceAllocation(Vcb, 0, ExtInfo->Mapping, AS_USED); // check if used
2430 UDFReleaseResource(&(Vcb->BitMapResource1));
2431 // check if Alloc-Rec
2432 } else {
2433 // current length of last frag
2434 s = ((ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
2435 // preferred location of the next frag
2436 lba = ExtInfo->Mapping[i].extLocation + s;
2437 pe=UDFPartEnd(Vcb,PartNum);
2438 // maximum frag length
2439 if(Sequential) {
2440 lim = (((uint32)UDF_MAX_EXTENT_LENGTH) >> BSh) & ~(PS-1);
2441 } else {
2442 lim = (((uint32)UDF_MAX_EXTENT_LENGTH) >> BSh) & ~(LBS-1);
2443 }
2444 // required last extent length
2445 req_s = s + (uint32)( (((Length + LBS - 1) & ~(LBS-1)) -
2446 ((l + LBS - 1) & ~(LBS-1)) ) >> BSh);
2447 if(lim > req_s) {
2448 lim = req_s;
2449 }
2450 // s=0;
2451 // how many sectors we should add
2452 req_s = lim - s;
2453 if(req_s) {
2454 uint32 d=0;
2455
2456 UDFAcquireResourceExclusive(&(Vcb->BitMapResource1),TRUE);
2457 //ASSERT(req_s);
2458 if((lba < pe) && UDFGetFreeBit(Vcb->FSBM_Bitmap, lba)) {
2459 s += (d = UDFGetBitmapLen((uint32*)(Vcb->FSBM_Bitmap), lba, min(pe, lba+req_s-1)));
2460 }
2461 /* for(s1=lba; (s<lim) && (s1<pe) && UDFGetFreeBit(Vcb->FSBM_Bitmap, s1); s1++) {
2462 s++;
2463 }*/
2464
2465 if(s==lim) {
2466 AdPrint(("Resize grow last Rec (6)\n"));
2467 // we can just increase last frag
2468 TmpMapping[0].extLength = req_s << BSh;
2469 TmpMapping[0].extLocation = lba;
2470 TmpMapping[1].extLength =
2471 TmpMapping[1].extLocation = 0;
2472 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, &TmpMapping[0], AS_USED); // mark as used
2473 l += (s << BSh) - (ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK);
2474 ExtInfo->Mapping[i].extLength = (ExtInfo->Mapping[i].extLength & UDF_EXTENT_FLAG_MASK) | (s << BSh);
2475 } else if(d) {
2476 AdPrint(("Resize part-grow last Rec (6)\n"));
2477 // increase last frag, then alloc rest
2478 TmpMapping[0].extLength = d << BSh;
2479 TmpMapping[0].extLocation = lba;
2480 TmpMapping[1].extLength =
2481 TmpMapping[1].extLocation = 0;
2482 UDFMarkSpaceAsXXXNoProtect(Vcb, 0, &TmpMapping[0], AS_USED); // mark as used
2483 l += (s << BSh) - (ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK);
2484 ExtInfo->Mapping[i].extLength = (ExtInfo->Mapping[i].extLength & UDF_EXTENT_FLAG_MASK) | (s << BSh);
2485 } else {
2486 AdPrint(("Can't grow last Rec (6)\n"));
2487 }
2488 UDFReleaseResource(&(Vcb->BitMapResource1));
2489 } else {
2490 AdPrint(("Max frag length reached (6)\n"));
2491 }
2492 }
2493 }
2494 if(l < Length) {
2495 // we get here if simply increasing the last frag failed
2496 AdPrint(("Resize add new frag (7)\n"));
2497 if(l < LBS && Length >= LBS &&
2498 (ExtInfo->Flags & EXTENT_FLAG_ALLOC_MASK) == EXTENT_FLAG_ALLOC_SEQUENTIAL) {
2499 AdPrint(("Resize tune for SEQUENTIAL i/o\n"));
2500 }
2501 status = UDFAllocFreeExtent(Vcb, Length - l,
2502 UDFPartStart(Vcb, PartNum),
2503 UDFPartEnd(Vcb, PartNum),
2504 &TmpExtInf,
2505 ExtInfo->Flags /*& EXTENT_FLAG_ALLOC_MASK*/);
2506 if(!OS_SUCCESS(status)) {
2507 KdPrint(("UDFResizeExtent: UDFAllocFreeExtent() failed (%x)\n", status));
2508 return status;
2509 }
2510 ExtInfo->Mapping = UDFMergeMappings(ExtInfo->Mapping, TmpExtInf.Mapping);
2511 MyFreePool__(TmpExtInf.Mapping);
2512 }
2513 UDFPackMapping(Vcb, ExtInfo);
2514 }
2515 } else
2516 if(Length) {
2517 // decrease extent
2518 AdPrint(("Resize cut (8)\n"));
2519 lba = UDFExtentOffsetToLba(Vcb, ExtInfo->Mapping, Length-1, NULL, &lim, &flags, &i);
2520 i++;
2521 ASSERT(lba != LBA_OUT_OF_EXTENT);
2522 ASSERT(lba != LBA_NOT_ALLOCATED);
2523 ASSERT(i);
2524 if(ExtInfo->Mapping[i].extLength) {
2525 UDFCheckSpaceAllocation(Vcb, 0, &(ExtInfo->Mapping[i]), AS_USED); // check if used
2526 if(!ExtInfo->Offset && (ExtInfo->Flags & EXTENT_FLAG_PREALLOCATED)) {
2527
2528 AdPrint(("Resize try save cut (8)\n"));
2529 RtlZeroMemory(&TmpExtInf, sizeof(EXTENT_INFO));
2530 s = UDFGetMappingLength(&(ExtInfo->Mapping[i]));
2531
2532 TmpExtInf.Mapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , s, MEM_EXTMAP_TAG);
2533 if(TmpExtInf.Mapping) {
2534 RtlCopyMemory(TmpExtInf.Mapping, &(ExtInfo->Mapping[i]), s);
2535 AdPrint(("Resize save cut (8)\n"));
2536 if(OS_SUCCESS(UDFStoreCachedAllocation(Vcb, ExtInfo->Mapping[0].extLocation,
2537 &TmpExtInf, 0, UDF_PREALLOC_CLASS_DIR))) {
2538 ExtInfo->Mapping[i].extLength = 0;
2539 ExtInfo->Mapping[i].extLocation = 0;
2540 goto tail_cached;
2541 }
2542 }
2543 }
2544 UDFMarkSpaceAsXXX(Vcb, 0, &(ExtInfo->Mapping[i]), AS_DISCARDED); // mark as free
2545 tail_cached:;
2546 }
2547 if((lim-1 >= LBS) &&
2548 (flags != EXTENT_NOT_RECORDED_NOT_ALLOCATED)) {
2549 AdPrint(("i=%x, lba=%x, len=%x\n",i,lba,lim));
2550 ASSERT(lim);
2551 // BrutePoint();
2552 EXTENT_MAP ClrMap[2];
2553 ClrMap[0].extLength = lim & ~(LBS-1);
2554 s = (ExtInfo->Mapping[i-1].extLength - ClrMap[0].extLength) & UDF_EXTENT_LENGTH_MASK;
2555 ClrMap[0].extLocation = ExtInfo->Mapping[i-1].extLocation +
2556 (s >> BSh);
2557 ClrMap[1].extLength =
2558 ClrMap[1].extLocation = 0;
2559 ASSERT((ExtInfo->Mapping[i].extLocation < ClrMap[0].extLocation) ||
2560 (ExtInfo->Mapping[i].extLocation >= (ClrMap[0].extLocation + (ClrMap[0].extLength >> BSh))));
2561 UDFCheckSpaceAllocation(Vcb, 0, (PEXTENT_MAP)(&ClrMap), AS_USED); // check if used
2562 UDFMarkSpaceAsXXX(Vcb, 0, (PEXTENT_MAP)(&ClrMap), AS_DISCARDED); // mark as free
2563 ExtInfo->Mapping[i-1].extLength = s | (flags << 30);
2564 }
2565
2566 s = UDFGetMappingLength(ExtInfo->Mapping);
2567 if(!MyReallocPool__((int8*)(ExtInfo->Mapping), s, (int8**)&(ExtInfo->Mapping), (i+1)*sizeof(EXTENT_MAP))) {
2568 // This must never happen on truncate !!!
2569 AdPrint(("ResizeExtent: MyReallocPool__(8) failed\n"));
2570 }
2571 ExtInfo->Mapping[i].extLength =
2572 ExtInfo->Mapping[i].extLocation = 0;
2573 } else {
2574 AdPrint(("Resize zero (9)\n"));
2575 ASSERT(!ExtInfo->Offset);
2576 UDFMarkSpaceAsXXX(Vcb, 0, ExtInfo->Mapping, AS_DISCARDED); // mark as free
2577 s = UDFGetMappingLength(ExtInfo->Mapping);
2578 if(!MyReallocPool__((int8*)(ExtInfo->Mapping), s, (int8**)&(ExtInfo->Mapping), 2*sizeof(EXTENT_MAP))) {
2579 // This must never happen on truncate !!!
2580 AdPrint(("ResizeExtent: MyReallocPool__(9) failed\n"));
2581 }
2582 ExtInfo->Mapping[0].extLength = LBS | (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
2583 ExtInfo->Mapping[0].extLocation =
2584 ExtInfo->Mapping[1].extLength =
2585 ExtInfo->Mapping[1].extLocation = 0;
2586 }
2587 if(ExtInfo->Offset) {
2588 if(!AlwaysInIcb) {
2589 // remove 1st entry pointing to FileEntry
2590 s = UDFGetMappingLength(ExtInfo->Mapping);
2591 RtlMoveMemory(&(ExtInfo->Mapping[0]), &(ExtInfo->Mapping[1]), s - sizeof(EXTENT_MAP));
2592 if(!MyReallocPool__((int8*)(ExtInfo->Mapping), s,
2593 (int8**)&(ExtInfo->Mapping), s - sizeof(EXTENT_MAP) )) {
2594 // This must never happen on truncate !!!
2595 AdPrint(("ResizeExtent: MyReallocPool__(10) failed\n"));
2596 }
2597 Length -= ExtInfo->Offset;
2598 ExtInfo->Offset = 0;
2599 } else {
2600 Length -= ExtInfo->Offset; // back to in-icb
2601 }
2602 }
2603 ExtInfo->Length = Length;
2604 UDFCheckSpaceAllocation(Vcb, 0, ExtInfo->Mapping, AS_USED); // check if used
2605
2606 for(i=0; (ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK); i++) {
2607 ExtPrint(("Resized Ext: type %x, loc %x, len %x\n",
2608 ExtInfo->Mapping[i].extLength >> 30, ExtInfo->Mapping[i].extLocation, ExtInfo->Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK));
2609 }
2610
2611 return STATUS_SUCCESS;
2612 } // end UDFResizeExtent()
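/*
    Editorial sketch of the chunked-growth policy UDFResizeExtent() applies
    above: an extent is never grown across a MaxGrow boundary
    (UDF_MAX_EXTENT_LENGTH rounded down to a block multiple) in one step, so
    the length is first topped up to the next MaxGrow boundary, then grown
    one full slice at a time, and only the remainder is left for the final
    allocation. SketchGrowOnce() is a stand-in for the recursive
    UDFResizeExtent() sub-calls; everything here is illustrative.
*/
#if 0
static OSSTATUS
SketchGrowOnce(int64 Target, int64* CurLen)
{
    (*CurLen) = Target;      // the real code resizes the extent here
    return STATUS_SUCCESS;
}

static OSSTATUS
SketchChunkedGrow(int64 CurLen, int64 NewLen, int64 MaxGrow)
{
    OSSTATUS status;
    // 1) if the current and requested lengths lie in different MaxGrow
    //    slices, first grow up to the current slice boundary
    if((CurLen % MaxGrow) && (NewLen-1)/MaxGrow != (CurLen-1)/MaxGrow) {
        status = SketchGrowOnce(((CurLen-1)/MaxGrow + 1)*MaxGrow, &CurLen);
        if(!OS_SUCCESS(status)) return status;
    }
    // 2) then grow one full MaxGrow slice at a time
    while((NewLen - CurLen) > MaxGrow) {
        status = SketchGrowOnce(CurLen + MaxGrow, &CurLen);
        if(!OS_SUCCESS(status)) return status;
    }
    // 3) the remaining (< MaxGrow) tail is handled by the caller's final step
    return STATUS_SUCCESS;
}
#endif //0 -- illustration only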
2613
2614 /*
2615 This routine (re)builds the AllocDescs data for all allocation modes except
2616 in-ICB & resizes the associated extent (FileInfo->Dloc->AllocLoc) for the
2617 already allocated user-data extent (FileInfo->Dloc->DataLoc).
2618 The AllocMode in the FileEntry pointed to by FileInfo must already be initialized.
2619 */
2620 OSSTATUS
2621 UDFBuildAllocDescs(
2622 IN PVCB Vcb,
2623 IN uint32 PartNum,
2624 IN OUT PUDF_FILE_INFO FileInfo,
2625 OUT int8** AllocData
2626 )
2627 {
2628 // PEXTENT_MAP InMap;
2629 // uint32 i=0;
2630 int8* Allocs;
2631 uint16 AllocMode;
2632 uint32 InitSz;
2633 OSSTATUS status;
2634
2635 ValidateFileInfo(FileInfo);
2636 AdPrint(("BuildAllocDesc\n"));
2637 // get space available in the 1st LBlock after FE
2638 InitSz = Vcb->LBlockSize - FileInfo->Dloc->FileEntryLen;
2639 Allocs = (int8*)MyAllocatePool__(NonPagedPool, InitSz);
2640 if(!Allocs) {
2641 AdPrint(("BuildAllocDesc: can't alloc %x bytes for Allocs\n", InitSz));
2642 return STATUS_INSUFFICIENT_RESOURCES;
2643 }
2644 RtlZeroMemory(Allocs, InitSz);
2645 // InMap = FileInfo->Dloc->DataLoc.Mapping;
2646 UDFCheckSpaceAllocation(Vcb, 0, FileInfo->Dloc->DataLoc.Mapping, AS_USED); // check if used
2647
2648 // TODO: move data from mapped locations here
2649
2650 AllocMode = ((PFILE_ENTRY)(FileInfo->Dloc->FileEntry))->icbTag.flags & ICB_FLAG_ALLOC_MASK;
2651 switch(AllocMode) {
2652 case ICB_FLAG_AD_IN_ICB: {
2653 MyFreePool__(Allocs);
2654 ASSERT(!FileInfo->Dloc->AllocLoc.Mapping);
2655 Allocs = NULL;
2656 status = STATUS_SUCCESS;
2657 break;
2658 }
2659 case ICB_FLAG_AD_SHORT: {
2660 status = UDFBuildShortAllocDescs(Vcb, PartNum, &Allocs, InitSz, FileInfo);
2661 break;
2662 }
2663 case ICB_FLAG_AD_LONG: {
2664 status = UDFBuildLongAllocDescs(Vcb, PartNum, &Allocs, InitSz, FileInfo);
2665 break;
2666 }
2667 /* case ICB_FLAG_AD_EXTENDED: {
2668 status = UDFBuildExtAllocDescs(Vcb, PartNum, &Allocs, InitSz, FileInfo);
2669 break;
2670 }*/
2671 default: {
2672 MyFreePool__(Allocs);
2673 Allocs = NULL;
2674 status = STATUS_INVALID_PARAMETER;
2675 }
2676 }
2677
2678 *AllocData = Allocs;
2679 UDFCheckSpaceAllocation(Vcb, 0, FileInfo->Dloc->DataLoc.Mapping, AS_USED); // check if used
2680
2681 return status;
2682 } // end UDFBuildAllocDescs()
2683
2684 /*
2685 This routine discards file's allocation
2686 */
2687 void
2688 UDFFreeFileAllocation(
2689 IN PVCB Vcb,
2690 IN PUDF_FILE_INFO DirInfo,
2691 IN PUDF_FILE_INFO FileInfo
2692 )
2693 {
2694 if(FileInfo->Dloc->DataLoc.Offset) {
2695 // in-ICB data
2696 if(FileInfo->Dloc->DataLoc.Mapping) {
2697 ASSERT(FileInfo->Dloc->FELoc.Mapping[0].extLocation ==
2698 FileInfo->Dloc->DataLoc.Mapping[0].extLocation);
2699 UDFMarkSpaceAsXXX(Vcb, FileInfo->Dloc, &(FileInfo->Dloc->DataLoc.Mapping[1]), AS_DISCARDED); // free
2700 FileInfo->Dloc->DataLoc.Mapping[1].extLocation =
2701 FileInfo->Dloc->DataLoc.Mapping[1].extLength = 0;
2702 FileInfo->Dloc->DataLoc.Mapping[0].extLocation = 0;
2703 FileInfo->Dloc->DataLoc.Mapping[0].extLength = EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30;
2704 }
2705 if(FileInfo->Dloc->AllocLoc.Mapping) {
2706 ASSERT(FileInfo->Dloc->FELoc.Mapping[0].extLocation ==
2707 FileInfo->Dloc->AllocLoc.Mapping[0].extLocation);
2708 UDFMarkSpaceAsXXX(Vcb, FileInfo->Dloc, &(FileInfo->Dloc->AllocLoc.Mapping[1]), AS_DISCARDED); // free
2709 FileInfo->Dloc->AllocLoc.Mapping[1].extLocation =
2710 FileInfo->Dloc->AllocLoc.Mapping[1].extLength = 0;
2711 FileInfo->Dloc->AllocLoc.Mapping[0].extLocation = 0;
2712 FileInfo->Dloc->AllocLoc.Mapping[0].extLength = EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30;
2713 }
2714 UDFFreeFESpace(Vcb, DirInfo, &(FileInfo->Dloc->FELoc));
2715 } else {
2716 if(FileInfo->Dloc->AllocLoc.Mapping) {
2717 ASSERT(FileInfo->Dloc->FELoc.Mapping[0].extLocation ==
2718 FileInfo->Dloc->AllocLoc.Mapping[0].extLocation);
2719 UDFMarkSpaceAsXXX(Vcb, FileInfo->Dloc, &(FileInfo->Dloc->AllocLoc.Mapping[1]), AS_DISCARDED); // free
2720 FileInfo->Dloc->AllocLoc.Mapping[1].extLocation =
2721 FileInfo->Dloc->AllocLoc.Mapping[1].extLength = 0;
2722 FileInfo->Dloc->AllocLoc.Mapping[0].extLocation = 0;
2723 FileInfo->Dloc->AllocLoc.Mapping[0].extLength = EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30;
2724 }
2725 UDFFreeFESpace(Vcb, DirInfo, &(FileInfo->Dloc->FELoc));
2726 UDFMarkSpaceAsXXX(Vcb, FileInfo->Dloc, FileInfo->Dloc->DataLoc.Mapping, AS_DISCARDED); // free
2727 }
2728 FileInfo->Dloc->DataLoc.Modified =
2729 FileInfo->Dloc->AllocLoc.Modified =
2730 FileInfo->Dloc->FELoc.Modified = FALSE;
2731 } // end UDFFreeFileAllocation()
2732 #endif //UDF_READ_ONLY_BUILD
2733
2734 /*
2735 This routine packs physically sequential extents into a single one
2736 */
2737 void
2738 __fastcall
2739 UDFPackMapping(
2740 IN PVCB Vcb,
2741 IN PEXTENT_INFO ExtInfo // Extent array
2742 )
2743 {
2744 PEXTENT_MAP NewMap, OldMap;
2745 uint32 i, j, l;
2746 uint32 LastLba, LastType, OldLen;
2747 uint32 OldSize, NewSize;
2748 #ifdef UDF_DBG
2749 int64 check_size;
2750 #endif //UDF_DBG
2751
2752 AdPrint(("Pack ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
2753 AdPrint((" Length %x\n", ExtInfo->Length));
2754
2755 OldMap = ExtInfo->Mapping;
2756 LastLba = OldMap[0].extLocation;
2757 OldLen = (OldMap[0].extLength & UDF_EXTENT_LENGTH_MASK) >> Vcb->BlockSizeBits;
2758 LastType = OldMap[0].extLength >> 30;
2759 OldSize =
2760 NewSize = UDFGetMappingLength(OldMap);
2761 #ifdef UDF_DBG
2762 check_size = UDFGetExtentLength(ExtInfo->Mapping);
2763 ASSERT(!(check_size & (2048-1)));
2764 #endif //UDF_DBG
2765
2766 l=OldMap[0].extLength & UDF_EXTENT_LENGTH_MASK;
2767 // calculate required length
2768 for(i=1; OldMap[i].extLength; i++) {
2769 if((LastType == (OldMap[i].extLength >> 30))
2770 &&
2771 ((OldMap[i].extLocation == LastLba + OldLen) ||
2772 (!OldMap[i].extLocation && !LastLba && (LastType == EXTENT_NOT_RECORDED_NOT_ALLOCATED)))
2773 &&
2774 (l + (OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK) <= UDF_MAX_EXTENT_LENGTH)) {
2775 // we can pack these two frags into one
2776 l += OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK;
2777 NewSize -= sizeof(EXTENT_MAP);
2778 } else {
2779 l = OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK;
2780 }
2781 LastLba = OldMap[i].extLocation;
2782 LastType = OldMap[i].extLength >> 30;
2783 OldLen = (OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK) >> Vcb->BlockSizeBits;
2784 }
2785 // no changes ?
2786 if(OldSize <= (NewSize + PACK_MAPPING_THRESHOLD)) {
2787 if(OldSize == NewSize)
2788 return;
2789 if(NewSize >= PACK_MAPPING_THRESHOLD)
2790 return;
2791 }
2792 AdPrint(("Pack ExtInfo %x, Mapping %x, realloc\n", ExtInfo, ExtInfo->Mapping));
2793 NewMap = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , NewSize,
2794 MEM_EXTMAP_TAG);
2795 // can't alloc ?
2796 if(!NewMap) return;
2797 // Ok, lets pack it...
2798 j=0;
2799 NewMap[0] = OldMap[0];
2800 LastLba = OldMap[0].extLocation;
2801 OldLen = (OldMap[0].extLength & UDF_EXTENT_LENGTH_MASK) >> Vcb->BlockSizeBits;
2802 LastType = OldMap[0].extLength >> 30;
2803 for(i=1; OldMap[i].extLength; i++) {
2804
2805 ExtPrint(("oShExt: type %x, loc %x, len %x\n",
2806 OldMap[i].extLength >> 30, OldMap[i].extLocation, OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK));
2807
2808 if((LastType == (OldMap[i].extLength >> 30))
2809 &&
2810 ((OldMap[i].extLocation == LastLba + OldLen) ||
2811 (!OldMap[i].extLocation && !LastLba && (LastType == EXTENT_NOT_RECORDED_NOT_ALLOCATED)))
2812 &&
2813 ((NewMap[j].extLength & UDF_EXTENT_LENGTH_MASK) + (OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK) <= UDF_MAX_EXTENT_LENGTH)) {
2814 NewMap[j].extLength += OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK;
2815 } else {
2816 j++;
2817 NewMap[j] = OldMap[i];
2818 }
2819
2820 ExtPrint(("nShExt: type %x, loc %x, len %x\n",
2821 NewMap[j].extLength >> 30, NewMap[j].extLocation, NewMap[j].extLength & UDF_EXTENT_LENGTH_MASK));
2822
2823 LastLba = OldMap[i].extLocation;
2824 LastType = OldMap[i].extLength >> 30;
2825 OldLen = (OldMap[i].extLength & UDF_EXTENT_LENGTH_MASK) >> Vcb->BlockSizeBits;
2826 }
2827 // write terminator
2828 j++;
2829 ASSERT(NewSize == (j+1)*sizeof(EXTENT_MAP));
2830 NewMap[j].extLength =
2831 NewMap[j].extLocation = 0;
2832
2833 ASSERT(check_size == UDFGetExtentLength(ExtInfo->Mapping));
2834 ASSERT(check_size == UDFGetExtentLength(NewMap));
2835
2836 AdPrint(("Pack ExtInfo %x, NewMap %x, OldMap %x\n", ExtInfo, NewMap, OldMap));
2837
2838 ExtInfo->Mapping = NewMap;
2839 MyFreePool__(OldMap);
2840
2841 AdPrint(("Pack ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
2842 AdPrint((" Length %x\n", ExtInfo->Length));
2843 } // end UDFPackMapping()
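/*
    Editorial sketch of the merge test behind UDFPackMapping(): two
    neighbouring frags can be collapsed when they have the same extent type,
    are physically contiguous (or are both sparse Not-Rec-Not-Alloc entries
    with location 0), and the combined length still fits into
    UDF_MAX_EXTENT_LENGTH. The real loop accumulates into the packed entry;
    this two-argument form is illustrative only.
*/
#if 0
static BOOLEAN
SketchCanMerge(
    IN PEXTENT_MAP Prev,
    IN PEXTENT_MAP Cur,
    IN uint32 BSh          // Vcb->BlockSizeBits
    )
{
    uint32 PrevType = Prev->extLength >> 30;
    uint32 PrevLen  = Prev->extLength & UDF_EXTENT_LENGTH_MASK;
    uint32 CurLen   = Cur->extLength  & UDF_EXTENT_LENGTH_MASK;

    if(PrevType != (Cur->extLength >> 30))
        return FALSE;                              // different extent types
    if(PrevLen + CurLen > UDF_MAX_EXTENT_LENGTH)
        return FALSE;                              // combined frag would be too long
    if(Cur->extLocation == Prev->extLocation + (PrevLen >> BSh))
        return TRUE;                               // physically contiguous
    if(!Cur->extLocation && !Prev->extLocation &&
       (PrevType == EXTENT_NOT_RECORDED_NOT_ALLOCATED))
        return TRUE;                               // both sparse
    return FALSE;
}
#endif //0 -- illustration only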
2844
2845 /*
2846 This routine expands mapping to 'frag-per-LBlock' state
2847 */
2848 OSSTATUS
2849 __fastcall
2850 UDFUnPackMapping(
2851 IN PVCB Vcb,
2852 IN PEXTENT_INFO ExtInfo // Extent array
2853 )
2854 {
2855 PEXTENT_MAP NewMapping;
2856 PEXTENT_MAP Mapping = ExtInfo->Mapping;
2857 uint32 LBS = Vcb->LBlockSize;
2858 uint32 len = (uint32)(UDFGetExtentLength(Mapping) >> Vcb->LBlockSizeBits);
2859 uint32 i,j, type, base, d;
2860 LONG l;
2861
2862 NewMapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , (len+1)*sizeof(EXTENT_MAP),
2863 MEM_EXTMAP_TAG);
2864 if(!NewMapping) return STATUS_INSUFFICIENT_RESOURCES;
2865
2866 j=0;
2867 d = LBS >> Vcb->BlockSizeBits;
2868 for(i=0; (l = (Mapping[i].extLength & UDF_EXTENT_LENGTH_MASK)); i++) {
2869 base = Mapping[i].extLocation;
2870 type = Mapping[i].extLength & UDF_EXTENT_FLAG_MASK;
2871 for(; l>=(LONG)LBS; j++) {
2872 NewMapping[j].extLength = LBS | type;
2873 NewMapping[j].extLocation = base;
2874 base+=d;
2875 l-=LBS;
2876 }
2877 }
2878 // record terminator
2879 ASSERT(NewMapping);
2880 RtlZeroMemory(&(NewMapping[j]), sizeof(EXTENT_MAP));
2881 MyFreePool__(Mapping);
2882 ExtInfo->Mapping = NewMapping;
2883
2884 return STATUS_SUCCESS;
2885 } // end UDFUnPackMapping()
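/*
    Editorial worked example for UDFUnPackMapping(), assuming
    BlockSize == LBlockSize == 2048 and a Vcb taken from the caller's
    context: a single recorded frag {location 100, length 3*2048} expands
    into three per-block entries plus the zero terminator, which is the
    'frag-per-LBlock' form UDFAllocateFESpace() relies on.
*/
#if 0
{
    EXTENT_INFO SketchExt;                  // hypothetical extent to unpack
    SketchExt.Mapping = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool,
                                    2*sizeof(EXTENT_MAP), MEM_EXTMAP_TAG);
    if(SketchExt.Mapping) {
        SketchExt.Mapping[0].extLocation = 100;
        SketchExt.Mapping[0].extLength   = 3*2048;   // recorded, 3 LBlocks
        SketchExt.Mapping[1].extLocation = 0;
        SketchExt.Mapping[1].extLength   = 0;        // terminator
        if(OS_SUCCESS(UDFUnPackMapping(Vcb, &SketchExt))) {
            // Mapping is now {100,2048},{101,2048},{102,2048},{0,0}
        }
    }
}
#endif //0 -- illustration only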
2886
2887 /*
2888 Relocate a part of extent that starts from relative (inside extent)
2889 block number 'ExtBlock' and has length of 'BC' blocks to continuous
2890 run which starts at block 'Lba'
2891 */
2892 OSSTATUS
2893 UDFRelocateExtent(
2894 IN PVCB Vcb,
2895 IN PEXTENT_INFO ExtInfo,
2896 IN uint32 ExtBlock,
2897 IN uint32 Lba,
2898 IN uint32 BC
2899 )
2900 {
2901 return STATUS_ACCESS_DENIED;
2902 }
2903
2904 /*
2905 This routine checks if all the data required is in cache.
2906 */
2907 BOOLEAN
2908 UDFIsExtentCached(
2909 IN PVCB Vcb,
2910 IN PEXTENT_INFO ExtInfo, // Extent array
2911 IN int64 Offset, // offset in extent
2912 IN uint32 Length,
2913 IN BOOLEAN ForWrite
2914 )
2915 {
2916 BOOLEAN retstat = FALSE;
2917 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
2918 uint32 to_read, Lba, sect_offs, flags, i;
2919
2920 WCacheStartDirect__(&(Vcb->FastCache), Vcb, TRUE/*FALSE*//*ForWrite*/);
2921 if(!ExtInfo || !ExtInfo->Mapping) goto EO_IsCached;
2922 if(!Length) {
2923 retstat = TRUE;
2924 goto EO_IsCached;
2925 }
2926
2927 // prevent reading out of data space
2928 if(Offset > ExtInfo->Length) goto EO_IsCached;
2929 if(Offset+Length > ExtInfo->Length) goto EO_IsCached;
2930 Offset += ExtInfo->Offset; // used for in-ICB data
2931 // read maximal possible part of each frag of extent
2932 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_read, &flags, &i);
2933 while(((LONG)Length) > 0) {
2934 // EOF check
2935 if(Lba == LBA_OUT_OF_EXTENT) goto EO_IsCached;
2936 Extent += (i + 1);
2937 // check for reading tail
2938 to_read = min(to_read, Length);
2939 if(flags == EXTENT_RECORDED_ALLOCATED) {
2940 retstat = UDFIsDataCached(Vcb, Lba, (to_read+sect_offs+Vcb->BlockSize-1)>>Vcb->BlockSizeBits);
2941 if(!retstat) goto EO_IsCached;
2942 } else if(ForWrite) {
2943 goto EO_IsCached;
2944 }
2945 Offset += to_read;
2946 Length -= to_read;
2947 Lba = UDFNextExtentToLba(Vcb, Extent, &to_read, &flags, &i);
2948 }
2949 retstat = TRUE;
2950 EO_IsCached:
2951 if(!retstat) {
2952 WCacheEODirect__(&(Vcb->FastCache), Vcb);
2953 }
2954 return retstat;
2955 } // end UDFIsExtentCached()
2956
2957 /*
2958 This routine reads cached data only.
2959 */
2960 /*OSSTATUS
2961 UDFReadExtentCached(
2962 IN PVCB Vcb,
2963 IN PEXTENT_INFO ExtInfo, // Extent array
2964 IN int64 Offset, // offset in extent
2965 IN uint32 Length,
2966 OUT int8* Buffer,
2967 OUT uint32* ReadBytes
2968 )
2969 {
2970 (*ReadBytes) = 0;
2971 if(!ExtInfo || !ExtInfo->Mapping) return STATUS_INVALID_PARAMETER;
2972
2973 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
2974 uint32 to_read, Lba, sect_offs, flags, _ReadBytes;
2975 OSSTATUS status;
2976 // prevent reading out of data space
2977 if(Offset > ExtInfo->Length) return STATUS_END_OF_FILE;
2978 if(Offset+Length > ExtInfo->Length) Length = (uint32)(ExtInfo->Length - Offset);
2979 Offset += ExtInfo->Offset; // used for in-ICB data
2980 // read maximal possible part of each frag of extent
2981 while(((LONG)Length) > 0) {
2982 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_read, &flags, NULL);
2983 // EOF check
2984 if(Lba == LBA_OUT_OF_EXTENT) return STATUS_END_OF_FILE;
2985 // check for reading tail
2986 to_read = (to_read < Length) ?
2987 to_read : Length;
2988 if(flags == EXTENT_RECORDED_ALLOCATED) {
2989 status = UDFReadDataCached(Vcb, TRUE, ( ((uint64)Lba) << Vcb->BlockSizeBits) + sect_offs, to_read, Buffer, &_ReadBytes);
2990 (*ReadBytes) += _ReadBytes;
2991 } else {
2992 RtlZeroMemory(Buffer, to_read);
2993 (*ReadBytes) += to_read;
2994 status = STATUS_SUCCESS;
2995 }
2996 if(!OS_SUCCESS(status)) return status;
2997 // prepare for reading next frag...
2998 Buffer += to_read;
2999 Offset += to_read;
3000 Length -= to_read;
3001 }
3002 return STATUS_SUCCESS;
3003 } // end UDFReadExtentCached()*/
3004
3005 /*
3006 This routine reads data at any offset from specified extent.
3007 */
3008 OSSTATUS
3009 UDFReadExtent(
3010 IN PVCB Vcb,
3011 IN PEXTENT_INFO ExtInfo, // Extent array
3012 IN int64 Offset, // offset in extent
3013 IN uint32 Length,
3014 IN BOOLEAN Direct,
3015 OUT int8* Buffer,
3016 OUT uint32* ReadBytes
3017 )
3018 {
3019 (*ReadBytes) = 0;
3020 if(!ExtInfo || !ExtInfo->Mapping) return STATUS_INVALID_PARAMETER;
3021 ASSERT((uint32)Buffer > 0x1000);
3022
3023 AdPrint(("Read ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
3024
3025 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
3026 uint32 to_read, Lba, sect_offs, flags, _ReadBytes;
3027 OSSTATUS status;
3028 // prevent reading out of data space
3029 if(Offset > ExtInfo->Length) return STATUS_END_OF_FILE;
3030 if(Offset+Length > ExtInfo->Length) Length = (uint32)(ExtInfo->Length - Offset);
3031 Offset += ExtInfo->Offset; // used for in-ICB data
3032 // read maximal possible part of each frag of extent
3033 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_read, &flags, &_ReadBytes);
3034 while(Length) {
3035 // EOF check
3036 if(Lba == LBA_OUT_OF_EXTENT) return STATUS_END_OF_FILE;
3037 Extent += (_ReadBytes + 1);
3038 // check for reading tail
3039 to_read = min(to_read, Length);
3040 if(flags == EXTENT_RECORDED_ALLOCATED) {
3041 status = UDFReadData(Vcb, TRUE, ( ((uint64)Lba) << Vcb->BlockSizeBits) + sect_offs, to_read, Direct, Buffer, &_ReadBytes);
3042 (*ReadBytes) += _ReadBytes;
3043 if(!OS_SUCCESS(status)) return status;
3044 } else {
3045 RtlZeroMemory(Buffer, to_read);
3046 (*ReadBytes) += to_read;
3047 }
3048 // prepare for reading next frag...
3049 Length -= to_read;
3050 if(!Length)
3051 break;
3052 ASSERT(to_read);
3053 Buffer += to_read;
3054 // Offset += to_read;
3055 Lba = UDFNextExtentToLba(Vcb, Extent, &to_read, &flags, &_ReadBytes);
3056 sect_offs = 0;
3057 }
3058 return STATUS_SUCCESS;
3059 } // end UDFReadExtent()
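/*
    Hypothetical caller sketch for UDFReadExtent(): reading the first bytes
    of a file's data extent. 'Vcb' and 'Fi' (a PUDF_FILE_INFO) are assumed
    to come from the caller's context; the buffer size and offset are
    arbitrary. Sparse (not recorded) areas are returned zero-filled, as the
    routine above shows.
*/
#if 0
{
    int8   buf[2048];
    uint32 read_bytes = 0;
    OSSTATUS rc = UDFReadExtent(Vcb, &(Fi->Dloc->DataLoc),
                                0 /*Offset*/, sizeof(buf),
                                FALSE /*Direct*/, buf, &read_bytes);
    if(OS_SUCCESS(rc)) {
        // buf now holds 'read_bytes' bytes of file data
    }
}
#endif //0 -- illustration only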
3060
3061 /*
3062 This routine reads and builds a mapping for the
3063 specified amount of data at any offset within the specified extent.
3064 The size of the output buffer is limited by *_SubExtInfoSz
3065 */
3066 OSSTATUS
3067 UDFReadExtentLocation(
3068 IN PVCB Vcb,
3069 IN PEXTENT_INFO ExtInfo, // Extent array
3070 IN int64 Offset, // offset in extent to start SubExtent from
3071 OUT PEXTENT_MAP* _SubExtInfo, // SubExtent mapping array
3072 IN OUT uint32* _SubExtInfoSz, // IN: maximum number of fragments to get
3073 // OUT: actually obtained fragments
3074 OUT int64* _NextOffset // offset, caller can start from to continue
3075 )
3076 {
3077 if(!ExtInfo || !ExtInfo->Mapping)
3078 return STATUS_INVALID_PARAMETER;
3079
3080 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
3081 PEXTENT_MAP SubExtInfo;
3082 uint32 to_read, Lba, sect_offs, flags, Skip_MapEntries;
3083 int32 SubExtInfoSz = *_SubExtInfoSz;
3084 int64 Length;
3085 int64 NextOffset;
3086 // OSSTATUS status = STATUS_BUFFER_OVERFLOW;
3087
3088 (*_SubExtInfo) = NULL;
3089 (*_SubExtInfoSz) = 0;
3090 NextOffset = Offset;
3091 // prevent reading out of data space
3092 if(Offset >= ExtInfo->Length)
3093 return STATUS_END_OF_FILE;
3094 Length = ExtInfo->Length - Offset;
3095 Offset += ExtInfo->Offset; // used for in-ICB data
3096 // read maximal possible part of each frag of extent
3097 SubExtInfo = (PEXTENT_MAP)MyAllocatePoolTag__(NonPagedPool , SubExtInfoSz*sizeof(EXTENT_MAP),
3098 MEM_EXTMAP_TAG);
3099 (*_SubExtInfo) = SubExtInfo;
3100 if(!SubExtInfo)
3101 return STATUS_INSUFFICIENT_RESOURCES;
3102
3103 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_read, &flags, &Skip_MapEntries);
3104 while(Length && SubExtInfoSz) {
3105 // EOF check
3106 if(Lba == LBA_OUT_OF_EXTENT) {
3107 BrutePoint();
3108 return STATUS_END_OF_FILE;
3109 }
3110 Extent += (Skip_MapEntries + 1);
3111 // check for reading tail
3112 to_read = (int32)min((int64)to_read, Length);
3113 SubExtInfo->extLength = to_read;
3114 if(flags == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
3115 SubExtInfo->extLocation = LBA_NOT_ALLOCATED;
3116 } else
3117 if(flags == EXTENT_NOT_RECORDED_ALLOCATED) {
3118 ASSERT(!(Lba & 0x80000000));
3119 SubExtInfo->extLocation = Lba | 0x80000000;
3120 } else {
3121 SubExtInfo->extLocation = Lba;
3122 }
3123 (*_SubExtInfoSz)++;
3124 SubExtInfoSz--;
3125 NextOffset += to_read;
3126 // prepare for reading next frag...
3127 Length -= to_read;
3128 if(!Length) {
3129 // status = STATUS_SUCCESS;
3130 break;
3131 }
3132 ASSERT(to_read);
3133 Lba = UDFNextExtentToLba(Vcb, Extent, &to_read, &flags, &Skip_MapEntries);
3134 sect_offs = 0;
3135 }
3136 (*_NextOffset) = NextOffset;
3137 return STATUS_SUCCESS;
3138 } // end UDFReadExtentLocation()
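/*
    Editorial sketch of how a caller can classify the SubExtent entries
    produced above: extLength holds the byte count of the run and
    extLocation is either LBA_NOT_ALLOCATED (sparse), an LBA with bit 31 set
    (allocated but not yet recorded), or a plain LBA of recorded data. The
    function name is illustrative only.
*/
#if 0
static void
SketchClassifyRun(IN PEXTENT_MAP Run)
{
    if(Run->extLocation == LBA_NOT_ALLOCATED) {
        // sparse run: no backing blocks, the data reads back as zeros
    } else if(Run->extLocation & 0x80000000) {
        uint32 lba = Run->extLocation & 0x7fffffff;  // allocated, not yet recorded
        (void)lba;
    } else {
        uint32 lba = Run->extLocation;               // recorded data at this LBA
        (void)lba;
    }
}
#endif //0 -- illustration only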
3139
3140 #ifdef _MSC_VER
3141 #pragma warning(push)
3142 #pragma warning(disable:4035) // re-enable below
3143 #endif
3144
3145 uint32
3146 UDFGetZeroLength(
3147 IN int8* Buffer,
3148 IN uint32 Length
3149 )
3150 {
3151 uint32 i;
3152 Length /= sizeof(uint32);
3153 for(i=0; i<Length; i++) {
3154 if( ((uint32*)Buffer)[i] )
3155 break;
3156 }
3157 return i*sizeof(uint32); // length of the leading run of zero dwords, in bytes
3158 }
3159
3160 #ifdef _MSC_VER
3161 #pragma warning(pop) // re-enable warning #4035
3162 #endif
3163
3164 #ifndef UDF_READ_ONLY_BUILD
3165 /*
3166 This routine writes data at any offset to specified extent.
3167 */
3168 OSSTATUS
3169 UDFWriteExtent(
3170 IN PVCB Vcb,
3171 IN PEXTENT_INFO ExtInfo, // Extent array
3172 IN int64 Offset, // offset in extent
3173 IN uint32 Length,
3174 IN BOOLEAN Direct, // setting this flag delays flushing of the given
3175 // data indefinitely
3176 IN int8* Buffer,
3177 OUT uint32* WrittenBytes
3178 )
3179 {
3180 if(!ExtInfo || !ExtInfo->Mapping)
3181 return STATUS_INVALID_PARAMETER;
3182
3183 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
3184 uint32 to_write, Lba, sect_offs, flags;
3185 OSSTATUS status;
3186 uint32 _WrittenBytes;
3187 BOOLEAN reread_lba;
3188 // BOOLEAN already_prepared = FALSE;
3189 // BOOLEAN prepare = !Buffer;
3190
3191 AdPrint(("Write ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
3192
3193 Offset += ExtInfo->Offset; // used for in-ICB data
3194 // write maximal possible part of each frag of extent
3195 while(((LONG)Length) > 0) {
3196 UDFCheckSpaceAllocation(Vcb, 0, Extent, AS_USED); // check if used
3197 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_write, &flags, NULL);
3198 // EOF check
3199 if(Lba == LBA_OUT_OF_EXTENT) {
3200 return STATUS_END_OF_FILE;
3201 }
3202 /* if((to_write < Length) &&
3203 !Direct && !prepare && !already_prepared) {
3204 // rebuild mapping, allocate space, etc.
3205 // to indicate this, set Buffer to NULL
3206 AdPrint(("UDFWriteExtent: Prepare\n"));
3207 BrutePoint();
3208 _WrittenBytes = 0;
3209 status = UDFWriteExtent(Vcb, ExtInfo, Offset, Length, *//*Direct*//*FALSE, NULL, &_WrittenBytes);
3210 if(!OS_SUCCESS(status)) {
3211 return status;
3212 }
3213 Extent = ExtInfo->Mapping;
3214 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_write, &flags, NULL);
3215 already_prepared = TRUE;
3216 }*/
3217 if(flags == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
3218 // here we should allocate space for this extent
3219 if(!OS_SUCCESS(status = UDFMarkNotAllocatedAsAllocated(Vcb, Offset, to_write, ExtInfo)))
3220 return status;
3221 Extent = ExtInfo->Mapping;
3222 UDFCheckSpaceAllocation(Vcb, 0, Extent, AS_USED); // check if used
3223 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_write, &flags, NULL);
3224 if(Lba == LBA_OUT_OF_EXTENT) {
3225 return STATUS_END_OF_FILE;
3226 }
3227 // we have already re-read Lba
3228 reread_lba = FALSE;
3229 } else {
3230 // we may need to re-read Lba if some changes are
3231 // made while converting from Alloc-Not-Rec
3232 reread_lba = TRUE;
3233 }
3234 // check if we are writing to a not-recorded-but-allocated extent;
3235 // in this case we must zero-pad the blocks around the
3236 // modified area
3237 //
3238 // ...|xxxxxxxx|xxxxxxxx|xxxxxxxx|...
3239 //     .                        .
3240 //     .           ||           .
3241 //     .           \/           .
3242 //     .                        .
3243 // ...|000ddddd|dddddddd|dd000000|...
3244 //     .                        .
3245 //        ^                ^
3246 //        sect_offs        sect_offs+to_write
3247 //        .                .
3248 //        .<-- to_write -->.
3249 //
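//
// Illustration only (hypothetical numbers): with LBlockSize = 2048,
// sect_offs = 300 and to_write = 3000 (and !Vcb->CDR_Mode) the write spans
// two logical blocks; both are pre-filled with zeros below, so after the
// caller's data is written, bytes 0..299 of the first block and bytes
// 1252..2047 (3300 & 2047 = 1252) of the last block remain zero.
//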
3250 to_write = min(to_write, Length);
3251 if(flags == EXTENT_NOT_RECORDED_ALLOCATED) {
3252 if(!OS_SUCCESS(status = UDFMarkAllocatedAsRecorded(Vcb, Offset, to_write, ExtInfo)))
3253 return status;
3254 Extent = ExtInfo->Mapping;
3255 UDFCheckSpaceAllocation(Vcb, 0, Extent, AS_USED); // check if used
3256 if(reread_lba) {
3257 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_write, &flags, NULL);
3258 to_write = min(to_write, Length);
3259 }
3260 /*
3261 we must fill 1st block with zeros in 1 of 2 cases:
3262 1) start offset is not aligned on LBlock boundary
3263 OR
3264 2) end offset is not aligned on LBlock boundary and lies in
3265 the same LBlock
3266
3267 we must fill last block with zeros if both
3268 1) end offset is not aligned on LBlock boundary
3269 AND
3270 2) end offset DOESN'T lie in the 1st LBlock
3271 */
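//
// Illustration only (hypothetical numbers, LBlockSize = 2048):
//  - sect_offs = 100, to_write = 200: the end offset (300) is unaligned and
//    stays inside the 1st LBlock, so only the 1st block is zero-filled;
//  - sect_offs = 0, to_write = 2048: both ends are LBlock-aligned, so no
//    zero-filling is needed at all.
//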
3272
3273 // if(!prepare) {
3274 // pad 1st logical block
3275 if((sect_offs || (sect_offs + to_write < Vcb->LBlockSize) )
3276 &&
3277 !Vcb->CDR_Mode) {
3278 status = UDFWriteData(Vcb, TRUE,
3279 ( ((uint64)Lba) << Vcb->BlockSizeBits),
3280 Vcb->LBlockSize, Direct, Vcb->ZBuffer, &_WrittenBytes);
3281 if(!OS_SUCCESS(status))
3282 return status;
3283 }
3284 // pad last logical block
3285 if((sect_offs + to_write > Vcb->LBlockSize) &&
3286 ((sect_offs + to_write) & (Vcb->LBlockSize - 1))) {
3287 status = UDFWriteData(Vcb, TRUE,
3288 (( ((uint64)Lba) << Vcb->BlockSizeBits) + sect_offs + to_write) & ~((int64)(Vcb->LBlockSize)-1),
3289 Vcb->LBlockSize, Direct, Vcb->ZBuffer, &_WrittenBytes);
3290 if(!OS_SUCCESS(status))
3291 return status;
3292 }
3293 /* } else {
3294 status = STATUS_SUCCESS;
3295 }*/
3296 }
3297 ASSERT(to_write);
3298 // if(!prepare) {
3299 status = UDFWriteData(Vcb, TRUE, ( ((uint64)Lba) << Vcb->BlockSizeBits) + sect_offs, to_write, Direct, Buffer, &_WrittenBytes);
3300 *WrittenBytes += _WrittenBytes;
3301 if(!OS_SUCCESS(status)) return status;
3302 /* } else {
3303 status = STATUS_SUCCESS;
3304 *WrittenBytes += to_write;
3305 }*/
3306 // prepare for writing next frag...
3307 Buffer += to_write;
3308 Offset += to_write;
3309 Length -= to_write;
3310 }
3311 AdPrint(("Write: ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
3312 return STATUS_SUCCESS;
3313 } // end UDFWriteExtent()
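/*
 Illustration only (hypothetical caller, variable names are assumptions):
 UDFWriteExtent() accumulates into *WrittenBytes rather than resetting it,
 so a caller is expected to zero the counter first, e.g.

     uint32 written = 0;
     status = UDFWriteExtent(Vcb, ExtInfo, ByteOffset, ByteCount,
                             FALSE,       // Direct
                             UserBuffer, &written);
*/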
3314
3315 //#if 0
3316 /*
3317 This routine zeroes and/or deallocates data at an arbitrary offset within the specified extent.
3318 */
3319 OSSTATUS
3320 UDFZeroExtent(
3321 IN PVCB Vcb,
3322 IN PEXTENT_INFO ExtInfo, // Extent array
3323 IN int64 Offset, // offset in extent
3324 IN uint32 Length,
3325 IN BOOLEAN Deallocate, // deallocate frag or just mark as unrecorded
3326 IN BOOLEAN Direct, // setting this flag delays flushing of the given
3327 // data indefinitely
3328 OUT uint32* WrittenBytes
3329 )
3330 {
3331 if(!ExtInfo || !ExtInfo->Mapping)
3332 return STATUS_INVALID_PARAMETER;
3333
3334 PEXTENT_MAP Extent = ExtInfo->Mapping; // Extent array
3335 uint32 to_write, Lba, sect_offs, flags;
3336 OSSTATUS status;
3337 uint32 _WrittenBytes;
3338 uint32 LBS = Vcb->LBlockSize;
3339
3340 AdPrint(("Zero ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
3341
3342 Offset += ExtInfo->Offset; // used for in-ICB data
3343 // fill/deallocate maximal possible part of each frag of extent
3344 while(((LONG)Length) > 0) {
3345 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, &to_write, &flags, NULL);
3346 // EOF check
3347 if(Lba == LBA_OUT_OF_EXTENT) {
3348 return STATUS_END_OF_FILE;
3349 }
3350 // check for writing tail
3351 to_write = min(to_write, Length);
3352
3353 if(flags == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
3354 // here we should do nothing
3355 *WrittenBytes += to_write;
3356 } else
3357 if(flags == EXTENT_NOT_RECORDED_ALLOCATED) {
3358 // we should just deallocate this frag
3359 if(Deallocate) {
3360 if(!OS_SUCCESS(status = UDFMarkAllocatedAsNotAllocated(Vcb, Offset, to_write, ExtInfo)))
3361 return status;
3362 }
3363 Extent = ExtInfo->Mapping;
3364 *WrittenBytes += to_write;
3365 } else {
3366 // fill tail of the 1st Block with ZEROs
3367 if(sect_offs) {
3368 status = UDFWriteData(Vcb, TRUE, ( ((uint64)Lba) << Vcb->BlockSizeBits) + sect_offs,
3369 min(to_write, LBS-sect_offs),
3370 Direct, Vcb->ZBuffer, &_WrittenBytes);
3371 *WrittenBytes += _WrittenBytes;
3372 if(!OS_SUCCESS(status))
3373 return status;
3374 Offset += _WrittenBytes;
3375 Length -= _WrittenBytes;
3376 to_write -= _WrittenBytes;
3377 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, NULL, &flags, NULL); // don't clobber to_write: it already holds the byte count left to zero in this frag
3378 ASSERT(flags != EXTENT_NOT_RECORDED_NOT_ALLOCATED);
3379 ASSERT(flags != EXTENT_NOT_RECORDED_ALLOCATED);
3380 ASSERT(!sect_offs);
3381 }
3382 // deallocate Blocks
3383 if(to_write >= LBS) {
3384 // use 'sect_offs' as length of extent to be deallocated
3385 sect_offs = to_write & ~(LBS - 1);
3386 if(Deallocate) {
3387 status = UDFMarkAllocatedAsNotAllocated(Vcb, Offset, sect_offs, ExtInfo);
3388 } else {
3389 status = UDFMarkRecordedAsAllocated(Vcb, Offset, sect_offs, ExtInfo);
3390 }
3391 if(!OS_SUCCESS(status))
3392 return status;
3393 // reload extent mapping
3394 Extent = ExtInfo->Mapping;
3395 Offset += sect_offs;
3396 Length -= sect_offs;
3397 *WrittenBytes += sect_offs;
3398 to_write -= sect_offs;
3399 Lba = UDFExtentOffsetToLba(Vcb, Extent, Offset, &sect_offs, NULL, &flags, NULL); // don't clobber to_write: it still holds the sub-block tail length to zero
3400 ASSERT(flags != EXTENT_NOT_RECORDED_NOT_ALLOCATED);
3401 ASSERT(flags != EXTENT_NOT_RECORDED_ALLOCATED);
3402 ASSERT(!sect_offs);
3403 }
3404 // fill beginning of the last Block with ZEROs
3405 if(to_write) {
3406 status = UDFWriteData(Vcb, TRUE, ( ((uint64)Lba) << Vcb->BlockSizeBits), to_write, Direct, Vcb->ZBuffer, &_WrittenBytes);
3407 *WrittenBytes += _WrittenBytes;
3408 if(!OS_SUCCESS(status))
3409 return status;
3410 ASSERT(to_write == _WrittenBytes);
3411 }
3412 }
3413 AdPrint(("Zero... ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
3414 // prepare for filling next frag...
3415 Offset += to_write;
3416 Length -= to_write;
3417 }
3418 AdPrint(("Zero: ExtInfo %x, Mapping %x\n", ExtInfo, ExtInfo->Mapping));
3419 return STATUS_SUCCESS;
3420 } // end UDFZeroExtent()
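/*
 Illustration only (hypothetical caller, variable names are assumptions):
 with Deallocate = FALSE the range stays allocated and whole blocks are only
 marked as not recorded (partial blocks are zero-filled); with
 Deallocate = TRUE whole blocks are returned to free space.

     uint32 zeroed = 0;
     // zero the range but keep its allocation
     status = UDFZeroExtent(Vcb, ExtInfo, ByteOffset, ByteCount,
                            FALSE,        // Deallocate
                            FALSE,        // Direct
                            &zeroed);
*/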
3421 //#endif //0
3422 #endif //UDF_READ_ONLY_BUILD