1 /**
2 * aops.c - NTFS kernel address space operations and page cache handling.
3 * Part of the Linux-NTFS project.
4 *
5 * Copyright (c) 2001-2003 Anton Altaparmakov
6 * Copyright (c) 2002 Richard Russon
7 *
8 * This program/include file is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as published
10 * by the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program/include file is distributed in the hope that it will be
14 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program (in the main directory of the Linux-NTFS
20 * distribution in the file COPYING); if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 #include <linux/errno.h>
25 #include <linux/mm.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/buffer_head.h>
29
30 #include "ntfs.h"
31
32 /**
33 * ntfs_end_buffer_async_read - async io completion for reading attributes
34 * @bh: buffer head on which io is completed
35 * @uptodate: whether @bh is now uptodate or not
36 *
37 * Asynchronous I/O completion handler for reading pages belonging to the
38 * attribute address space of an inode. The inodes can either be files or
39 * directories or they can be fake inodes describing some attribute.
40 *
41 * If NInoMstProtected(), perform the post read mst fixups when all IO on the
42 * page has been completed and mark the page uptodate or set the error bit on
43 * the page. To determine the size of the records that need fixing up, we cheat
44 * a little bit by setting the index_block_size in ntfs_inode to the ntfs
45 * record size, and index_block_size_bits to the log(base 2) of the ntfs
46 * record size.
47 */
48 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
49 {
50 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
51 unsigned long flags;
52 struct buffer_head *tmp;
53 struct page *page;
54 ntfs_inode *ni;
55 int page_uptodate = 1;
56
57 page = bh->b_page;
58 ni = NTFS_I(page->mapping->host);
59
60 if (likely(uptodate)) {
61 s64 file_ofs;
62
63 set_buffer_uptodate(bh);
64
65 file_ofs = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
66 /* Check for the current buffer head overflowing. */
67 if (file_ofs + bh->b_size > ni->initialized_size) {
68 char *addr;
69 int ofs = 0;
70
71 if (file_ofs < ni->initialized_size)
72 ofs = ni->initialized_size - file_ofs;
73 addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
74 memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
75 flush_dcache_page(page);
76 kunmap_atomic(addr, KM_BIO_SRC_IRQ);
77 }
78 } else {
79 clear_buffer_uptodate(bh);
80 ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %Lu.",
81 (unsigned long long)bh->b_blocknr);
82 SetPageError(page);
83 }
84
85 spin_lock_irqsave(&page_uptodate_lock, flags);
86 clear_buffer_async_read(bh);
87 unlock_buffer(bh);
88 tmp = bh;
89 do {
90 if (!buffer_uptodate(tmp))
91 page_uptodate = 0;
92 if (buffer_async_read(tmp)) {
93 if (likely(buffer_locked(tmp)))
94 goto still_busy;
95 /* Async buffers must be locked. */
96 BUG();
97 }
98 tmp = tmp->b_this_page;
99 } while (tmp != bh);
100 spin_unlock_irqrestore(&page_uptodate_lock, flags);
101 /*
102 * If none of the buffers had errors then we can set the page uptodate,
103 * but we first have to perform the post read mst fixups, if the
104 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
105 */
106 if (!NInoMstProtected(ni)) {
107 if (likely(page_uptodate && !PageError(page)))
108 SetPageUptodate(page);
109 } else {
110 char *addr;
111 unsigned int i, recs, nr_err;
112 u32 rec_size;
113
114 rec_size = ni->itype.index.block_size;
115 recs = PAGE_CACHE_SIZE / rec_size;
116 addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
117 for (i = nr_err = 0; i < recs; i++) {
118 if (likely(!post_read_mst_fixup((NTFS_RECORD*)(addr +
119 i * rec_size), rec_size)))
120 continue;
121 nr_err++;
122 ntfs_error(ni->vol->sb, "post_read_mst_fixup() failed, "
123 "corrupt %s record 0x%Lx. Run chkdsk.",
124 ni->mft_no ? "index" : "mft",
125 (long long)(((s64)page->index <<
126 PAGE_CACHE_SHIFT >>
127 ni->itype.index.block_size_bits) + i));
128 }
129 flush_dcache_page(page);
130 kunmap_atomic(addr, KM_BIO_SRC_IRQ);
131 if (likely(!PageError(page))) {
132 if (likely(!nr_err && recs)) {
133 if (likely(page_uptodate))
134 SetPageUptodate(page);
135 } else {
136 ntfs_error(ni->vol->sb, "Setting page error, "
137 "index 0x%lx.", page->index);
138 SetPageError(page);
139 }
140 }
141 }
142 unlock_page(page);
143 return;
144 still_busy:
145 spin_unlock_irqrestore(&page_uptodate_lock, flags);
146 return;
147 }
148
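/*
 * Illustration of the record arithmetic above, with assumed (not actual)
 * values only: PAGE_CACHE_SIZE = 4096 and an mft record size of 1024 bytes,
 * i.e. ni->itype.index.block_size = 1024 and block_size_bits = 10:
 *
 *	rec_size = 1024
 *	recs     = PAGE_CACHE_SIZE / rec_size = 4
 *
 * For page->index = 3, the record number reported for the i-th record in
 * the page is ((s64)3 << 12 >> 10) + i = 12 + i, i.e. the page holds
 * records 12 to 15 and each of them gets its own post read mst fixup.
 */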
149 /**
150 * ntfs_read_block - fill a @page of an address space with data
151 * @page: page cache page to fill with data
152 *
153 * Fill the page @page of the address space belonging to the @page->mapping->host inode.
154 * We read each buffer asynchronously and when all buffers are read in, our io
155 * completion handler ntfs_end_buffer_async_read(), if required, automatically
156 * applies the mst fixups to the page before finally marking it uptodate and
157 * unlocking it.
158 *
159 * We only enforce the allocated_size limit because i_size is checked for in
160 * generic_file_read().
161 *
162 * Return 0 on success and -errno on error.
163 *
164 * Contains an adapted version of fs/buffer.c::block_read_full_page().
165 */
166 static int ntfs_read_block(struct page *page)
167 {
168 VCN vcn;
169 LCN lcn;
170 ntfs_inode *ni;
171 ntfs_volume *vol;
172 run_list_element *rl;
173 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
174 sector_t iblock, lblock, zblock;
175 unsigned int blocksize, vcn_ofs;
176 int i, nr;
177 unsigned char blocksize_bits;
178
179 ni = NTFS_I(page->mapping->host);
180 vol = ni->vol;
181
182 blocksize_bits = VFS_I(ni)->i_blkbits;
183 blocksize = 1 << blocksize_bits;
184
185 if (!page_has_buffers(page))
186 create_empty_buffers(page, blocksize, 0);
187 bh = head = page_buffers(page);
188 if (unlikely(!bh)) {
189 unlock_page(page);
190 return -ENOMEM;
191 }
192
193 iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
194 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
195 zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
196
197 #ifdef DEBUG
198 if (unlikely(!ni->run_list.rl && !ni->mft_no && !NInoAttr(ni)))
199 panic("NTFS: $MFT/$DATA run list has been unmapped! This is a "
200 "very serious bug! Cannot continue...");
201 #endif
202
203 /* Loop through all the buffers in the page. */
204 rl = NULL;
205 nr = i = 0;
206 do {
207 if (unlikely(buffer_uptodate(bh)))
208 continue;
209 if (unlikely(buffer_mapped(bh))) {
210 arr[nr++] = bh;
211 continue;
212 }
213 bh->b_bdev = vol->sb->s_bdev;
214 /* Is the block within the allowed limits? */
215 if (iblock < lblock) {
216 BOOL is_retry = FALSE;
217
218 /* Convert iblock into corresponding vcn and offset. */
219 vcn = (VCN)iblock << blocksize_bits >>
220 vol->cluster_size_bits;
221 vcn_ofs = ((VCN)iblock << blocksize_bits) &
222 vol->cluster_size_mask;
223 if (!rl) {
224 lock_retry_remap:
225 down_read(&ni->run_list.lock);
226 rl = ni->run_list.rl;
227 }
228 if (likely(rl != NULL)) {
229 /* Seek to element containing target vcn. */
230 while (rl->length && rl[1].vcn <= vcn)
231 rl++;
232 lcn = vcn_to_lcn(rl, vcn);
233 } else
234 lcn = (LCN)LCN_RL_NOT_MAPPED;
235 /* Successful remap. */
236 if (lcn >= 0) {
237 /* Setup buffer head to correct block. */
238 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
239 + vcn_ofs) >> blocksize_bits;
240 set_buffer_mapped(bh);
241 /* Only read initialized data blocks. */
242 if (iblock < zblock) {
243 arr[nr++] = bh;
244 continue;
245 }
246 /* Fully non-initialized data block, zero it. */
247 goto handle_zblock;
248 }
249 /* It is a hole, need to zero it. */
250 if (lcn == LCN_HOLE)
251 goto handle_hole;
252 /* If first try and run list unmapped, map and retry. */
253 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
254 is_retry = TRUE;
255 /*
256 * Attempt to map run list, dropping lock for
257 * the duration.
258 */
259 up_read(&ni->run_list.lock);
260 if (!map_run_list(ni, vcn))
261 goto lock_retry_remap;
262 rl = NULL;
263 }
264 /* Hard error, zero out region. */
265 SetPageError(page);
266 ntfs_error(vol->sb, "vcn_to_lcn(vcn = 0x%Lx) failed "
267 "with error code 0x%Lx%s.",
268 (long long)vcn, (long long)-lcn,
269 is_retry ? " even after retrying" : "");
270 // FIXME: Depending on vol->on_errors, do something.
271 }
272 /*
273 * Either iblock was outside lblock limits or vcn_to_lcn()
274 * returned error. Just zero that portion of the page and set
275 * the buffer uptodate.
276 */
277 handle_hole:
278 bh->b_blocknr = -1UL;
279 clear_buffer_mapped(bh);
280 handle_zblock:
281 memset(kmap(page) + i * blocksize, 0, blocksize);
282 flush_dcache_page(page);
283 kunmap(page);
284 set_buffer_uptodate(bh);
285 } while (i++, iblock++, (bh = bh->b_this_page) != head);
286
287 /* Release the lock if we took it. */
288 if (rl)
289 up_read(&ni->run_list.lock);
290
291 /* Check we have at least one buffer ready for i/o. */
292 if (nr) {
293 struct buffer_head *tbh;
294
295 /* Lock the buffers. */
296 for (i = 0; i < nr; i++) {
297 tbh = arr[i];
298 lock_buffer(tbh);
299 tbh->b_end_io = ntfs_end_buffer_async_read;
300 set_buffer_async_read(tbh);
301 }
302 /* Finally, start i/o on the buffers. */
303 for (i = 0; i < nr; i++) {
304 tbh = arr[i];
305 if (likely(!buffer_uptodate(tbh)))
306 submit_bh(READ, tbh);
307 else
308 ntfs_end_buffer_async_read(tbh, 1);
309 }
310 return 0;
311 }
312 /* No i/o was scheduled on any of the buffers. */
313 if (likely(!PageError(page)))
314 SetPageUptodate(page);
315 else /* Signal synchronous i/o error. */
316 nr = -EIO;
317 unlock_page(page);
318 return nr;
319 }
320
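/*
 * Worked example of the iblock to device block mapping performed above,
 * with assumed values only: blocksize = 512 (blocksize_bits = 9) and
 * cluster size = 4096 (cluster_size_bits = 12):
 *
 *	iblock  = 18
 *	vcn     = (18 << 9) >> 12   = 2
 *	vcn_ofs = (18 << 9) & 4095  = 1024
 *
 * If vcn_to_lcn() resolves vcn 2 to, say, lcn 5000, the buffer head is
 * pointed at device block ((5000 << 12) + 1024) >> 9 = 40002. A negative
 * lcn selects the hole, not-mapped retry, or error paths instead.
 */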
321 /**
322 * ntfs_readpage - fill a @page of a @file with data from the device
323 * @file: open file to which the page @page belongs or NULL
324 * @page: page cache page to fill with data
325 *
326 * For non-resident attributes, ntfs_readpage() fills the @page of the open
327 * file @file by calling the ntfs version of the generic block_read_full_page()
328 * function, ntfs_read_block(), which in turn creates and reads in the buffers
329 * associated with the page asynchronously.
330 *
331 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
332 * data from the mft record (which at this stage is most likely in memory) and
333 * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
334 * even if the mft record is not cached at this point in time, we need to wait
335 * for it to be read in before we can do the copy.
336 *
337 * Return 0 on success and -errno on error.
338 *
339 * WARNING: Do not make this function static! It is used by mft.c!
340 */
341 int ntfs_readpage(struct file *file, struct page *page)
342 {
343 s64 attr_pos;
344 ntfs_inode *ni, *base_ni;
345 char *addr;
346 attr_search_context *ctx;
347 MFT_RECORD *mrec;
348 u32 attr_len;
349 int err = 0;
350
351 BUG_ON(!PageLocked(page));
352
353 /*
354 * This can potentially happen because we clear PageUptodate() during
355 * ntfs_writepage() of MstProtected() attributes.
356 */
357 if (PageUptodate(page)) {
358 unlock_page(page);
359 return 0;
360 }
361
362 ni = NTFS_I(page->mapping->host);
363
364 if (NInoNonResident(ni)) {
365 /*
366 * Only unnamed $DATA attributes can be compressed or
367 * encrypted.
368 */
369 if (ni->type == AT_DATA && !ni->name_len) {
370 /* If file is encrypted, deny access, just like NT4. */
371 if (NInoEncrypted(ni)) {
372 err = -EACCES;
373 goto err_out;
374 }
375 /* Compressed data streams are handled in compress.c. */
376 if (NInoCompressed(ni))
377 return ntfs_read_compressed_block(page);
378 }
379 /* Normal data stream. */
380 return ntfs_read_block(page);
381 }
382 /* Attribute is resident, implying it is not compressed or encrypted. */
383 if (!NInoAttr(ni))
384 base_ni = ni;
385 else
386 base_ni = ni->ext.base_ntfs_ino;
387
388 /* Map, pin, and lock the mft record. */
389 mrec = map_mft_record(base_ni);
390 if (unlikely(IS_ERR(mrec))) {
391 err = PTR_ERR(mrec);
392 goto err_out;
393 }
394 ctx = get_attr_search_ctx(base_ni, mrec);
395 if (unlikely(!ctx)) {
396 err = -ENOMEM;
397 goto unm_err_out;
398 }
399 if (unlikely(!lookup_attr(ni->type, ni->name, ni->name_len,
400 IGNORE_CASE, 0, NULL, 0, ctx))) {
401 err = -ENOENT;
402 goto put_unm_err_out;
403 }
404
405 /* Starting position of the page within the attribute value. */
406 attr_pos = page->index << PAGE_CACHE_SHIFT;
407
408 /* The total length of the attribute value. */
409 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
410
411 addr = kmap(page);
412 /* Copy over in bounds data, zeroing the remainder of the page. */
413 if (attr_pos < attr_len) {
414 u32 bytes = attr_len - attr_pos;
415 if (bytes > PAGE_CACHE_SIZE)
416 bytes = PAGE_CACHE_SIZE;
417 else if (bytes < PAGE_CACHE_SIZE)
418 memset(addr + bytes, 0, PAGE_CACHE_SIZE - bytes);
419 /* Copy the data to the page. */
420 memcpy(addr, attr_pos + (char*)ctx->attr +
421 le16_to_cpu(
422 ctx->attr->data.resident.value_offset), bytes);
423 } else
424 memset(addr, 0, PAGE_CACHE_SIZE);
425 flush_dcache_page(page);
426 kunmap(page);
427
428 SetPageUptodate(page);
429 put_unm_err_out:
430 put_attr_search_ctx(ctx);
431 unm_err_out:
432 unmap_mft_record(base_ni);
433 err_out:
434 unlock_page(page);
435 return err;
436 }
437
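/*
 * Illustration of the resident attribute copy bounds used above, with
 * assumed values only: a 300 byte resident attribute value and
 * PAGE_CACHE_SIZE = 4096:
 *
 *	page 0: attr_pos = 0 < attr_len = 300, bytes = 300
 *		-> 300 bytes are copied from the mft record and the trailing
 *		   3796 bytes of the page are zeroed.
 *	page 1 onwards: attr_pos >= attr_len
 *		-> the whole page is zeroed.
 */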
438 #ifdef NTFS_RW
439
440 /**
441 * ntfs_write_block - write a @page to the backing store
442 * @page: page cache page to write out
443 *
444 * This function is for writing pages belonging to non-resident, non-mst
445 * protected attributes to their backing store.
446 *
447 * For a page with buffers, map and write the dirty buffers asynchronously
448 * under page writeback. For a page without buffers, create buffers for the
449 * page, then proceed as above.
450 *
451 * If a page doesn't have buffers the page dirty state is definitive. If a page
452 * does have buffers, the page dirty state is just a hint, and the buffer dirty
453 * state is definitive. (A hint which has rules: dirty buffers against a clean
454 * page is illegal. Other combinations are legal and need to be handled, in
455 * particular a dirty page containing clean buffers.)
456 *
457 * Return 0 on success and -errno on error.
458 *
459 * Based on ntfs_read_block() and __block_write_full_page().
460 */
461 static int ntfs_write_block(struct page *page)
462 {
463 VCN vcn;
464 LCN lcn;
465 sector_t block, dblock, iblock;
466 struct inode *vi;
467 ntfs_inode *ni;
468 ntfs_volume *vol;
469 run_list_element *rl;
470 struct buffer_head *bh, *head;
471 unsigned int blocksize, vcn_ofs;
472 int err;
473 BOOL need_end_writeback;
474 unsigned char blocksize_bits;
475
476 vi = page->mapping->host;
477 ni = NTFS_I(vi);
478 vol = ni->vol;
479
480 ntfs_debug("Entering for inode %li, attribute type 0x%x, page index "
481 "0x%lx.\n", vi->i_ino, ni->type, page->index);
482
483 BUG_ON(!NInoNonResident(ni));
484 BUG_ON(NInoMstProtected(ni));
485
486 blocksize_bits = vi->i_blkbits;
487 blocksize = 1 << blocksize_bits;
488
489 if (!page_has_buffers(page)) {
490 BUG_ON(!PageUptodate(page));
491 create_empty_buffers(page, blocksize,
492 (1 << BH_Uptodate) | (1 << BH_Dirty));
493 }
494 bh = head = page_buffers(page);
495 if (unlikely(!bh)) {
496 ntfs_warning(vol->sb, "Error allocating page buffers. "
497 "Redirtying page so we try again later.");
498 /*
499 * Put the page back on mapping->dirty_pages, but leave its
500 * buffer's dirty state as-is.
501 */
502 // FIXME: Once Andrew's -EAGAIN patch goes in, remove the
503 // __set_page_dirty_nobuffers(page) and return -EAGAIN instead
504 // of zero.
505 __set_page_dirty_nobuffers(page);
506 unlock_page(page);
507 return 0;
508 }
509
510 /* NOTE: Different naming scheme to ntfs_read_block()! */
511
512 /* The first block in the page. */
513 block = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
514
515 /* The first out of bounds block for the data size. */
516 dblock = (vi->i_size + blocksize - 1) >> blocksize_bits;
517
518 /* The last (fully or partially) initialized block. */
519 iblock = ni->initialized_size >> blocksize_bits;
520
521 /*
522 * Be very careful. We have no exclusion from __set_page_dirty_buffers
523 * here, and the (potentially unmapped) buffers may become dirty at
524 * any time. If a buffer becomes dirty here after we've inspected it
525 * then we just miss that fact, and the page stays dirty.
526 *
527 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
528 * handle that here by just cleaning them.
529 */
530
531 /*
532 * Loop through all the buffers in the page, mapping all the dirty
533 * buffers to disk addresses and handling any aliases from the
534 * underlying block device's mapping.
535 */
536 rl = NULL;
537 err = 0;
538 do {
539 BOOL is_retry = FALSE;
540
541 if (unlikely(block >= dblock)) {
542 /*
543 * Mapped buffers outside i_size will occur, because
544 * this page can be outside i_size when there is a
545 * truncate in progress. The contents of such buffers
546 * were zeroed by ntfs_writepage().
547 *
548 * FIXME: What about the small race window where
549 * ntfs_writepage() has not done any clearing because
550 * the page was within i_size but before we get here,
551 * vmtruncate() modifies i_size?
552 */
553 clear_buffer_dirty(bh);
554 set_buffer_uptodate(bh);
555 continue;
556 }
557
558 /* Clean buffers are not written out, so no need to map them. */
559 if (!buffer_dirty(bh))
560 continue;
561
562 /* Make sure we have enough initialized size. */
563 if (unlikely((block >= iblock) &&
564 (ni->initialized_size < vi->i_size))) {
565 /*
566 * If this page is fully outside initialized size, zero
567 * out all pages between the current initialized size
568 * and the current page. Just use ntfs_readpage() to do
569 * the zeroing transparently.
570 */
571 if (block > iblock) {
572 // TODO:
573 // For each page do:
574 // - read_cache_page()
575 // Again for each page do:
576 // - wait_on_page_locked()
577 // - Check (PageUptodate(page) &&
578 // !PageError(page))
579 // Update initialized size in the attribute and
580 // in the inode.
581 // Again, for each page do:
582 // __set_page_dirty_buffers();
583 // page_cache_release()
584 // We don't need to wait on the writes.
585 // Update iblock.
586 }
587 /*
588 * The current page straddles initialized size. Zero
589 * all non-uptodate buffers and set them uptodate (and
590 * dirty?). Note, there aren't any non-uptodate buffers
591 * if the page is uptodate.
592 * FIXME: For an uptodate page, the buffers may need to
593 * be written out because they were not initialized on
594 * disk before.
595 */
596 if (!PageUptodate(page)) {
597 // TODO:
598 // Zero any non-uptodate buffers up to i_size.
599 // Set them uptodate and dirty.
600 }
601 // TODO:
602 // Update initialized size in the attribute and in the
603 // inode (up to i_size).
604 // Update iblock.
605 // FIXME: This is inefficient. Try to batch the two
606 // size changes to happen in one go.
607 ntfs_error(vol->sb, "Writing beyond initialized size "
608 "is not supported yet. Sorry.");
609 err = -EOPNOTSUPP;
610 break;
611 // Do NOT set_buffer_new() BUT DO clear buffer range
612 // outside write request range.
613 // set_buffer_uptodate() on complete buffers as well as
614 // set_buffer_dirty().
615 }
616
617 /* No need to map buffers that are already mapped. */
618 if (buffer_mapped(bh))
619 continue;
620
621 /* Unmapped, dirty buffer. Need to map it. */
622 bh->b_bdev = vol->sb->s_bdev;
623
624 /* Convert block into corresponding vcn and offset. */
625 vcn = (VCN)block << blocksize_bits >> vol->cluster_size_bits;
626 vcn_ofs = ((VCN)block << blocksize_bits) &
627 vol->cluster_size_mask;
628 if (!rl) {
629 lock_retry_remap:
630 down_read(&ni->run_list.lock);
631 rl = ni->run_list.rl;
632 }
633 if (likely(rl != NULL)) {
634 /* Seek to element containing target vcn. */
635 while (rl->length && rl[1].vcn <= vcn)
636 rl++;
637 lcn = vcn_to_lcn(rl, vcn);
638 } else
639 lcn = (LCN)LCN_RL_NOT_MAPPED;
640 /* Successful remap. */
641 if (lcn >= 0) {
642 /* Setup buffer head to point to correct block. */
643 bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
644 vcn_ofs) >> blocksize_bits;
645 set_buffer_mapped(bh);
646 continue;
647 }
648 /* It is a hole, need to instantiate it. */
649 if (lcn == LCN_HOLE) {
650 // TODO: Instantiate the hole.
651 // clear_buffer_new(bh);
652 // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
653 ntfs_error(vol->sb, "Writing into sparse regions is "
654 "not supported yet. Sorry.");
655 err = -EOPNOTSUPP;
656 break;
657 }
658 /* If first try and run list unmapped, map and retry. */
659 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
660 is_retry = TRUE;
661 /*
662 * Attempt to map run list, dropping lock for
663 * the duration.
664 */
665 up_read(&ni->run_list.lock);
666 err = map_run_list(ni, vcn);
667 if (likely(!err))
668 goto lock_retry_remap;
669 rl = NULL;
670 }
671 /* Failed to map the buffer, even after retrying. */
672 bh->b_blocknr = -1UL;
673 ntfs_error(vol->sb, "vcn_to_lcn(vcn = 0x%Lx) failed "
674 "with error code 0x%Lx%s.",
675 (long long)vcn, (long long)-lcn,
676 is_retry ? " even after retrying" : "");
677 // FIXME: Depending on vol->on_errors, do something.
678 if (!err)
679 err = -EIO;
680 break;
681 } while (block++, (bh = bh->b_this_page) != head);
682
683 /* Release the lock if we took it. */
684 if (rl)
685 up_read(&ni->run_list.lock);
686
687 /* For the error case, need to reset bh to the beginning. */
688 bh = head;
689
690 /* Just an optimization, so ->readpage() isn't called later. */
691 if (unlikely(!PageUptodate(page))) {
692 int uptodate = 1;
693 do {
694 if (!buffer_uptodate(bh)) {
695 uptodate = 0;
696 bh = head;
697 break;
698 }
699 } while ((bh = bh->b_this_page) != head);
700 if (uptodate)
701 SetPageUptodate(page);
702 }
703
704 /* Setup all mapped, dirty buffers for async write i/o. */
705 do {
706 get_bh(bh);
707 if (buffer_mapped(bh) && buffer_dirty(bh)) {
708 lock_buffer(bh);
709 if (test_clear_buffer_dirty(bh)) {
710 BUG_ON(!buffer_uptodate(bh));
711 mark_buffer_async_write(bh);
712 } else
713 unlock_buffer(bh);
714 } else if (unlikely(err)) {
715 /*
716 * For the error case. The buffer may have been set
717 * dirty during attachment to a dirty page.
718 */
719 if (err != -ENOMEM)
720 clear_buffer_dirty(bh);
721 }
722 } while ((bh = bh->b_this_page) != head);
723
724 if (unlikely(err)) {
725 // TODO: Remove the -EOPNOTSUPP check later on...
726 if (unlikely(err == -EOPNOTSUPP))
727 err = 0;
728 else if (err == -ENOMEM) {
729 ntfs_warning(vol->sb, "Error allocating memory. "
730 "Redirtying page so we try again "
731 "later.");
732 /*
733 * Put the page back on mapping->dirty_pages, but
734 * leave its buffer's dirty state as-is.
735 */
736 // FIXME: Once Andrew's -EAGAIN patch goes in, remove
737 // the __set_page_dirty_nobuffers(page) and set err to
738 // -EAGAIN instead of zero.
739 __set_page_dirty_nobuffers(page);
740 err = 0;
741 } else
742 SetPageError(page);
743 }
744
745 BUG_ON(PageWriteback(page));
746 SetPageWriteback(page); /* Keeps try_to_free_buffers() away. */
747 unlock_page(page);
748
749 /*
750 * Submit the prepared buffers for i/o. Note the page is unlocked,
751 * and the async write i/o completion handler can end_page_writeback()
752 * at any time after the *first* submit_bh(). So the buffers can then
753 * disappear...
754 */
755 need_end_writeback = TRUE;
756 do {
757 struct buffer_head *next = bh->b_this_page;
758 if (buffer_async_write(bh)) {
759 submit_bh(WRITE, bh);
760 need_end_writeback = FALSE;
761 }
762 put_bh(bh);
763 bh = next;
764 } while (bh != head);
765
766 /* If no i/o was started, need to end_page_writeback(). */
767 if (unlikely(need_end_writeback))
768 end_page_writeback(page);
769
770 ntfs_debug("Done.");
771 return err;
772 }
773
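/*
 * Worked example of the block limits used in ntfs_write_block() above,
 * with assumed values only: blocksize = 512 (blocksize_bits = 9),
 * i_size = 10000 bytes and initialized_size = 8192 bytes:
 *
 *	dblock = (10000 + 511) >> 9 = 20
 *	iblock = 8192 >> 9          = 16
 *
 * Dirty buffers for blocks >= 20 are just cleaned, blocks 16 to 19 hit the
 * (not yet implemented) initialized_size extension path, and blocks below
 * 16 are mapped via the run list and written out.
 */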
774 /**
775 * ntfs_writepage - write a @page to the backing store
776 * @page: page cache page to write out
777 *
778 * For non-resident attributes, ntfs_writepage() writes the @page by calling
779 * the ntfs version of the generic block_write_full_page() function,
780 * ntfs_write_block(), which in turn if necessary creates and writes the
781 * buffers associated with the page asynchronously.
782 *
783 * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
784 * the data to the mft record (which at this stage is most likely in memory).
785 * Thus, in this case, I/O is synchronous, as even if the mft record is not
786 * cached at this point in time, we need to wait for it to be read in before we
787 * can do the copy.
788 *
789 * Note the caller clears the page dirty flag before calling ntfs_writepage().
790 *
791 * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
792 *
793 * Return 0 on success and -errno on error.
794 */
795 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
796 {
797 s64 attr_pos;
798 struct inode *vi;
799 ntfs_inode *ni, *base_ni;
800 char *kaddr;
801 attr_search_context *ctx;
802 MFT_RECORD *m;
803 u32 attr_len, bytes;
804 int err;
805
806 BUG_ON(!PageLocked(page));
807
808 vi = page->mapping->host;
809
810 /* Is the page fully outside i_size? (truncate in progress) */
811 if (unlikely(page->index >= (vi->i_size + PAGE_CACHE_SIZE - 1) >>
812 PAGE_CACHE_SHIFT)) {
813 unlock_page(page);
814 ntfs_debug("Write outside i_size - truncated?");
815 return 0;
816 }
817
818 ni = NTFS_I(vi);
819
820 if (NInoNonResident(ni)) {
821 /*
822 * Only unnamed $DATA attributes can be compressed, encrypted,
823 * and/or sparse.
824 */
825 if (ni->type == AT_DATA && !ni->name_len) {
826 /* If file is encrypted, deny access, just like NT4. */
827 if (NInoEncrypted(ni)) {
828 unlock_page(page);
829 ntfs_debug("Denying write access to encrypted "
830 "file.");
831 return -EACCES;
832 }
833 /* Compressed data streams are handled in compress.c. */
834 if (NInoCompressed(ni)) {
835 // TODO: Implement and replace this check with
836 // return ntfs_write_compressed_block(page);
837 unlock_page(page);
838 ntfs_error(vi->i_sb, "Writing to compressed "
839 "files is not supported yet. "
840 "Sorry.");
841 return -EOPNOTSUPP;
842 }
843 // TODO: Implement and remove this check.
844 if (NInoSparse(ni)) {
845 unlock_page(page);
846 ntfs_error(vi->i_sb, "Writing to sparse files "
847 "is not supported yet. Sorry.");
848 return -EOPNOTSUPP;
849 }
850 }
851
852 /* We have to zero every time due to mmap-at-end-of-file. */
853 if (page->index >= (vi->i_size >> PAGE_CACHE_SHIFT)) {
854 /* The page straddles i_size. */
855 unsigned int ofs = vi->i_size & ~PAGE_CACHE_MASK;
856 kaddr = kmap_atomic(page, KM_USER0);
857 memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
858 flush_dcache_page(page);
859 kunmap_atomic(kaddr, KM_USER0);
860 }
861
862 // TODO: Implement and remove this check.
863 if (NInoMstProtected(ni)) {
864 unlock_page(page);
865 ntfs_error(vi->i_sb, "Writing to MST protected "
866 "attributes is not supported yet. "
867 "Sorry.");
868 return -EOPNOTSUPP;
869 }
870
871 /* Normal data stream. */
872 return ntfs_write_block(page);
873 }
874
875 /*
876 * Attribute is resident, implying it is not compressed, encrypted, or
877 * mst protected.
878 */
879 BUG_ON(page_has_buffers(page));
880 BUG_ON(!PageUptodate(page));
881
882 // TODO: Consider using PageWriteback() + unlock_page() in 2.5 once the
883 // "VM fiddling has ended". Note, don't forget to replace all the
884 // unlock_page() calls further below with end_page_writeback() ones.
885 // FIXME: Make sure it is ok to SetPageError() on unlocked page under
886 // writeback before doing the change!
887 #if 0
888 SetPageWriteback(page);
889 unlock_page(page);
890 #endif
891
892 if (!NInoAttr(ni))
893 base_ni = ni;
894 else
895 base_ni = ni->ext.base_ntfs_ino;
896
897 /* Map, pin, and lock the mft record. */
898 m = map_mft_record(base_ni);
899 if (unlikely(IS_ERR(m))) {
900 err = PTR_ERR(m);
901 m = NULL;
902 ctx = NULL;
903 goto err_out;
904 }
905 ctx = get_attr_search_ctx(base_ni, m);
906 if (unlikely(!ctx)) {
907 err = -ENOMEM;
908 goto err_out;
909 }
910 if (unlikely(!lookup_attr(ni->type, ni->name, ni->name_len,
911 IGNORE_CASE, 0, NULL, 0, ctx))) {
912 err = -ENOENT;
913 goto err_out;
914 }
915
916 /* Starting position of the page within the attribute value. */
917 attr_pos = page->index << PAGE_CACHE_SHIFT;
918
919 /* The total length of the attribute value. */
920 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
921
922 if (unlikely(vi->i_size != attr_len)) {
923 ntfs_error(vi->i_sb, "BUG()! i_size (0x%Lx) doesn't match "
924 "attr_len (0x%x). Aborting write.", vi->i_size,
925 attr_len);
926 err = -EIO;
927 goto err_out;
928 }
929 if (unlikely(attr_pos >= attr_len)) {
930 ntfs_error(vi->i_sb, "BUG()! attr_pos (0x%Lx) > attr_len (0x%x)"
931 ". Aborting write.", attr_pos, attr_len);
932 err = -EIO;
933 goto err_out;
934 }
935
936 bytes = attr_len - attr_pos;
937 if (unlikely(bytes > PAGE_CACHE_SIZE))
938 bytes = PAGE_CACHE_SIZE;
939
940 /*
941 * Here, we don't need to zero the out of bounds area every time because
942 * the below memcpy() already takes care of the mmap-at-end-of-file
943 * requirements. If the file is converted to a non-resident one, then
944 * the code path in use switches to the non-resident one where the
945 * zeroing happens on each ntfs_writepage() invocation.
946 *
947 * The above also applies nicely when i_size is decreased.
948 *
949 * When i_size is increased, the memory between the old and new i_size
950 * _must_ be zeroed (or overwritten with new data). Otherwise we will
951 * expose data to userspace/disk which should never have been exposed.
952 *
953 * FIXME: Ensure that i_size increases do the zeroing/overwriting and
954 * if we cannot guarantee that, then enable the zeroing below.
955 */
956
957 kaddr = kmap_atomic(page, KM_USER0);
958 /* Copy the data from the page to the mft record. */
959 memcpy((u8*)ctx->attr + le16_to_cpu(
960 ctx->attr->data.resident.value_offset) + attr_pos,
961 kaddr, bytes);
962 flush_dcache_mft_record_page(ctx->ntfs_ino);
963 #if 0
964 /* Zero out of bounds area. */
965 if (likely(bytes < PAGE_CACHE_SIZE)) {
966 memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
967 flush_dcache_page(page);
968 }
969 #endif
970 kunmap_atomic(kaddr, KM_USER0);
971
972 unlock_page(page);
973
974 // TODO: Mark mft record dirty so it gets written back.
975 ntfs_error(vi->i_sb, "Writing to resident files is not supported yet. "
976 "Wrote to memory only...");
977
978 put_attr_search_ctx(ctx);
979 unmap_mft_record(base_ni);
980 return 0;
981 err_out:
982 if (err == -ENOMEM) {
983 ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
984 "page so we try again later.");
985 /*
986 * Put the page back on mapping->dirty_pages, but leave its
987 * buffer's dirty state as-is.
988 */
989 // FIXME: Once Andrew's -EAGAIN patch goes in, remove the
990 // __set_page_dirty_nobuffers(page) and set err to -EAGAIN
991 // instead of zero.
992 __set_page_dirty_nobuffers(page);
993 err = 0;
994 } else {
995 ntfs_error(vi->i_sb, "Resident attribute write failed with "
996 "error %i. Setting page error flag.", -err);
997 SetPageError(page);
998 }
999 unlock_page(page);
1000 if (ctx)
1001 put_attr_search_ctx(ctx);
1002 if (m)
1003 unmap_mft_record(base_ni);
1004 return err;
1005 }
1006
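/*
 * Illustration of the i_size checks in ntfs_writepage() above, with
 * assumed values only: i_size = 10000 bytes and PAGE_CACHE_SIZE = 4096:
 *
 *	(10000 + 4095) >> 12 = 3
 *
 * so pages with index >= 3 are fully outside i_size and are skipped.
 * Page 2 straddles i_size; for a non-resident attribute its tail from
 * offset 10000 & 4095 = 1808 onwards is zeroed before write out.
 */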
1007 /**
1008 * ntfs_prepare_nonresident_write - prepare a non-resident attribute page for a write
1009 *
1010 */
1011 static int ntfs_prepare_nonresident_write(struct page *page,
1012 unsigned from, unsigned to)
1013 {
1014 VCN vcn;
1015 LCN lcn;
1016 sector_t block, ablock, iblock;
1017 struct inode *vi;
1018 ntfs_inode *ni;
1019 ntfs_volume *vol;
1020 run_list_element *rl;
1021 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1022 unsigned int vcn_ofs, block_start, block_end, blocksize;
1023 int err;
1024 BOOL is_retry;
1025 unsigned char blocksize_bits;
1026
1027 vi = page->mapping->host;
1028 ni = NTFS_I(vi);
1029 vol = ni->vol;
1030
1031 ntfs_debug("Entering for inode %li, attribute type 0x%x, page index "
1032 "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
1033 page->index, from, to);
1034
1035 BUG_ON(!NInoNonResident(ni));
1036 BUG_ON(NInoMstProtected(ni));
1037
1038 blocksize_bits = vi->i_blkbits;
1039 blocksize = 1 << blocksize_bits;
1040
1041 /*
1042 * create_empty_buffers() will create uptodate/dirty buffers if the
1043 * page is uptodate/dirty.
1044 */
1045 if (!page_has_buffers(page))
1046 create_empty_buffers(page, blocksize, 0);
1047 bh = head = page_buffers(page);
1048 if (unlikely(!bh))
1049 return -ENOMEM;
1050
1051 /* The first block in the page. */
1052 block = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
1053
1054 /*
1055 * The first out of bounds block for the allocated size. No need to
1056 * round up as allocated_size is in multiples of cluster size and the
1057 * minimum cluster size is 512 bytes, which is equal to the smallest
1058 * blocksize.
1059 */
1060 ablock = ni->allocated_size >> blocksize_bits;
1061
1062 /* The last (fully or partially) initialized block. */
1063 iblock = ni->initialized_size >> blocksize_bits;
1064
1065 /* Loop through all the buffers in the page. */
1066 block_start = 0;
1067 rl = NULL;
1068 err = 0;
1069 do {
1070 block_end = block_start + blocksize;
1071 /*
1072 * If buffer @bh is outside the write, just mark it uptodate
1073 * if the page is uptodate and continue with the next buffer.
1074 */
1075 if (block_end <= from || block_start >= to) {
1076 if (PageUptodate(page)) {
1077 if (!buffer_uptodate(bh))
1078 set_buffer_uptodate(bh);
1079 }
1080 continue;
1081 }
1082 /*
1083 * @bh is at least partially being written to.
1084 * Make sure it is not marked as new.
1085 */
1086 //if (buffer_new(bh))
1087 // clear_buffer_new(bh);
1088
1089 if (block >= ablock) {
1090 // TODO: block is above allocated_size, need to
1091 // allocate it. Best done in one go to accommodate not
1092 // only block but all above blocks up to and including:
1093 // ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
1094 // - 1) >> blocksize_bits. Obviously will need to round
1095 // up to next cluster boundary, too. This should be
1096 // done with a helper function, so it can be reused.
1097 ntfs_error(vol->sb, "Writing beyond allocated size "
1098 "is not supported yet. Sorry.");
1099 err = -EOPNOTSUPP;
1100 goto err_out;
1101 // Need to update ablock.
1102 // Need to set_buffer_new() on all block bhs that are
1103 // newly allocated.
1104 }
1105 /*
1106 * Now we have enough allocated size to fulfill the whole
1107 * request, i.e. block < ablock is true.
1108 */
1109 if (unlikely((block >= iblock) &&
1110 (ni->initialized_size < vi->i_size))) {
1111 /*
1112 * If this page is fully outside initialized size, zero
1113 * out all pages between the current initialized size
1114 * and the current page. Just use ntfs_readpage() to do
1115 * the zeroing transparently.
1116 */
1117 if (block > iblock) {
1118 // TODO:
1119 // For each page do:
1120 // - read_cache_page()
1121 // Again for each page do:
1122 // - wait_on_page_locked()
1123 // - Check (PageUptodate(page) &&
1124 // !PageError(page))
1125 // Update initialized size in the attribute and
1126 // in the inode.
1127 // Again, for each page do:
1128 // __set_page_dirty_buffers();
1129 // page_cache_release()
1130 // We don't need to wait on the writes.
1131 // Update iblock.
1132 }
1133 /*
1134 * The current page straddles initialized size. Zero
1135 * all non-uptodate buffers and set them uptodate (and
1136 * dirty?). Note, there aren't any non-uptodate buffers
1137 * if the page is uptodate.
1138 * FIXME: For an uptodate page, the buffers may need to
1139 * be written out because they were not initialized on
1140 * disk before.
1141 */
1142 if (!PageUptodate(page)) {
1143 // TODO:
1144 // Zero any non-uptodate buffers up to i_size.
1145 // Set them uptodate and dirty.
1146 }
1147 // TODO:
1148 // Update initialized size in the attribute and in the
1149 // inode (up to i_size).
1150 // Update iblock.
1151 // FIXME: This is inefficient. Try to batch the two
1152 // size changes to happen in one go.
1153 ntfs_error(vol->sb, "Writing beyond initialized size "
1154 "is not supported yet. Sorry.");
1155 err = -EOPNOTSUPP;
1156 goto err_out;
1157 // Do NOT set_buffer_new() BUT DO clear buffer range
1158 // outside write request range.
1159 // set_buffer_uptodate() on complete buffers as well as
1160 // set_buffer_dirty().
1161 }
1162
1163 /* Need to map unmapped buffers. */
1164 if (!buffer_mapped(bh)) {
1165 /* Unmapped buffer. Need to map it. */
1166 bh->b_bdev = vol->sb->s_bdev;
1167
1168 /* Convert block into corresponding vcn and offset. */
1169 vcn = (VCN)block << blocksize_bits >>
1170 vol->cluster_size_bits;
1171 vcn_ofs = ((VCN)block << blocksize_bits) &
1172 vol->cluster_size_mask;
1173
1174 is_retry = FALSE;
1175 if (!rl) {
1176 lock_retry_remap:
1177 down_read(&ni->run_list.lock);
1178 rl = ni->run_list.rl;
1179 }
1180 if (likely(rl != NULL)) {
1181 /* Seek to element containing target vcn. */
1182 while (rl->length && rl[1].vcn <= vcn)
1183 rl++;
1184 lcn = vcn_to_lcn(rl, vcn);
1185 } else
1186 lcn = (LCN)LCN_RL_NOT_MAPPED;
1187 if (unlikely(lcn < 0)) {
1188 /*
1189 * We extended the attribute allocation above.
1190 * If we hit an ENOENT here it means that the
1191 * allocation was insufficient which is a bug.
1192 */
1193 BUG_ON(lcn == LCN_ENOENT);
1194
1195 /* It is a hole, need to instantiate it. */
1196 if (lcn == LCN_HOLE) {
1197 // TODO: Instantiate the hole.
1198 // clear_buffer_new(bh);
1199 // unmap_underlying_metadata(bh->b_bdev,
1200 // bh->b_blocknr);
1201 // For non-uptodate buffers, need to
1202 // zero out the region outside the
1203 // request in this bh or all bhs,
1204 // depending on what we implemented
1205 // above.
1206 // Need to flush_dcache_page().
1207 // Or could use set_buffer_new()
1208 // instead?
1209 ntfs_error(vol->sb, "Writing into "
1210 "sparse regions is "
1211 "not supported yet. "
1212 "Sorry.");
1213 err = -EOPNOTSUPP;
1214 goto err_out;
1215 } else if (!is_retry &&
1216 lcn == LCN_RL_NOT_MAPPED) {
1217 is_retry = TRUE;
1218 /*
1219 * Attempt to map run list, dropping
1220 * lock for the duration.
1221 */
1222 up_read(&ni->run_list.lock);
1223 err = map_run_list(ni, vcn);
1224 if (likely(!err))
1225 goto lock_retry_remap;
1226 rl = NULL;
1227 }
1228 /*
1229 * Failed to map the buffer, even after
1230 * retrying.
1231 */
1232 bh->b_blocknr = -1UL;
1233 ntfs_error(vol->sb, "vcn_to_lcn(vcn = 0x%Lx) "
1234 "failed with error code "
1235 "0x%Lx%s.", (long long)vcn,
1236 (long long)-lcn, is_retry ?
1237 " even after retrying" : "");
1238 // FIXME: Depending on vol->on_errors, do
1239 // something.
1240 if (!err)
1241 err = -EIO;
1242 goto err_out;
1243 }
1244 /* We now have a successful remap, i.e. lcn >= 0. */
1245
1246 /* Setup buffer head to correct block. */
1247 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
1248 + vcn_ofs) >> blocksize_bits;
1249 set_buffer_mapped(bh);
1250
1251 // FIXME: Something analogous to this is needed for
1252 // each newly allocated block, i.e. BH_New.
1253 // FIXME: Might need to take this out of the
1254 // if (!buffer_mapped(bh)) {}, depending on how we
1255 // implement things during the allocated_size and
1256 // initialized_size extension code above.
1257 if (buffer_new(bh)) {
1258 clear_buffer_new(bh);
1259 unmap_underlying_metadata(bh->b_bdev,
1260 bh->b_blocknr);
1261 if (PageUptodate(page)) {
1262 set_buffer_uptodate(bh);
1263 continue;
1264 }
1265 /*
1266 * Page is _not_ uptodate, zero surrounding
1267 * region. NOTE: This is how we decide whether
1268 * to zero or not!
1269 */
1270 if (block_end > to || block_start < from) {
1271 void *kaddr;
1272
1273 kaddr = kmap_atomic(page, KM_USER0);
1274 if (block_end > to)
1275 memset(kaddr + to, 0,
1276 block_end - to);
1277 if (block_start < from)
1278 memset(kaddr + block_start, 0,
1279 from -
1280 block_start);
1281 flush_dcache_page(page);
1282 kunmap_atomic(kaddr, KM_USER0);
1283 }
1284 continue;
1285 }
1286 }
1287 /* @bh is mapped, set it uptodate if the page is uptodate. */
1288 if (PageUptodate(page)) {
1289 if (!buffer_uptodate(bh))
1290 set_buffer_uptodate(bh);
1291 continue;
1292 }
1293 /*
1294 * The page is not uptodate. The buffer is mapped. If it is not
1295 * uptodate, and it is only partially being written to, we need
1296 * to read the buffer in before the write, i.e. right now.
1297 */
1298 if (!buffer_uptodate(bh) &&
1299 (block_start < from || block_end > to)) {
1300 ll_rw_block(READ, 1, &bh);
1301 *wait_bh++ = bh;
1302 }
1303 } while (block++, block_start = block_end,
1304 (bh = bh->b_this_page) != head);
1305
1306 /* Release the lock if we took it. */
1307 if (rl) {
1308 up_read(&ni->run_list.lock);
1309 rl = NULL;
1310 }
1311
1312 /* If we issued read requests, let them complete. */
1313 while (wait_bh > wait) {
1314 wait_on_buffer(*--wait_bh);
1315 if (!buffer_uptodate(*wait_bh))
1316 return -EIO;
1317 }
1318
1319 ntfs_debug("Done.");
1320 return 0;
1321 err_out:
1322 /*
1323 * Zero out any newly allocated blocks to avoid exposing stale data.
1324 * If BH_New is set, we know that the block was newly allocated in the
1325 * above loop.
1326 * FIXME: What about initialized_size increments? Have we done all the
1327 * required zeroing above? If not this error handling is broken, and
1328 * in particular the if (block_end <= from) check is completely bogus.
1329 */
1330 bh = head;
1331 block_start = 0;
1332 is_retry = FALSE;
1333 do {
1334 block_end = block_start + blocksize;
1335 if (block_end <= from)
1336 continue;
1337 if (block_start >= to)
1338 break;
1339 if (buffer_new(bh)) {
1340 void *kaddr;
1341
1342 clear_buffer_new(bh);
1343 if (buffer_uptodate(bh))
1344 buffer_error();
1345 kaddr = kmap_atomic(page, KM_USER0);
1346 memset(kaddr + block_start, 0, bh->b_size);
1347 kunmap_atomic(kaddr, KM_USER0);
1348 set_buffer_uptodate(bh);
1349 mark_buffer_dirty(bh);
1350 is_retry = TRUE;
1351 }
1352 } while (block_start = block_end, (bh = bh->b_this_page) != head);
1353 if (is_retry)
1354 flush_dcache_page(page);
1355 if (rl)
1356 up_read(&ni->run_list.lock);
1357 return err;
1358 }
1359
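/*
 * Illustration of the buffer selection in the prepare write loop above,
 * with assumed values only: blocksize = 512, from = 1000 and to = 1300:
 *
 *	buffer 0: [   0,  512) -> outside the write, skipped
 *	buffer 1: [ 512, 1024) -> partially written, read in if not uptodate
 *	buffer 2: [1024, 1536) -> partially written, read in if not uptodate
 *	buffer 3: [1536, 2048) -> outside the write, skipped
 *
 * Only buffers overlapping [from, to) are mapped (and, where needed, read);
 * buffers outside the range are merely marked uptodate if the page is.
 */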
1360 /**
1361 * ntfs_prepare_write - prepare a page for receiving data
1362 *
1363 * This is called from generic_file_write() with i_sem held on the inode
1364 * (@page->mapping->host). The @page is locked and kmap()ped so page_address()
1365 * can simply be used. The source data has not yet been copied into the @page.
1366 *
1367 * Need to extend the attribute/fill in holes if necessary, create blocks and
1368 * make partially overwritten blocks uptodate.
1369 *
1370 * i_size is not to be modified yet.
1371 *
1372 * Return 0 on success or -errno on error.
1373 *
1374 * Should be using block_prepare_write() [support for sparse files] or
1375 * cont_prepare_write() [no support for sparse files]. Can't do that due to
1376 * ntfs specifics but can look at them for implementation guidance.
1377 *
1378 * Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
1379 * the first byte in the page that will be written to and @to is the first byte
1380 * after the last byte that will be written to.
1381 */
1382 static int ntfs_prepare_write(struct file *file, struct page *page,
1383 unsigned from, unsigned to)
1384 {
1385 struct inode *vi = page->mapping->host;
1386 ntfs_inode *ni = NTFS_I(vi);
1387
1388 ntfs_debug("Entering for inode %li, attribute type 0x%x, page index "
1389 "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
1390 page->index, from, to);
1391
1392 BUG_ON(!PageLocked(page));
1393 BUG_ON(from > PAGE_CACHE_SIZE);
1394 BUG_ON(to > PAGE_CACHE_SIZE);
1395 BUG_ON(from > to);
1396
1397 if (NInoNonResident(ni)) {
1398 /*
1399 * Only unnamed $DATA attributes can be compressed, encrypted,
1400 * and/or sparse.
1401 */
1402 if (ni->type == AT_DATA && !ni->name_len) {
1403 /* If file is encrypted, deny access, just like NT4. */
1404 if (NInoEncrypted(ni)) {
1405 ntfs_debug("Denying write access to encrypted "
1406 "file.");
1407 return -EACCES;
1408 }
1409 /* Compressed data streams are handled in compress.c. */
1410 if (NInoCompressed(ni)) {
1411 // TODO: Implement and replace this check with
1412 // return ntfs_write_compressed_block(page);
1413 ntfs_error(vi->i_sb, "Writing to compressed "
1414 "files is not supported yet. "
1415 "Sorry.");
1416 return -EOPNOTSUPP;
1417 }
1418 // TODO: Implement and remove this check.
1419 if (NInoSparse(ni)) {
1420 ntfs_error(vi->i_sb, "Writing to sparse files "
1421 "is not supported yet. Sorry.");
1422 return -EOPNOTSUPP;
1423 }
1424 }
1425
1426 // TODO: Implement and remove this check.
1427 if (NInoMstProtected(ni)) {
1428 ntfs_error(vi->i_sb, "Writing to MST protected "
1429 "attributes is not supported yet. "
1430 "Sorry.");
1431 return -EOPNOTSUPP;
1432 }
1433
1434 /* Normal data stream. */
1435 return ntfs_prepare_nonresident_write(page, from, to);
1436 }
1437
1438 /*
1439 * Attribute is resident, implying it is not compressed, encrypted, or
1440 * mst protected.
1441 */
1442 BUG_ON(page_has_buffers(page));
1443
1444 /* Do we need to resize the attribute? */
1445 if (((s64)page->index << PAGE_CACHE_SHIFT) + to > vi->i_size) {
1446 // TODO: Implement resize...
1447 ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
1448 "not supported yet. Sorry.");
1449 return -EOPNOTSUPP;
1450 }
1451
1452 /*
1453 * Because resident attributes are handled by memcpy() to/from the
1454 * corresponding MFT record, and because this form of i/o is byte
1455 * aligned rather than block aligned, there is no need to bring the
1456 * page uptodate here as in the non-resident case where we need to
1457 * bring the buffers straddled by the write uptodate before
1458 * generic_file_write() does the copying from userspace.
1459 *
1460 * We thus defer bringing the page region outside the written-to region
1461 * uptodate until ntfs_commit_write(). The reason for doing this
1462 * is that we save one round of:
1463 * map_mft_record(), get_attr_search_ctx(), lookup_attr(),
1464 * kmap_atomic(), kunmap_atomic(), put_attr_search_ctx(),
1465 * unmap_mft_record().
1466 * Which is obviously a very worthwhile save.
1467 *
1468 * Thus we just return success now...
1469 */
1470 ntfs_debug("Done.");
1471 return 0;
1472 }
1473
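/*
 * Example of the @from/@to convention described above, with assumed
 * values only: a 100 byte write at file offset 5000 and
 * PAGE_CACHE_SIZE = 4096:
 *
 *	page->index = 5000 >> 12  = 1
 *	from        = 5000 & 4095 = 904
 *	to          = from + 100  = 1004
 *
 * i.e. bytes 904 to 1003 of page 1 receive the new data and @to is the
 * first byte after the write.
 */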
1474 /*
1475 * NOTES: There is a disparity between the apparent need to extend the
1476 * attribute in prepare write but to update i_size only in commit write.
1477 * Need to make sure i_sem protection is sufficient. And if not will need to
1478 * handle this in some way or another.
1479 */
1480
1481 /**
1482 * ntfs_commit_nonresident_write -
1483 *
1484 */
1485 static int ntfs_commit_nonresident_write(struct page *page,
1486 unsigned from, unsigned to)
1487 {
1488 s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
1489 struct inode *vi;
1490 struct buffer_head *bh, *head;
1491 unsigned int block_start, block_end, blocksize;
1492 BOOL partial;
1493
1494 vi = page->mapping->host;
1495
1496 ntfs_debug("Entering for inode %li, attribute type 0x%x, page index "
1497 "0x%lx, from = %u, to = %u.", vi->i_ino,
1498 NTFS_I(vi)->type, page->index, from, to);
1499
1500 blocksize = 1 << vi->i_blkbits;
1501
1502 // FIXME: We need a whole slew of special cases in here for MST
1503 // protected attributes for example. For compressed files, too...
1504 // For now, we know ntfs_prepare_write() would have failed so we can't
1505 // get here in any of the cases which we have to special case, so we
1506 // are just a ripped off unrolled generic_commit_write() at present.
1507
1508 bh = head = page_buffers(page);
1509 block_start = 0;
1510 partial = FALSE;
1511 do {
1512 block_end = block_start + blocksize;
1513 if (block_end <= from || block_start >= to) {
1514 if (!buffer_uptodate(bh))
1515 partial = TRUE;
1516 } else {
1517 set_buffer_uptodate(bh);
1518 mark_buffer_dirty(bh);
1519 }
1520 } while (block_start = block_end, (bh = bh->b_this_page) != head);
1521
1522 /*
1523 * If this is a partial write which happened to make all buffers
1524 * uptodate then we can optimize away a bogus ->readpage() for the next
1525 * read(). Here we 'discover' whether the page went uptodate as a
1526 * result of this (potentially partial) write.
1527 */
1528 if (!partial)
1529 SetPageUptodate(page);
1530
1531 /*
1532 * Not convinced about this at all. See disparity comment above. For
1533 * now we know ntfs_prepare_write() would have failed in the write
1534 * exceeds i_size case, so this will never trigger which is fine.
1535 */
1536 if (pos > vi->i_size) {
1537 ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
1538 "not supported yet. Sorry.");
1539 // vi->i_size = pos;
1540 // mark_inode_dirty(vi);
1541 }
1542 ntfs_debug("Done.");
1543 return 0;
1544 }
1545
1546 /**
1547 * ntfs_commit_write - commit the received data
1548 *
1549 * This is called from generic_file_write() with i_sem held on the inode
1550 * (@page->mapping->host). The @page is locked and kmap()ped so page_address()
1551 * can simply be used. The source data has already been copied into the @page.
1552 *
1553 * Need to mark modified blocks dirty so they get written out later when
1554 * ntfs_writepage() is invoked by the VM.
1555 *
1556 * Return 0 on success or -errno on error.
1557 *
1558 * Should be using generic_commit_write(). This marks buffers uptodate and
1559 * dirty, sets the page uptodate if all buffers in the page are uptodate, and
1560 * updates i_size if the end of io is beyond i_size. In that case, it also
1561 * marks the inode dirty. - We could still use this (obviously except for
1562 * NInoMstProtected() attributes, where we will need to duplicate the core code
1563 * because we need our own async_io completion handler) but we could just do
1564 * the i_size update in prepare write, when we resize the attribute. Then
1565 * we would avoid the i_size update and mark_inode_dirty() happening here.
1566 *
1567 * Can't use generic_commit_write() due to ntfs specifics but can look at
1568 * it for implementation guidance.
1569 *
1570 * If things have gone as outlined in ntfs_prepare_write(), then we do not
1571 * need to do any page content modifications here at all, except in the write
1572 * to resident attribute case, where we need to do the uptodate bringing here
1573 * which we combine with the copying into the mft record which means we only
1574 * need to map the mft record and find the attribute record in it only once.
1575 */
1576 static int ntfs_commit_write(struct file *file, struct page *page,
1577 unsigned from, unsigned to)
1578 {
1579 s64 attr_pos;
1580 struct inode *vi;
1581 ntfs_inode *ni, *base_ni;
1582 char *kaddr, *kattr;
1583 attr_search_context *ctx;
1584 MFT_RECORD *m;
1585 u32 attr_len, bytes;
1586 int err;
1587
1588 vi = page->mapping->host;
1589 ni = NTFS_I(vi);
1590
1591 ntfs_debug("Entering for inode %li, attribute type 0x%x, page index "
1592 "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
1593 page->index, from, to);
1594
1595 if (NInoNonResident(ni)) {
1596 /*
1597 * Only unnamed $DATA attributes can be compressed, encrypted,
1598 * and/or sparse.
1599 */
1600 if (ni->type == AT_DATA && !ni->name_len) {
1601 /* If file is encrypted, deny access, just like NT4. */
1602 if (NInoEncrypted(ni)) {
1603 // Should never get here!
1604 ntfs_debug("Denying write access to encrypted "
1605 "file.");
1606 return -EACCES;
1607 }
1608 /* Compressed data streams are handled in compress.c. */
1609 if (NInoCompressed(ni)) {
1610 // TODO: Implement and replace this check with
1611 // return ntfs_write_compressed_block(page);
1612 // Should never get here!
1613 ntfs_error(vi->i_sb, "Writing to compressed "
1614 "files is not supported yet. "
1615 "Sorry.");
1616 return -EOPNOTSUPP;
1617 }
1618 // TODO: Implement and remove this check.
1619 if (NInoSparse(ni)) {
1620 // Should never get here!
1621 ntfs_error(vi->i_sb, "Writing to sparse files "
1622 "is not supported yet. Sorry.");
1623 return -EOPNOTSUPP;
1624 }
1625 }
1626
1627 // TODO: Implement and remove this check.
1628 if (NInoMstProtected(ni)) {
1629 // Should never get here!
1630 ntfs_error(vi->i_sb, "Writing to MST protected "
1631 "attributes is not supported yet. "
1632 "Sorry.");
1633 return -EOPNOTSUPP;
1634 }
1635
1636 /* Normal data stream. */
1637 return ntfs_commit_nonresident_write(page, from, to);
1638 }
1639
1640 /*
1641 * Attribute is resident, implying it is not compressed, encrypted, or
1642 * mst protected.
1643 */
1644
1645 /* Do we need to resize the attribute? */
1646 if (((s64)page->index << PAGE_CACHE_SHIFT) + to > vi->i_size) {
1647 // TODO: Implement resize...
1648 // pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
1649 // vi->i_size = pos;
1650 // mark_inode_dirty(vi);
1651 // Should never get here!
1652 ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
1653 "not supported yet. Sorry.");
1654 return -EOPNOTSUPP;
1655 }
1656
1657 if (!NInoAttr(ni))
1658 base_ni = ni;
1659 else
1660 base_ni = ni->ext.base_ntfs_ino;
1661
1662 /* Map, pin, and lock the mft record. */
1663 m = map_mft_record(base_ni);
1664 if (unlikely(IS_ERR(m))) {
1665 err = PTR_ERR(m);
1666 m = NULL;
1667 ctx = NULL;
1668 goto err_out;
1669 }
1670 ctx = get_attr_search_ctx(base_ni, m);
1671 if (unlikely(!ctx)) {
1672 err = -ENOMEM;
1673 goto err_out;
1674 }
1675 if (unlikely(!lookup_attr(ni->type, ni->name, ni->name_len,
1676 IGNORE_CASE, 0, NULL, 0, ctx))) {
1677 err = -ENOENT;
1678 goto err_out;
1679 }
1680
1681 /* Starting position of the page within the attribute value. */
1682 attr_pos = page->index << PAGE_CACHE_SHIFT;
1683
1684 /* The total length of the attribute value. */
1685 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
1686
1687 if (unlikely(vi->i_size != attr_len)) {
1688 ntfs_error(vi->i_sb, "BUG()! i_size (0x%Lx) doesn't match "
1689 "attr_len (0x%x). Aborting write.", vi->i_size,
1690 attr_len);
1691 err = -EIO;
1692 goto err_out;
1693 }
1694 if (unlikely(attr_pos >= attr_len)) {
1695 ntfs_error(vi->i_sb, "BUG()! attr_pos (0x%Lx) > attr_len (0x%x)"
1696 ". Aborting write.", attr_pos, attr_len);
1697 err = -EIO;
1698 goto err_out;
1699 }
1700
1701 bytes = attr_len - attr_pos;
1702 if (unlikely(bytes > PAGE_CACHE_SIZE))
1703 bytes = PAGE_CACHE_SIZE;
1704
1705 /*
1706 * Calculate the address of the attribute value corresponding to the
1707 * beginning of the current data @page.
1708 */
1709 kattr = (u8*)ctx->attr + le16_to_cpu(
1710 ctx->attr->data.resident.value_offset) + attr_pos;
1711
1712 kaddr = kmap_atomic(page, KM_USER0);
1713
1714 /* Copy the received data from the page to the mft record. */
1715 memcpy(kattr + from, kaddr + from, to - from);
1716 flush_dcache_mft_record_page(ctx->ntfs_ino);
1717
1718 if (!PageUptodate(page)) {
1719 /*
1720 * Bring the out of bounds area(s) uptodate by copying data
1721 * from the mft record to the page.
1722 */
1723 if (from > 0)
1724 memcpy(kaddr, kattr, from);
1725 if (to < bytes)
1726 memcpy(kaddr + to, kattr + to, bytes - to);
1727
1728 /* Zero the region outside the end of the attribute value. */
1729 if (likely(bytes < PAGE_CACHE_SIZE))
1730 memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1731
1732 /*
1733 * The probability of not having done any of the above is
1734 * extremely small, so we just flush unconditionally.
1735 */
1736 flush_dcache_page(page);
1737 SetPageUptodate(page);
1738 }
1739 kunmap_atomic(kaddr, KM_USER0);
1740
1741 // TODO: Mark mft record dirty so it gets written back.
1742 ntfs_error(vi->i_sb, "Writing to resident files is not supported yet. "
1743 "Wrote to memory only...");
1744
1745 put_attr_search_ctx(ctx);
1746 unmap_mft_record(base_ni);
1747 ntfs_debug("Done.");
1748 return 0;
1749 err_out:
1750 if (err == -ENOMEM) {
1751 ntfs_warning(vi->i_sb, "Error allocating memory required to "
1752 "commit the write.");
1753 if (PageUptodate(page)) {
1754 ntfs_warning(vi->i_sb, "Page is uptodate, setting "
1755 "dirty so the write will be retried "
1756 "later on by the VM.");
1757 /*
1758 * Put the page on mapping->dirty_pages, but leave its
1759 * buffer's dirty state as-is.
1760 */
1761 __set_page_dirty_nobuffers(page);
1762 err = 0;
1763 } else
1764 ntfs_error(vi->i_sb, "Page is not uptodate. Written "
1765 "data has been lost. )-:");
1766 } else {
1767 ntfs_error(vi->i_sb, "Resident attribute write failed with "
1768 "error %i. Setting page error flag.", -err);
1769 SetPageError(page);
1770 }
1771 if (ctx)
1772 put_attr_search_ctx(ctx);
1773 if (m)
1774 unmap_mft_record(base_ni);
1775 return err;
1776 }
1777
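/*
 * Illustration of the regions handled by the resident commit above, with
 * assumed values only: attr_len = 2000 bytes, page 0, from = 904,
 * to = 1004, hence attr_pos = 0 and bytes = 2000:
 *
 *	kattr + 904 ... 1003   <- receives the new data from the page
 *	kaddr + 0   ... 903    <- filled from the mft record if the page was
 *				   not already uptodate
 *	kaddr + 1004 ... 1999  <- likewise filled from the mft record
 *	kaddr + 2000 ... 4095  <- zeroed, being beyond the attribute value
 */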
1778 #endif /* NTFS_RW */
1779
1780 /**
1781 * ntfs_aops - general address space operations for inodes and attributes
1782 */
1783 struct address_space_operations ntfs_aops = {
1784 .readpage = ntfs_readpage, /* Fill page with data. */
1785 .sync_page = block_sync_page, /* Currently, just unplugs the
1786 disk request queue. */
1787 #ifdef NTFS_RW
1788 .writepage = ntfs_writepage, /* Write dirty page to disk. */
1789 .prepare_write = ntfs_prepare_write, /* Prepare page and buffers
1790 ready to receive data. */
1791 .commit_write = ntfs_commit_write, /* Commit received data. */
1792 #endif
1793 };
1794