/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

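/*
 * Compute the metadata checksum of an on-disk inode: temporarily zero the
 * stored checksum fields, checksum the whole raw inode seeded with
 * ei->i_csum_seed, then restore the saved values.
 */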
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			      struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

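/*
 * Verify the stored inode checksum.  Returns 1 if it matches (or if metadata
 * checksums are not in effect for this filesystem), 0 otherwise.
 */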
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

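/*
 * Recompute the inode checksum and store it in i_checksum_lo (and, when the
 * inode is large enough, i_checksum_hi).
 */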
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

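/*
 * Used by the quota code to find how much space this inode has reserved
 * for delayed allocation.
 */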
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

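/*
 * Sanity-check a mapping returned by the block allocator: complain and
 * return -EIO if the physical extent lies outside the valid data blocks of
 * the filesystem.
 */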
static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				   struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns the current mapping if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks that were mapped or
 * allocated.  If create==0 and the blocks are pre-allocated and
 * uninitialized, the result buffer head is unmapped.  If create==1, it
 * will make sure the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (the blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns (with create == 0) with the
	 * buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with the create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated blocks,
		 * set the BH_Da_Mapped bit on them. Its important to do this
		 * under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

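/*
 * Common helper for the get_block callbacks in this file: map (and,
 * depending on @flags, allocate) the blocks covering @bh.  If no transaction
 * is running and this is a direct I/O write, start one here with enough
 * credits for up to DIO_MAX_BLOCKS blocks.
 */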
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

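/*
 * Standard get_block callback: look the block(s) up, allocating them only
 * when @create is set.
 */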
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

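/*
 * Like ext4_getblk(), but also read the buffer from disk if it is not
 * already uptodate.
 */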
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

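/*
 * Walk the buffers of a page and apply @fn to each buffer that overlaps the
 * byte range [from, to); *partial is set if any buffer outside that range is
 * not uptodate.
 */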
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
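/*
 * .write_begin callback: start a transaction, lock the page and instantiate
 * any blocks needed for the write.  If block instantiation fails, trim off
 * blocks allocated beyond i_size and, if necessary, put the inode on the
 * orphan list so that an interrupted truncate can be finished after a crash.
 */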
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

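/*
 * Common tail of the ->write_end paths: copy the data into the page via
 * block_write_end() and, while still holding the page lock, push i_size out
 * if the write extended the file.
 */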
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}