/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

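/*
 * In data=ordered mode this is called at the start of a truncate so
 * that jbd2 can write out ordered data for the inode before the blocks
 * beyond the new size are freed (see the jinode check below).
 */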
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;
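	/*
	 * A fast symlink keeps its target in i_data, so it owns no data
	 * blocks; the only i_blocks charge it can carry is the xattr
	 * block accounted in ea_blocks above.
	 */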

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
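			/*
			 * Only delalloc or unwritten buffers may count as
			 * dirty here; any other buffer state ends the
			 * contiguous dirty range.
			 */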
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocates blocks
 * and stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent-based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle the mapping for
 * indirect-block-based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped. If create==1, it will make sure the
 * buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation, so let the
	 * underlying get_block() function know to avoid double accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
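/* This bound also caps the journal credits a single direct-IO handle
 * needs (see the ext4_chunk_trans_blocks() call in _ext4_get_block()). */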

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

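/*
 * The classic get_block_t callback: translate the VFS `create' flag
 * into EXT4_GET_BLOCKS_CREATE and let _ext4_get_block() do the work.
 */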
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
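
/*
 * Call fn() on each buffer of the page that overlaps the byte range
 * [from, to); *partial is set if any buffer outside that range is not
 * uptodate, telling the caller the page cannot be marked uptodate.
 */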

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied >  EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;