/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
24
#include <linux/jbd2.h>
25
26
27
28
29
30
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
31
#include <linux/pagevec.h>
32
#include <linux/mpage.h>
33
#include <linux/namei.h>
34
35
#include <linux/uio.h>
#include <linux/bio.h>
36
#include <linux/workqueue.h>
37
#include <linux/kernel.h>
38
#include <linux/printk.h>
39
#include <linux/slab.h>
40
#include <linux/ratelimit.h>
41

42
#include "ext4_jbd2.h"
43
44
#include "xattr.h"
#include "acl.h"
45
#include "ext4_extents.h"
46
#include "truncate.h"
47

48
49
#include <trace/events/ext4.h>

50
51
#define MPAGE_DA_EXTENT_TAIL 0x01

52
53
54
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
55
	trace_ext4_begin_ordered_truncate(inode, new_size);
56
57
58
59
60
61
62
63
64
65
66
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
67
68
}

69
static void ext4_invalidatepage(struct page *page, unsigned long offset);
70
71
72
73
74
75
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
76

77
78
79
/*
 * Test whether an inode is a fast symlink.
 */
80
static int ext4_inode_is_fast_symlink(struct inode *inode)
81
{
82
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
83
84
85
86
87
88
89
90
91
92
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
93
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
94
				 int nblocks)
95
{
96
97
98
	int ret;

	/*
99
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
100
101
102
103
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
104
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
105
	jbd_debug(2, "restarting handle %p\n", handle);
106
	up_write(&EXT4_I(inode)->i_data_sem);
107
	ret = ext4_journal_restart(handle, nblocks);
108
	down_write(&EXT4_I(inode)->i_data_sem);
109
	ext4_discard_preallocations(inode);
110
111

	return ret;
112
113
114
115
116
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
Al Viro's avatar
Al Viro committed
117
void ext4_evict_inode(struct inode *inode)
118
119
{
	handle_t *handle;
120
	int err;
121

122
	trace_ext4_evict_inode(inode);
123
124
125

	ext4_ioend_wait(inode);

Al Viro's avatar
Al Viro committed
126
	if (inode->i_nlink) {
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
Al Viro's avatar
Al Viro committed
154
155
156
157
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

158
	if (!is_bad_inode(inode))
159
		dquot_initialize(inode);
160

161
162
	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
163
164
165
166
167
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

168
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
169
	if (IS_ERR(handle)) {
170
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
171
172
173
174
175
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
176
		ext4_orphan_del(NULL, inode);
177
178
179
180
		goto no_delete;
	}

	if (IS_SYNC(inode))
181
		ext4_handle_sync(handle);
182
	inode->i_size = 0;
183
184
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
185
		ext4_warning(inode->i_sb,
186
187
188
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
189
	if (inode->i_blocks)
190
		ext4_truncate(inode);
191
192
193
194
195
196
197

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
198
	if (!ext4_handle_has_enough_credits(handle, 3)) {
199
200
201
202
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
203
			ext4_warning(inode->i_sb,
204
205
206
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
207
			ext4_orphan_del(NULL, inode);
208
209
210
211
			goto no_delete;
		}
	}

212
	/*
213
	 * Kill off the orphan record which ext4_truncate created.
214
	 * AKPM: I think this can be inside the above `if'.
215
	 * Note that ext4_orphan_del() has to be able to cope with the
216
	 * deletion of a non-existent orphan - this is because we don't
217
	 * know if ext4_truncate() actually created an orphan record.
218
219
	 * (Well, we could do this if we need to, but heck - it works)
	 */
220
221
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();
222
223
224
225
226
227
228
229

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
230
	if (ext4_mark_inode_dirty(handle, inode))
231
		/* If that failed, just do the required in-core inode clear. */
Al Viro's avatar
Al Viro committed
232
		ext4_clear_inode(inode);
233
	else
234
235
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
236
237
	return;
no_delete:
Al Viro's avatar
Al Viro committed
238
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
239
240
}

241
242
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
243
{
244
	return &EXT4_I(inode)->i_reserved_quota;
245
}
246
#endif
247

248
249
/*
 * Calculate the number of metadata blocks need to reserve
250
 * to allocate a block located at @lblock
251
 */
252
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
253
{
254
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
255
		return ext4_ext_calc_metadata_amount(inode, lblock);
256

257
	return ext4_ind_calc_metadata_amount(inode, lblock);
258
259
}

260
261
262
263
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
264
265
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
266
267
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
268
269
270
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
271
	trace_ext4_da_update_reserve_space(inode, used);
272
273
274
275
276
277
278
279
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}
280

281
282
283
	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
284
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
285
			   used + ei->i_allocated_meta_blocks);
286
	ei->i_allocated_meta_blocks = 0;
287

288
289
290
291
292
293
	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
294
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
295
				   ei->i_reserved_meta_blocks);
296
		ei->i_reserved_meta_blocks = 0;
297
		ei->i_da_metadata_calc_len = 0;
298
	}
299
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
300

301
302
	/* Update quota subsystem for data blocks */
	if (quota_claim)
303
		dquot_claim_block(inode, used);
304
	else {
305
306
307
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
308
		 * not re-claim the quota for fallocated blocks.
309
		 */
310
		dquot_release_reservation_block(inode, used);
311
	}
312
313
314
315
316
317

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
318
319
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
320
		ext4_discard_preallocations(inode);
321
322
}

323
static int __check_block_validity(struct inode *inode, const char *func,
324
325
				unsigned int line,
				struct ext4_map_blocks *map)
326
{
327
328
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
329
330
331
332
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
333
334
335
336
337
		return -EIO;
	}
	return 0;
}

338
#define check_block_validity(inode, map)	\
339
	__check_block_validity((inode), __func__, __LINE__, (map))
340

341
/*
342
343
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
377
378
379
380
381
382
383
384
385
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
386
387
388
389
390
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
391
392
			if (num >= max_pages) {
				done = 1;
393
				break;
394
			}
395
396
397
398
399
400
		}
		pagevec_release(&pvec);
	}
	return num;
}

401
/*
402
 * The ext4_map_blocks() function tries to look up the requested blocks,
403
 * and returns if the blocks are already mapped.
404
405
406
407
408
 *
 * Otherwise it takes the write lock of the i_data_sem and allocate blocks
 * and store the allocated blocks in the result buffer head and mark it
 * mapped.
 *
409
410
 * If file type is extents based, it will call ext4_ext_map_blocks(),
 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
411
412
413
414
415
416
417
418
419
420
421
422
 * based files
 *
 * On success, it returns the number of blocks being mapped or allocate.
 * if create==0 and the blocks are pre-allocated and uninitialized block,
 * the result buffer head is unmapped. If the create ==1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated), in
 * that casem, buffer head is unmapped
 *
 * It returns the error in case of allocation failure.
 */
423
424
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
425
426
{
	int retval;
427

428
429
430
431
	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
432
	/*
433
434
	 * Try to see if we can get the block without requesting a new
	 * file system block.
435
436
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
437
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
438
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
439
	} else {
440
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
441
	}
442
	up_read((&EXT4_I(inode)->i_data_sem));
443

444
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
445
		int ret = check_block_validity(inode, map);
446
447
448
449
		if (ret != 0)
			return ret;
	}

450
	/* If it is only a block(s) look up */
451
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
452
453
454
455
456
457
458
459
460
		return retval;

	/*
	 * Returns if the blocks have already allocated
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns th create = 0
	 * with buffer head unmapped.
	 */
461
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
462
463
		return retval;

464
465
466
467
468
469
470
471
472
473
	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of a uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
474
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
475

476
	/*
477
478
479
480
	 * New blocks allocate and/or writing to uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
481
482
	 */
	down_write((&EXT4_I(inode)->i_data_sem));
483
484
485
486
487
488
489

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
490
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
491
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
492
493
494
495
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
496
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
497
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
498
	} else {
499
		retval = ext4_ind_map_blocks(handle, inode, map, flags);
500

501
		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
502
503
504
505
506
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
507
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
508
		}
509

510
511
512
513
514
515
516
		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
517
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
518
519
			ext4_da_update_reserve_space(inode, retval, 1);
	}
520
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
521
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
522

523
	up_write((&EXT4_I(inode)->i_data_sem));
524
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
525
		int ret = check_block_validity(inode, map);
526
527
528
		if (ret != 0)
			return ret;
	}
529
530
531
	return retval;
}

532
533
534
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

535
536
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
537
{
538
	handle_t *handle = ext4_journal_current_handle();
539
	struct ext4_map_blocks map;
Jan Kara's avatar
Jan Kara committed
540
	int ret = 0, started = 0;
541
	int dio_credits;
542

543
544
545
546
	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
Jan Kara's avatar
Jan Kara committed
547
		/* Direct IO write... */
548
549
550
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
551
		handle = ext4_journal_start(inode, dio_credits);
Jan Kara's avatar
Jan Kara committed
552
		if (IS_ERR(handle)) {
553
			ret = PTR_ERR(handle);
554
			return ret;
555
		}
Jan Kara's avatar
Jan Kara committed
556
		started = 1;
557
558
	}

559
	ret = ext4_map_blocks(handle, inode, &map, flags);
Jan Kara's avatar
Jan Kara committed
560
	if (ret > 0) {
561
562
563
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
Jan Kara's avatar
Jan Kara committed
564
		ret = 0;
565
	}
Jan Kara's avatar
Jan Kara committed
566
567
	if (started)
		ext4_journal_stop(handle);
568
569
570
	return ret;
}

571
572
573
574
575
576
577
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

578
579
580
/*
 * `handle' can be NULL if create is zero
 */
581
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
Aneesh Kumar K.V's avatar
Aneesh Kumar K.V committed
582
				ext4_lblk_t block, int create, int *errp)
583
{
584
585
	struct ext4_map_blocks map;
	struct buffer_head *bh;
586
587
588
589
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

590
591
592
593
	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);
594

595
596
597
598
599
600
601
602
603
604
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
605
	}
606
607
608
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);
609

610
611
612
613
614
615
616
617
618
619
620
621
622
		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
623
		}
624
625
626
627
628
629
630
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
631
	}
632
633
634
635
636
637
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
638
639
}

640
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
Aneesh Kumar K.V's avatar
Aneesh Kumar K.V committed
641
			       ext4_lblk_t block, int create, int *err)
642
{
643
	struct buffer_head *bh;
644

645
	bh = ext4_getblk(handle, inode, block, create, err);
646
647
648
649
650
651
652
653
654
655
656
657
658
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

659
660
661
662
663
664
665
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
666
667
668
669
670
671
672
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

673
674
	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
675
	     block_start = block_end, bh = next) {
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
693
 * close off a transaction and start a new one between the ext4_get_block()
694
 * and the commit_write().  So doing the jbd2_journal_start at the start of
695
696
 * prepare_write() is the right place.
 *
697
698
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
699
700
701
702
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
703
 * By accident, ext4 can be reentered when a transaction is open via
704
705
706
707
708
709
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
710
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
711
712
713
714
715
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
716
				       struct buffer_head *bh)
717
{
718
719
720
	int dirty = buffer_dirty(bh);
	int ret;

721
722
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
723
	/*
724
	 * __block_write_begin() could have dirtied some buffers. Clean
725
726
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
727
	 * by __block_write_begin() isn't a real problem here as we clear
728
729
730
731
732
733
734
735
736
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
737
738
}

739
740
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
Nick Piggin's avatar
Nick Piggin committed
741
static int ext4_write_begin(struct file *file, struct address_space *mapping,
742
743
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
744
{
745
	struct inode *inode = mapping->host;
746
	int ret, needed_blocks;
747
748
	handle_t *handle;
	int retries = 0;
749
	struct page *page;
750
	pgoff_t index;
751
	unsigned from, to;
Nick Piggin's avatar
Nick Piggin committed
752

753
	trace_ext4_write_begin(inode, pos, len, flags);
754
755
756
757
758
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
759
	index = pos >> PAGE_CACHE_SHIFT;
760
761
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;
762
763

retry:
764
765
766
767
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
768
	}
769

770
771
772
773
	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

774
	page = grab_cache_page_write_begin(mapping, index, flags);
775
776
777
778
779
780
781
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

782
	if (ext4_should_dioread_nolock(inode))
783
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
784
	else
785
		ret = __block_write_begin(page, pos, len, ext4_get_block);
Nick Piggin's avatar
Nick Piggin committed
786
787

	if (!ret && ext4_should_journal_data(inode)) {
788
789
790
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
Nick Piggin's avatar
Nick Piggin committed
791
792

	if (ret) {
793
794
		unlock_page(page);
		page_cache_release(page);
795
		/*
796
		 * __block_write_begin may have instantiated a few blocks
797
798
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
799
800
801
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
802
		 */
803
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
804
805
806
807
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
808
			ext4_truncate_failed_write(inode);
809
			/*
810
			 * If truncate failed early the inode might
811
812
813
814
815
816
817
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
Nick Piggin's avatar
Nick Piggin committed
818
819
	}

820
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
821
		goto retry;
822
out:
823
824
825
	return ret;
}

Nick Piggin's avatar
Nick Piggin committed
826
827
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
828
829
830
831
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
832
	return ext4_handle_dirty_metadata(handle, NULL, bh);
833
834
}

835
static int ext4_generic_write_end(struct file *file,
836
837
838
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied >  EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less that inode->i_size
		 * bu greater than i_disksize.(hint delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

881
882
883
884
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
885
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
886
887
 * buffers are managed internally.
 */
Nick Piggin's avatar
Nick Piggin committed
888
static int ext4_ordered_write_end(struct file *file,
889
890
891
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
892
{
893
	handle_t *handle = ext4_journal_current_handle();
894
	struct inode *inode = mapping->host;
895
896
	int ret = 0, ret2;

897
	trace_ext4_ordered_write_end(inode, pos, len, copied);
898
	ret = ext4_jbd2_file_inode(handle, inode);
899
900

	if (ret == 0) {
901
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
Nick Piggin's avatar
Nick Piggin committed
902
							page, fsdata);
903
		copied = ret2;
904
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
905
906
907
908
909
			/* if we have allocated more blocks and copied
			 * less. We will have blocks allocated outside
			 * inode->i_size. So truncate them
			 */
			ext4_orphan_add(handle, inode);
910
911
		if (ret2 < 0)
			ret = ret2;
912
	}
913
	ret2 = ext4_journal_stop(handle);
914
915
	if (!ret)
		ret = ret2;
Nick Piggin's avatar
Nick Piggin committed
916

917
	if (pos + len > inode->i_size) {
918
		ext4_truncate_failed_write(inode);
919
		/*
920
		 * If truncate failed early the inode might still be
921
922
923
924
925
926
927
928
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}


Nick Piggin's avatar
Nick Piggin committed
929
	return ret ? ret : copied;
930
931
}

Nick Piggin's avatar
Nick Piggin committed
932
static int ext4_writeback_write_end(struct file *file,
933
934
935
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
936
{
937
	handle_t *handle = ext4_journal_current_handle();
938
	struct inode *inode = mapping->host;
939
940
	int ret = 0, ret2;

941
	trace_ext4_writeback_write_end(inode, pos, len, copied);
942
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
Nick Piggin's avatar
Nick Piggin committed
943
							page, fsdata);
944
	copied = ret2;
945
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
946
947
948
949
950
951
		/* if we have allocated more blocks and copied
		 * less. We will have blocks allocated outside
		 * inode->i_size. So truncate them
		 */
		ext4_orphan_add(handle, inode);

952
953
	if (ret2 < 0)
		ret = ret2;
954

955
	ret2 = ext4_journal_stop(handle);
956
957
	if (!ret)
		ret = ret2;
Nick Piggin's avatar
Nick Piggin committed
958

959
	if (pos + len > inode->i_size) {
960
		ext4_truncate_failed_write(inode);
961
		/*
962
		 * If truncate failed early the inode might still be
963
964
965
966
967
968
969
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

Nick Piggin's avatar
Nick Piggin committed
970
	return ret ? ret : copied;
971
972
}

Nick Piggin's avatar
Nick Piggin committed
973
static int ext4_journalled_write_end(struct file *file,
974
975
976
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
977
{
978
	handle_t *handle = ext4_journal_current_handle();
Nick Piggin's avatar
Nick Piggin committed
979
	struct inode *inode = mapping->host;
980
981
	int ret = 0, ret2;
	int partial = 0;
Nick Piggin's avatar
Nick Piggin committed
982
	unsigned from, to;
983
	loff_t new_i_size;
984

985
	trace_ext4_journalled_write_end(inode, pos, len, copied);
Nick Piggin's avatar
Nick Piggin committed
986
987
988
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

989
990
	BUG_ON(!ext4_handle_valid(handle));

Nick Piggin's avatar
Nick Piggin committed
991
992
993
994
995
	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}
996
997

	ret = walk_page_buffers(handle, page_buffers(page), from,
Nick Piggin's avatar
Nick Piggin committed
998
				to, &partial, write_end_fn);
999
1000
	if (!partial)
		SetPageUptodate(page);
For faster browsing, not all history is shown. View entire blame