/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
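
/*
 * Illustration (not part of the original source): a fast symlink keeps
 * its target inline in the inode's i_data area (at most 60 bytes), so
 * no data blocks are charged to i_blocks; the only blocks that can
 * appear there belong to an external xattr block referenced by
 * i_file_acl.  i_blocks counts 512-byte sectors, hence the
 * (s_blocksize >> 9) conversion above.
 */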

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
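
/*
 * A minimal usage sketch (illustrative, not a call site in this file):
 * a truncate path that has dirtied everything against the current
 * transaction can refill its credits with:
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed))
 *		err = ext4_truncate_restart_trans(handle, inode,
 *				ext4_blocks_for_truncate(inode));
 */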

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyblocks_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, used);
	else {
		/*
		 * We did fallocate at an offset that was already delayed
		 * allocated, so on writeback of those delayed-allocated
		 * blocks we should not re-claim the quota.
		 */
		dquot_release_reservation_block(inode, used);
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
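
/*
 * Worked example (illustrative): suppose a delalloc write reserved 4
 * data blocks and 1 metadata block.  When writeback finally allocates
 * the 4 blocks, this is called with used == 4 and quota_claim == 1:
 * i_reserved_data_blocks drops to 0, the metadata reservation is
 * released from s_dirtyblocks_counter, and the in-memory quota
 * reservation is converted to a real charge via dquot_claim_block().
 */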

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
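
/*
 * For example, a call to check_block_validity(inode, map) from
 * ext4_map_blocks() expands to roughly
 *
 *	__check_block_validity(inode, "ext4_map_blocks", <line>, map)
 *
 * so ext4_error_inode() can report exactly where a corrupt mapping was
 * noticed.  (<line> stands for the call site's __LINE__.)
 */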
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
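
/*
 * For instance, the delalloc writeback path can use the count returned
 * here to size nr_to_write for one pass; the scan deliberately stops
 * at the first page that is clean, under writeback, discontiguous, or
 * has a fully mapped buffer.
 */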

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns whether the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extents based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make
 * sure the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * Allocating new blocks and/or writing to an uninitialized
	 * extent may result in i_data being updated, so we take
	 * the write lock of i_data_sem and call get_blocks()
	 * with the create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for the allocation; let
	 * the underlying get_block() function know, so that we can
	 * avoid double accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to re-check the extents flag here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation, which had been deferred till now.  We
		 * don't support fallocate for non-extent files, so we can
		 * update the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
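
/*
 * A minimal lookup sketch (illustrative): to ask whether logical block
 * lblk of an inode is currently mapped, without allocating anything:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 means map.m_pblk holds the start of a mapped extent that is
 * ret blocks long; ret == 0 means a hole; ret < 0 is an error.
 */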

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
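
/*
 * ext4_get_block() is the buffer_head-based get_block_t that the
 * generic block helpers expect; e.g. a read path can hand it to
 * mpage_readpage(page, ext4_get_block) and let the VFS map and fill
 * the page.
 */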

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
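
/*
 * Typical use (sketch): directory code reads a block of a directory
 * inode and gets back an up-to-date buffer_head, or NULL with *err set:
 *
 *	bh = ext4_bread(NULL, dir, blk, 0, &err);
 *	if (!bh)
 *		goto failed;	(a hole, or -EIO on a read failure)
 */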

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
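
/*
 * Example from this file: the journalled write path walks the buffers
 * that overlap a write and takes journal write access on each:
 *
 *	ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *				NULL, do_journal_get_write_access);
 */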

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  We don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add the inode to the orphan list in case we crash before
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied >  EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize (hint: delalloc).
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
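
/*
 * Note on the two sizes (illustrative): i_size is what userspace sees,
 * while i_disksize is what may be written to the on-disk inode.  With
 * delalloc, an append can raise i_size immediately while i_disksize is
 * only raised once the backing blocks are really allocated, so a crash
 * never exposes blocks that were never written.
 */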

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size, so truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}


	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);