/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
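/*
 * Illustrative arithmetic, added here for clarity and not part of the
 * original source: with 4 KiB blocks, s_blocksize_bits - 9 == 3, so
 * i_blocks (counted in 512-byte units) is divided by 8.  A file whose
 * i_blocks is 2048 therefore yields needed = 256, which is then capped
 * at EXT4_MAX_TRANS_DATA, and the returned estimate is
 * EXT4_DATA_TRANS_BLOCKS(sb) plus that capped value.
 */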

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

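/*
 * Worked example, added for illustration and assuming a 4 KiB block size
 * (ptrs = 1024, ptrs_bits = 10, EXT4_NDIR_BLOCKS = 12):
 *
 *	i_block = 5    -> offsets = { 5 }                      (depth 1, direct)
 *	i_block = 300  -> offsets = { EXT4_IND_BLOCK, 288 }    (depth 2)
 *	i_block = 1040 -> offsets = { EXT4_DIND_BLOCK, 0, 4 }  (depth 3)
 */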
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

static int __ext4_check_blockref(const char *function, unsigned int line,
				 struct inode *inode,
				 __le32 *p, unsigned int max)
{
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			es->s_last_error_block = cpu_to_le64(blk);
			ext4_error_inode(inode, function, line, blk,
					 "invalid block");
			return -EIO;
		}
	}
	return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
	__ext4_check_blockref(__func__, __LINE__, inode,		\
			      (__le32 *)(bh)->b_data,			\
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
	__ext4_check_blockref(__func__, __LINE__, inode,		\
			      EXT4_I(inode)->i_data,			\
			      EXT4_NDIR_BLOCKS)

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *      Need to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t  *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
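/*
 * Illustrative sketch, not from the original source: for a block reached
 * through one level of indirection, a fully successful lookup leaves
 *
 *	chain[0].p -> &EXT4_I(inode)->i_data[EXT4_IND_BLOCK], chain[0].bh == NULL
 *	chain[1].p -> slot inside the indirect block's b_data, chain[1].bh == that buffer
 *
 * and ext4_get_branch() returns NULL with *err == 0; a hole instead returns
 * the first triple whose key is zero.
 */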

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
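/*
 * Example of the colouring above, with made-up numbers: if the group has
 * 32768 blocks and current->pid is 4097, then pid % 16 == 1 and
 * colour == 2048, so the returned goal is bg_start + 2048.  With delayed
 * allocation enabled the function returns bg_start before colouring.
 */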

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
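/*
 * Worked example, added for illustration: with blks = 8 and
 * blocks_to_boundary = 5, a branch that is still missing indirect blocks
 * (k > 0) returns 6 -- the allocation is not allowed to cross the
 * indirect-block boundary.  With k == 0 the function instead counts the
 * first direct block plus the following unallocated slots in branch[0],
 * again stopping at blks or at the boundary.
 */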

/**
 *	ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 *	@handle: handle for this transaction
 *	@inode: inode which needs allocated blocks
 *	@iblock: the logical block to start allocating at
 *	@goal: preferred physical block of allocation
 *	@indirect_blks: the number of blocks needed to allocate for indirect
 *			blocks
 *	@blks: number of desired blocks
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 *	@err: on return it will store the error code
 *
 *	This function will return the number of blocks allocated as
 *	requested by the passed-in parameters.
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks need to allocate(required)
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode, goal,
						     0, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count ;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
	return ret;
}

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@handle: handle for this transaction
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -EIO;
			goto failed;
		}

		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
	for (i = 1; i <= n ; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);

	return err;
}
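/*
 * Rough post-condition sketch, for illustration only: on success branch[]
 * holds the keys of the newly allocated blocks, every new indirect block
 * has been zeroed, linked to its child and marked dirty through the
 * journal, and only *branch[0].p in the parent (inode or existing indirect
 * block) is still unset -- that final link is filled in by
 * ext4_splice_branch().
 */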

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}

/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			       struct ext4_map_blocks *map,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/*map more blocks*/
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;