/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
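
/*
 * Worked example (illustrative, not from the original source): with a
 * 4KB block size (s_blocksize_bits == 12) and i_blocks counted in
 * 512-byte units, an 80KB file has i_blocks == 160, so
 * needed == 160 >> 3 == 20, and the reservation comes to
 * EXT4_DATA_TRANS_BLOCKS(sb) + 20 credits after the
 * [2, EXT4_MAX_TRANS_DATA] clamp.
 */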

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
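
/*
 * Clarifying note (added): each Indirect triple records one step of a
 * block-map lookup: @key is the little-endian block number read at that
 * step, @p is the address it was read from (inode->i_data for the first
 * step, bh->b_data for deeper ones), and @bh pins the indirect block
 * hosting it; see the ext4_get_branch() comment below for the full
 * contract.
 */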

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
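
/*
 * Worked example (illustrative, assuming a 4KB block size, so
 * ptrs == EXT4_ADDR_PER_BLOCK == 1024 and EXT4_NDIR_BLOCKS == 12):
 *
 *	i_block = 5       -> depth 1, offsets = { 5 }
 *	i_block = 12      -> depth 2, offsets = { EXT4_IND_BLOCK, 0 }
 *	i_block = 1036    -> depth 3, offsets = { EXT4_DIND_BLOCK, 0, 0 }
 *	i_block = 1049612 -> depth 4, offsets = { EXT4_TIND_BLOCK, 0, 0, 0 }
 */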

static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			__ext4_error(inode->i_sb, function,
				   "invalid block reference %u "
				   "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}

#define ext4_check_indirect_blockref(inode, bh)                         \
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
			      EXT4_NDIR_BLOCKS)

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 *      Need to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t  *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
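
/*
 * Usage sketch (illustrative; this mirrors what ext4_ind_get_blocks()
 * below actually does):
 *
 *	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
 *	if (!partial)
 *		blk = le32_to_cpu(chain[depth - 1].key);  // full chain: leaf
 *	else if (!err)
 *		...;  // hit a zero pointer: a hole, allocation may follow
 *	else
 *		...;  // err == -EIO: an indirect block failed to read
 */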

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
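
/*
 * Example of the PID colouring above (illustrative): with 32768 blocks
 * per group, a task with pid % 16 == 3 gets
 * colour = 3 * (32768 / 16) = 6144, so its allocations start 6144
 * blocks into the group, away from other writers in the same group.
 */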

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation,
 *	returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly no blocks on that path have been allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
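
/*
 * Worked example (illustrative): for blks = 8, k = 0 and
 * blocks_to_boundary = 5, the loop counts the first direct block plus
 * at most 5 further unallocated slots before the boundary, so the
 * return value is capped at 6; with k > 0 the same request returns
 * min(blks, blocks_to_boundary + 1) = 6 directly.
 */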

/**
 *	ext4_alloc_blocks: allocate multiple blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			indirect blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required)
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
							goal, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
	return ret;
}

/**
694
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
695
696
697
698
699
700
701
702
703
704
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
705
 *	the same format as ext4_get_branch() would do. We are calling it after
706
707
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
708
 *	picture as after the successful ext4_get_block(), except that in one
709
710
711
712
713
714
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value the from failed
715
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
716
717
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

	return err;
}

/**
804
 * ext4_splice_branch - splice the allocated branch onto inode.
805
806
807
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
808
 *	ext4_alloc_branch)
809
810
811
812
813
814
815
816
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}

/*
892
893
894
895
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
912
 *
913
914
915
916
917
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
918
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int maxblocks,
			       struct buffer_head *bh_result,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;