/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;
	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
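
/*
 * Illustrative sizing (not part of the original file): with a 4KB block
 * size, s_blocksize_bits - 9 == 3, and i_blocks counts 512-byte sectors,
 * so a file occupying 1024 filesystem blocks has i_blocks == 8192 and
 * needed == 8192 >> 3 == 1024, which is then clamped to
 * EXT4_MAX_TRANS_DATA.  The credit estimate therefore stays bounded no
 * matter how large the file is; long truncates compensate by restarting
 * the transaction (see try_to_extend_transaction() below).
 */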

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data ext4 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
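
/*
 * Worked example (illustrative, not from the original file): assume a 4KB
 * block size, so ptrs == EXT4_ADDR_PER_BLOCK == 1024 and
 * EXT4_NDIR_BLOCKS == 12.  Then:
 *
 *	i_block = 5     -> offsets = { 5 },                  depth 1 (direct)
 *	i_block = 100   -> offsets = { EXT4_IND_BLOCK, 88 }, depth 2
 *	i_block = 70000 -> 70000 - 12 - 1024 = 68964 < 1024^2, so
 *			   offsets = { EXT4_DIND_BLOCK, 67, 356 }, depth 3
 *
 * No I/O happens here; the path is computed purely from the superblock's
 * block size.
 */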

static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			__ext4_error(inode->i_sb, function,
				   "invalid block reference %u "
				   "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
			      EXT4_NDIR_BLOCKS)

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 *      Need to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t  *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
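
/*
 * Illustrative result (not part of the original file): for a depth-3 path
 * such as { EXT4_DIND_BLOCK, 67, 356 }, chain[0].p points at the
 * EXT4_DIND_BLOCK slot of i_data (chain[0].bh == NULL), chain[1].p at
 * entry 67 inside the double-indirect block held by chain[1].bh, and
 * chain[2].p at entry 356 inside the indirect block held by chain[2].bh.
 * A zero key at any level stops the walk and returns that incomplete
 * triple, which is how callers detect a hole that needs allocation.
 */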

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
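
/*
 * Colouring example (illustrative): with 32768 blocks per group, each of
 * the 16 colour slots spans 2048 blocks, so a caller with pid 4099
 * (4099 % 16 == 3) gets a goal of bg_start + 6144.  Under delayed
 * allocation the colouring is skipped and bg_start is returned directly.
 */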

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
	 * so clearly the blocks on that path have not been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
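
/*
 * Example (illustrative): with blks = 8 and blocks_to_boundary = 5, a
 * branch whose indirect block is missing (k > 0) yields
 * blocks_to_boundary + 1 == 6, since an allocation never crosses an
 * indirect-block boundary.  With a complete branch (k == 0) the scan
 * counts the first direct slot plus however many of the following slots
 * are still zero, again stopping at the boundary.
 */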

/**
 *	ext4_alloc_blocks: allocate multiple blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			indirect blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
							goal, &count, err);
		if (*err)
			goto failed_out;

		BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
	return ret;
}
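
/*
 * Layout of new_blocks[] on success (illustrative): for indirect_blks = 2
 * and blks = 4, new_blocks[0] and new_blocks[1] receive the two new
 * indirect (metadata) block numbers and new_blocks[2] the first direct
 * block; the return value is the number of direct blocks actually
 * allocated, anywhere from 1 to 4 depending on what the allocator found.
 */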

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just
	 * allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}

/*
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int maxblocks,
			       struct buffer_head *bh_result,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}