/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
						   new_size);
}

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
			struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
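/*
 * Worked example of the estimate below (added for illustration, assuming
 * a 4KB block size, i.e. s_blocksize_bits - 9 == 3): a file with
 * i_blocks == 1024 gives needed == 128, which is then clamped to
 * EXT4_MAX_TRANS_DATA before EXT4_DATA_TRANS_BLOCKS(sb) is added on top.
 */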
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode (struct inode * inode)
{
	handle_t *handle;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext4_truncate(inode);
	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
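
/*
 * The Indirect triple records one step of a walk through the block map
 * (explanatory note): ->p points at the table entry (in the inode's
 * i_data or in an indirect block), ->key caches the little-endian block
 * number read from it, and ->bh pins the buffer hosting ->p (NULL when
 * ->p lives in the inode itself).
 */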

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
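/*
 * Example of the mapping below, assuming a 4KB block size (1024 block
 * pointers per indirect block): logical blocks 0-11 are direct, so
 * i_block 5 yields the path {5}; i_block 12 is the first indirect one,
 * {EXT4_IND_BLOCK, 0}; i_block 12 + 1024 is the first doubly-indirect
 * one, {EXT4_DIND_BLOCK, 0, 0}; and so on through EXT4_TIND_BLOCK.
 */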

static int ext4_block_to_path(struct inode *inode,
			ext4_lblk_t i_block,
			ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
				"block %lu > max",
				i_block + direct_blocks +
				indirect_blocks + double_blocks);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 *      Need to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
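/*
 * Example (illustrative): for logical block 12 of a file with 4KB
 * blocks, depth == 2 and offsets == {EXT4_IND_BLOCK, 0}; chain[0] is
 * filled from the inode's i_data and chain[1] from the indirect block
 * it references, and a NULL return means chain[1].key holds the data
 * block number.
 */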
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t  *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the callers PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
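/*
 * Illustration of the PID colouring below: with 32768 blocks per group
 * (the default for 4KB blocks), a task whose PID satisfies
 * pid % 16 == 1 starts allocating 1 * (32768 / 16) == 2048 blocks into
 * the group, keeping concurrent writers apart.
 */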
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
		Indirect *partial)
{
	struct ext4_block_alloc_info *block_i;

	block_i = EXT4_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext4_find_near(inode, partial);
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
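
/*
 * Example (hypothetical values): with k == 0, blks == 4,
 * blocks_to_boundary == 7, branch[0].p[1] unmapped but branch[0].p[2]
 * already mapped, the loop above stops with count == 2 - only two
 * contiguous direct blocks need to be allocated in this call.
 */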

/**
 *	ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 *	@indirect_blks: the number of blocks needed to allocate for indirect
 *			blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, ext4_fsblk_t goal,
				int indirect_blks, int blks,
				ext4_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
							goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	count = target;
	/* allocating blocks for data blocks */
	current_block = ext4_new_blocks(handle, inode, iblock,
						goal, &count, err);
	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
		/*
		 * save the new block number
		 * for the first direct block
		 */
			new_blocks[index] = current_block;
		}
		blk_allocated += count;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, int indirect_blks,
				int *blks, ext4_fsblk_t goal,
				ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			ext4_lblk_t block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext4_block_alloc_info *block_i;
	ext4_fsblk_t current_block;

	block_i = EXT4_I(inode)->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just
	 * allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
					le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
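/*
 * Illustrative call (hypothetical numbers): asking for up to 8 blocks
 * at logical block 100 with create == 1 may return 3, meaning three
 * contiguous blocks were mapped or allocated and bh_result now refers
 * to the first of them.
 */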
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
		ext4_lblk_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
					&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	*/
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
					&count, goal,
					offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	*/
	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 */
#define DIO_CREDITS 25
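/*
 * Spelling the formula above out for B = 4096 and A = 256:
 * 1 + (4096/256/256 + 2) + (4096/256 + 2) = 1 + 2 + 18 = 21 credits
 * for the block pointers, plus the 4 for sb, group descriptor, bitmap
 * and inode, giving DIO_CREDITS == 25.
 */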


/*
 * ext4 get_block() wrapper function.
 * It will do a lookup first, and return if the blocks are already mapped.
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extents based, it will call ext4_ext_get_blocks();
 * otherwise it calls ext4_get_blocks_handle() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped. If create == 1, it will make sure the
 * buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
			unsigned long max_blocks, struct buffer_head *bh,
			int create, int extend_disksize)
{
	int retval;

	clear_buffer_mapped(bh);

	/*
	 * Try to see if we can get  the block without requesting
	 * for new file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
				bh, 0, 0);
	} else {
		retval = ext4_get_blocks_handle(handle,
				inode, block, max_blocks, bh, 0, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));