// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
24
#include <linux/vmalloc.h>
25
26
27
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
28
#include <linux/backing-dev.h>
29
30
#include <linux/parser.h>
#include <linux/buffer_head.h>
31
#include <linux/exportfs.h>
32
33
34
35
36
37
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
Theodore Ts'o's avatar
Theodore Ts'o committed
38
#include <linux/ctype.h>
Vignesh Babu's avatar
Vignesh Babu committed
39
#include <linux/log2.h>
40
#include <linux/crc16.h>
41
#include <linux/dax.h>
Dan Magenheimer's avatar
Dan Magenheimer committed
42
#include <linux/cleancache.h>
43
#include <linux/uaccess.h>
44
#include <linux/iversion.h>
45
#include <linux/unicode.h>
46
#include <linux/part_stat.h>
47
48
49
#include <linux/kthread.h>
#include <linux/freezer.h>

50
#include "ext4.h"
51
#include "ext4_extents.h"	/* Needed for trace points definition */
52
#include "ext4_jbd2.h"
53
54
#include "xattr.h"
#include "acl.h"
55
#include "mballoc.h"
56
#include "fsmap.h"
57

58
59
60
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

61
62
static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
63
static struct ratelimit_state ext4_mount_msg_ratelimit;
64

65
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
66
			     unsigned long journal_devnum);
67
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
68
static int ext4_commit_super(struct super_block *sb, int sync);
69
static int ext4_mark_recovery_complete(struct super_block *sb,
70
					struct ext4_super_block *es);
71
72
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
73
static int ext4_sync_fs(struct super_block *sb, int wait);
74
75
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
76
77
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
Al Viro's avatar
Al Viro committed
78
79
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
80
81
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
82
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
83
84
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
85
static void ext4_clear_request_list(void);
86
87
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
88

Jan Kara's avatar
Jan Kara committed
89
90
91
92
93
94
95
/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

Jan Kara's avatar
Jan Kara committed
117
/*
 * When ext4 is built as the sole handler for ext2 filesystems, register
 * an "ext2" file_system_type that simply mounts through ext4_mount().
 * IS_EXT2_SB() then tells us whether a superblock was claimed via that
 * compatibility entry point.
 */
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


133
134
135
static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
Al Viro's avatar
Al Viro committed
136
	.mount		= ext4_mount,
137
138
139
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
140
MODULE_ALIAS_FS("ext3");
141
MODULE_ALIAS("ext3");
142
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
Laurent Vivier's avatar
Laurent Vivier committed
143

144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205

/*
 * Submit a read for @bh.  The caller must already hold the buffer lock.
 * @end_io: completion handler; NULL selects end_buffer_read_sync().
 */
static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	/* Hold a ref across the I/O; the completion path is expected to drop it. */
	get_bh(bh);
	submit_bh(REQ_OP_READ, op_flags, bh);
}

/*
 * Start an asynchronous read of a locked buffer.  If the buffer is
 * already uptodate it is simply unlocked; otherwise the read is
 * submitted via __ext4_read_bh() and completion is left to @end_io.
 */
void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
			 bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io);
}

/*
 * Synchronously read a locked buffer.  Returns 0 once the buffer is
 * uptodate (including the fast path where it already was), -EIO if the
 * read completed without the buffer becoming uptodate.
 */
int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

/*
 * Lock @bh and read it if needed.  If someone else holds the lock we
 * do not submit a read ourselves; with @wait true we wait for the
 * buffer and report -EIO if it never became uptodate, with @wait false
 * we return 0 immediately (best-effort, e.g. for readahead).
 */
int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
{
	if (trylock_buffer(bh)) {
		if (wait)
			return ext4_read_bh(bh, op_flags, NULL);
		ext4_read_bh_nowait(bh, op_flags, NULL);
		return 0;
	}
	if (wait) {
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return 0;
		return -EIO;
	}
	return 0;
}

206
/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
212
213
214
/*
 * Fetch block @block of @sb as metadata (REQ_META), allocating the
 * buffer head with @gfp.  Returns the uptodate buffer head, or an
 * ERR_PTR(): -ENOMEM on allocation failure, the read error otherwise.
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block, int op_flags,
					       gfp_t gfp)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);

	/* Fast path: contents already valid, no I/O needed. */
	if (ext4_buffer_uptodate(bh))
		return bh;

	err = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (err) {
		put_bh(bh);
		return ERR_PTR(err);
	}
	return bh;
}

233
234
235
236
237
238
239
240
241
242
243
244
/* Read a metadata block; the page may come from the movable zone. */
struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				   int op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}

/* Read a metadata block into an unmovable page (gfp 0, no __GFP_MOVABLE). */
struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}

245
246
247
248
249
250
251
252
/*
 * Kick off readahead of @block.  Failures are deliberately ignored:
 * this is only a hint, and the non-waiting ext4_read_bh_lock() call
 * cannot report a read error anyway.
 */
void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);

	if (likely(bh)) {
		ext4_read_bh_lock(bh, REQ_RAHEAD, false);
		brelse(bh);
	}
}

255
256
257
/*
 * With metadata_csum enabled the only supported checksum algorithm is
 * crc32c; return 0 for superblocks advertising anything else.  Without
 * the feature any checksum type is acceptable.
 */
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

264
265
266
267
268
269
270
271
272
273
274
275
/*
 * Checksum the superblock image up to (but excluding) the s_checksum
 * field itself.
 */
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);

	return cpu_to_le32(ext4_chksum(sbi, ~0, (char *)es, offset));
}

276
277
static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
278
{
279
	if (!ext4_has_metadata_csum(sb))
280
281
282
283
284
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

285
void ext4_superblock_csum_set(struct super_block *sb)
286
{
287
288
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

289
	if (!ext4_has_metadata_csum(sb))
290
291
292
293
294
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

295
296
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
Laurent Vivier's avatar
Laurent Vivier committed
297
{
298
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
299
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
300
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
Laurent Vivier's avatar
Laurent Vivier committed
301
302
}

303
304
ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
Laurent Vivier's avatar
Laurent Vivier committed
305
{
306
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
307
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
308
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
Laurent Vivier's avatar
Laurent Vivier committed
309
310
}

311
312
ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
Laurent Vivier's avatar
Laurent Vivier committed
313
{
314
	return le32_to_cpu(bg->bg_inode_table_lo) |
315
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
316
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
Laurent Vivier's avatar
Laurent Vivier committed
317
318
}

319
320
__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
321
322
323
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
324
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
325
326
327
328
329
330
331
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
332
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
333
334
335
336
337
338
339
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
340
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
341
342
343
344
345
346
347
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
348
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
349
350
}

351
352
/* Store block-bitmap location @blk into @bg, splitting lo/hi halves. */
void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

359
360
/* Store inode-bitmap location @blk into @bg, splitting lo/hi halves. */
void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

367
368
/* Store inode-table start @blk into @bg, splitting lo/hi halves. */
void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

375
376
/* Store free-cluster count into @bg, splitting across lo/hi 16-bit words. */
void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

/* Store free-inode count into @bg, splitting across lo/hi 16-bit words. */
void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

/* Store used-directory count into @bg, splitting across lo/hi 16-bit words. */
void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

/* Store unused-itable-entries count into @bg, splitting across lo/hi words. */
void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
/*
 * Store the current wall-clock time into a split superblock timestamp
 * field: a __le32 low word plus a __u8 high byte (40 bits total).
 */
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
{
	time64_t now = ktime_get_real_seconds();

	/* Clamp to the 40 bits representable by the lo/hi pair. */
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

/* Reassemble a 40-bit timestamp from its __le32 low word and __u8 high byte. */
static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
/* Convenience wrappers pairing a field "foo" with its "foo_hi" byte. */
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
425

426
427
428
/*
 * Record an error in the on-disk superblock: always update the "last
 * error" slot, fill the "first error" slot if it is still empty, and
 * bump the error count.  Nothing is written when the backing device is
 * read-only.  The caller is responsible for actually committing the
 * superblock.
 */
static void __save_error_info(struct super_block *sb, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	int err;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	ext4_update_tstamp(es, s_last_error_time);
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	/* Translate the kernel errno into the compact on-disk error code. */
	switch (error) {
	case EIO:
		err = EXT4_ERR_EIO;
		break;
	case ENOMEM:
		err = EXT4_ERR_ENOMEM;
		break;
	case EFSBADCRC:
		err = EXT4_ERR_EFSBADCRC;
		break;
	case 0:
	case EFSCORRUPTED:
		err = EXT4_ERR_EFSCORRUPTED;
		break;
	case ENOSPC:
		err = EXT4_ERR_ENOSPC;
		break;
	case ENOKEY:
		err = EXT4_ERR_ENOKEY;
		break;
	case EROFS:
		err = EXT4_ERR_EROFS;
		break;
	case EFBIG:
		err = EXT4_ERR_EFBIG;
		break;
	case EEXIST:
		err = EXT4_ERR_EEXIST;
		break;
	case ERANGE:
		err = EXT4_ERR_ERANGE;
		break;
	case EOVERFLOW:
		err = EXT4_ERR_EOVERFLOW;
		break;
	case EBUSY:
		err = EXT4_ERR_EBUSY;
		break;
	case ENOTDIR:
		err = EXT4_ERR_ENOTDIR;
		break;
	case ENOTEMPTY:
		err = EXT4_ERR_ENOTEMPTY;
		break;
	case ESHUTDOWN:
		err = EXT4_ERR_ESHUTDOWN;
		break;
	case EFAULT:
		err = EXT4_ERR_EFAULT;
		break;
	default:
		err = EXT4_ERR_UNKNOWN;
	}
	es->s_last_error_errcode = err;
	/* Preserve the very first error for post-mortem analysis. */
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		es->s_first_error_time_hi = es->s_last_error_time_hi;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
		es->s_first_error_errcode = es->s_last_error_errcode;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

515
516
517
/*
 * Record the error in the superblock and, when the device is writable,
 * synchronously commit the superblock so the record survives a crash.
 */
static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	__save_error_info(sb, error, ino, block, func, line);
	if (!bdev_read_only(sb->s_bdev))
		ext4_commit_super(sb, 1);
}

524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	/* bdi->dev is torn down by del_gendisk() (see comment above). */
	return bdi->dev == NULL;
}

Bobi Jam's avatar
Bobi Jam committed
540
541
542
543
544
/*
 * jbd2 commit callback: after transaction @txn has committed, release
 * the data blocks freed during it and run all callbacks queued on the
 * transaction's private list.  s_md_lock protects the list but is
 * dropped around each jce_func() invocation.
 */
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		/* Callbacks run without the lock held. */
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}
562

563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages
 * and calls clear_page_dirty_for_io(), which we want,
 * to write protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	/* The committing transaction, passed through by the caller. */
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	/*
	 * Always AOP_WRITEPAGE_ACTIVATE: this callback only write-protects
	 * (or redirties) pages; no I/O is submitted here.
	 */
	return AOP_WRITEPAGE_ACTIVATE;
}

/*
 * For data=journal inodes: walk the dirty range being committed and
 * write-protect (not write out) the pages via the callback above.
 */
static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}

/*
 * Dispatch the commit-time data submission: journalled data gets the
 * write-protect treatment, ordered/writeback data goes through jbd2.
 */
static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	if (ext4_should_journal_data(jinode->i_vfs_inode))
		return ext4_journalled_submit_inode_data_buffers(jinode);

	return jbd2_journal_submit_inode_data_buffers(jinode);
}

/*
 * Wait for commit-time data writeback; nothing to wait for when the
 * inode journals its data (no data I/O was submitted).
 */
static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	if (ext4_should_journal_data(jinode->i_vfs_inode))
		return 0;

	return jbd2_journal_finish_inode_data_buffers(jinode);
}

646
647
648
649
650
651
/* True while the system is halting, powering off or restarting. */
static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT ||
	       system_state == SYSTEM_POWER_OFF ||
	       system_state == SYSTEM_RESTART;
}

652
653
654
655
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

667
static void ext4_handle_error(struct super_block *sb)
668
{
669
670
	journal_t *journal = EXT4_SB(sb)->s_journal;

671
672
673
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

674
	if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
675
676
		return;

677
678
679
	ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
	if (journal)
		jbd2_journal_abort(journal, -EIO);
680
681
682
683
684
685
	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_RO) || system_going_down()) {
686
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
687
688
689
690
691
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
692
		sb->s_flags |= SB_RDONLY;
693
	} else if (test_opt(sb, ERRORS_PANIC)) {
694
		panic("EXT4-fs (device %s): panic forced after error\n",
695
			sb->s_id);
696
	}
697
698
}

699
700
701
702
/* Rate-limit error reporting using the per-superblock ratelimit state. */
#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

703
/*
 * Report a filesystem error: log it (rate-limited), persist it in the
 * superblock error fields, and apply the mount's error policy.  No-op
 * once the filesystem has been forcibly shut down.
 */
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, error, 0, block, function, line);
	ext4_handle_error(sb);
}

727
/*
 * Like __ext4_error() but tags the report with the offending inode
 * (and optionally a block number).
 */
void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	struct super_block *sb = inode->i_sb;
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, error, inode->i_ino, block,
			function, line);
	ext4_handle_error(sb);
}

759
760
761
/*
 * Like __ext4_error_inode() but for corruption found through a struct
 * file: the log line additionally carries the file's path, and the
 * recorded error code is always EFSCORRUPTED.
 */
void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	char pathname[80], *path;
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(sb, EFSCORRUPTED, inode->i_ino, block,
			function, line);
	ext4_handle_error(sb);
}

798
799
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
800
801
802
803
{
	char *errstr = NULL;

	switch (errno) {
804
805
806
807
808
809
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
810
811
812
813
814
815
816
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
817
818
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

838
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

841
842
void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
843
844
845
846
{
	char nbuf[16];
	const char *errstr;

847
848
849
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

850
851
852
	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
853
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
854
855
		return;

856
857
858
859
860
	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
861

862
	save_error_info(sb, -errno, 0, 0, function, line);
863
	ext4_handle_error(sb);
864
865
866
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

876
void __ext4_abort(struct super_block *sb, const char *function,
877
		  unsigned int line, int error, const char *fmt, ...)
878
{
879
	struct va_format vaf;
880
881
	va_list args;

882
883
884
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

885
	save_error_info(sb, error, 0, 0, function, line);
886
	va_start(args, fmt);
887
888
889
890
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
891
892
	va_end(args);

893
	if (sb_rdonly(sb) == 0) {
894
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
895
896
897
898
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);

		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
899
900
901
902
903
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
904
		sb->s_flags |= SB_RDONLY;
905
	}
906
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down())
907
		panic("EXT4-fs panic from previous error\n");
908
909
}

910
911
void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
912
{
Joe Perches's avatar
Joe Perches committed
913
	struct va_format vaf;
914
915
	va_list args;

916
	atomic_inc(&EXT4_SB(sb)->s_msg_count);
917
918
919
	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

920
	va_start(args, fmt);
Joe Perches's avatar
Joe Perches committed
921
922
923
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
924
925
926
	va_end(args);
}

927
928
929
930
931
932
/* Count the warning and apply the per-superblock warning ratelimit. */
static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}
933

934
void __ext4_warning(struct super_block *sb, const char *function,
935
		    unsigned int line, const char *fmt, ...)
936
{
Joe Perches's avatar
Joe Perches committed
937
	struct va_format vaf;
938
939
	va_list args;

940
	if (!ext4_warning_ratelimit(sb))
941
942
		return;

943
	va_start(args, fmt);
Joe Perches's avatar
Joe Perches committed
944
945
946
947
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
948
949
950
	va_end(args);
}

951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
/*
 * Like __ext4_warning(), but for a specific inode: the message is
 * additionally tagged with the inode number and the command name of
 * the current task.  Shares the superblock's warning rate limit.
 */
void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

/*
 * Report an error found while the block group 'grp' is locked.
 * Must be entered with the group lock held; temporarily drops it in
 * the ERRORS_RO path (see annotations) and re-acquires it before
 * returning so callers always exit with the lock held again.
 */
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	/* Nothing useful to do once the fs has been forcibly shut down. */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	__save_error_info(sb, EFSCORRUPTED, ino, block, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (test_opt(sb, ERRORS_CONT)) {
		/* errors=continue: record the error and keep going. */
		ext4_commit_super(sb, 0);
		return;
	}

	/*
	 * The group lock must be dropped here: committing the superblock
	 * and handling the error can sleep.
	 */
	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

/*
 * Mark the block and/or inode bitmap of 'group' as corrupted, as
 * selected by 'flags'.  The first time a bitmap is flagged, the free
 * space it advertised is removed from the global free-cluster /
 * free-inode counters so the corrupted group is no longer counted.
 */
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				     ext4_group_t group,
				     unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int already_set;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		already_set = ext4_test_and_set_bit(
					EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					&grp->bb_state);
		/* Only the first marking adjusts the free-cluster count. */
		if (!already_set)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		already_set = ext4_test_and_set_bit(
					EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					&grp->bb_state);
		if (!already_set && gdp) {
			int count = ext4_free_inodes_count(sb, gdp);

			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

1056
void ext4_update_dynamic_rev(struct super_block *sb)
1057
{
1058
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1059

1060
	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
1061
1062
		return;

1063
	ext4_warning(sb,
1064
1065
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
1066
		     EXT4_DYNAMIC_REV);
1067

1068
1069
1070
	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
1084
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
1085
1086
1087
{
	struct block_device *bdev;

1088
	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
1089
1090
1091
1092
1093
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
Christoph Hellwig's avatar
Christoph Hellwig committed
1094
1095
1096
	ext4_msg(sb, KERN_ERR,
		 "failed to open journal device unknown-block(%u,%u) %ld",
		 MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
1097
1098
1099
1100
1101
1102
	return NULL;
}

/*
 * Release the journal device
 */
Al Viro's avatar
Al Viro committed
1103
static void ext4_blkdev_put(struct block_device *bdev)
1104
{
Al Viro's avatar
Al Viro committed
1105
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1106
1107
}

Al Viro's avatar
Al Viro committed
1108
static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
1109
1110
{
	struct block_device *bdev;
1111
	bdev = sbi->s_journal_bdev;
1112
	if (bdev) {
Al Viro's avatar
Al Viro committed
1113
		ext4_blkdev_put(bdev);
1114
		sbi->s_journal_bdev = NULL;
1115
1116
1117
1118
1119
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
1120
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
1121
1122
}

1123
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
1124
1125
1126
{
	struct list_head *l;

1127
1128
	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

/*
 * Turn off every quota type at unmount time.  Using our own
 * ext4_quota_off() (rather than the generic path) also clears the
 * quota-related inode flags.
 */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
/* No-op when quota support is compiled out. */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

1170
static void ext4_put_super(struct super_block *sb)
1171
{
1172
1173
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
1174
	struct buffer_head **group_desc;
1175
	struct flex_groups **flex_groups;
1176
	int aborted = 0;
1177
	int i, err;
1178

1179
	ext4_unregister_li_request(sb);
1180
	ext4_quota_off_umount(sb);
1181

1182
	destroy_workqueue(sbi->rsv_conversion_wq);
1183

1184
1185
1186
1187
1188
1189
1190
	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL
	 */
	ext4_unregister_sysfs(sb);

1191
	if (sbi->s_journal) {
1192
		aborted = is_journal_aborted(sbi->s_journal);
1193
1194
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
1195
		if ((err < 0) && !aborted) {
1196
			ext4_abort(sb, -err, "Couldn't clean up the journal");
1197
		}
1198
	}
1199

1200
	ext4_es_unregister_shrinker(sbi);
1201
	del_timer_sync(&sbi->s_err_report);
1202
1203
1204
1205
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

1206
	if (!sb_rdonly(sb) && !aborted) {
1207
		ext4_clear_feature_journal_needs_recovery(sb);
1208
1209
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
1210
	if (!sb_rdonly(sb))
1211
1212
</