int mkfs_ext2_main(int argc UNUSED_PARAM, char **argv)
{
unsigned i, pos, n;
- unsigned bs, blocksize, blocksize_log2;
+ unsigned bs, bpi;
+ unsigned blocksize, blocksize_log2;
unsigned nreserved = 5;
- unsigned long long nblocks_ull;
+ unsigned long long kilobytes;
uint32_t nblocks;
uint32_t ngroups;
- unsigned bytes_per_inode;
- uint32_t nblocks_per_group;
+ uint32_t bytes_per_inode;
uint32_t first_data_block;
- uint32_t ninodes;
uint32_t ninodes_per_group;
- uint32_t gdtsz, rgdtsz, itsz;
+ uint32_t gdtsz, itsz;
time_t timestamp;
unsigned opts;
const char *label;
opt_complementary = "-1:b+:m+:i+";
opts = getopt32(argv, "cl:b:f:i:I:J:G:N:m:o:g:L:M:O:r:E:T:U:jnqvFS",
- NULL, &bs, NULL, &bytes_per_inode, NULL, NULL, NULL, NULL,
+ NULL, &bs, NULL, &bpi, NULL, NULL, NULL, NULL,
&nreserved, NULL, NULL, &label, NULL, NULL, NULL, NULL, NULL, NULL);
argv += optind; // argv[0] -- device
bb_error_msg_and_die("-%c is bad", 'm');
// check the device is a block device
- xstat(argv[0], &st);
+ xmove_fd(xopen(argv[0], O_WRONLY), fd);
+ fstat(fd, &st);
if (!S_ISBLK(st.st_mode) && !(opts & OPT_F))
bb_error_msg_and_die("not a block device");
bb_error_msg_and_die("can't format mounted filesystem");
// open the device, get size in kbytes
- xmove_fd(xopen3(argv[0], O_WRONLY | O_CREAT, 0666), fd);
if (argv[1]) {
- nblocks_ull = xatoull(argv[1]);
+ kilobytes = xatoull(argv[1]);
} else {
- nblocks_ull = (uoff_t)xlseek(fd, 0, SEEK_END) / 1024;
+ kilobytes = (uoff_t)xlseek(fd, 0, SEEK_END) / 1024;
}
+ bytes_per_inode = 16384;
+ if (kilobytes < 512*1024)
+ bytes_per_inode = 4096;
+ if (kilobytes < 3*1024)
+ bytes_per_inode = 8192;
+ if (opts & OPT_i)
+ bytes_per_inode = bpi;
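+ // e.g. a 2 MiB image gets one inode per 8 KiB, an 80 MiB image one per
+ // 4 KiB, and anything from 512 MiB up one per 16 KiB (mke2fs-like defaults)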
+
// block size is a multiple of 1024
blocksize = 1024;
- if (nblocks_ull >= 512*1024) // mke2fs 1.41.9 compat
+ if (kilobytes >= 512*1024) // mke2fs 1.41.9 compat
blocksize = 4096;
if (EXT2_MAX_BLOCK_SIZE > 4096) {
- // nblocks_ull >> 22 == size in 4gigabyte chunks.
+ // kilobytes >> 22 == size in 4gigabyte chunks.
// if it is >= 16k gigs, blocksize must be increased.
// Try "mke2fs -F image_std $((16 * 1024*1024*1024))"
- while ((nblocks_ull >> 22) >= blocksize)
+ while ((kilobytes >> 22) >= blocksize)
blocksize *= 2;
}
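// e.g. a 16 TiB image: kilobytes >> 22 == 4096, so blocksize doubles
// to 8192 and the block count fits in 32 bits again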
if (opts & OPT_b)
bb_error_msg_and_die("blocksize %u is bad", blocksize);
}
blocksize_log2 = int_log2(blocksize);
- nblocks_ull >>= (blocksize_log2 - EXT2_MIN_BLOCK_LOG_SIZE);
+ kilobytes >>= (blocksize_log2 - EXT2_MIN_BLOCK_LOG_SIZE);
// nblocks: the total number of blocks in the filesystem
- nblocks = nblocks_ull;
- if (nblocks != nblocks_ull)
+ nblocks = kilobytes;
+ if (nblocks != kilobytes)
bb_error_msg_and_die("block count doesn't fit in 32 bits");
if (nblocks < 8)
bb_error_msg_and_die("need >= 8 blocks");
+#define kilobytes kilobytes_unused_after_this
+
+ // number of bits in one block, i.e. 8*blocksize
+#define blocks_per_group (8 * blocksize)
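+ // (one bitmap block covers 8*blocksize blocks: 8192-block/8 MiB groups
+ // with 1 KiB blocks, 32768-block/128 MiB groups with 4 KiB blocks)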
+
+/* e2fsprogs-1.41.9
+ overhead = 2 + fs->inode_blocks_per_group;
+ if (has_super(fs->group_desc_count - 1))
+ overhead += 1 + fs->desc_blocks + super->s_reserved_gdt_blocks;
+ rem = (nblocks - first_data_block) % blocks_per_group;
+ if ((fs->group_desc_count == 1) && rem && (rem < overhead)) {
+ retval = EXT2_ET_TOOSMALL;
+ goto cleanup;
+ }
+ if (rem && (rem < overhead+50)) {
+ nblocks -= rem;
+ goto retry;
+ }
+*/
+
+ // N.B. a block group can have no more than blocks_per_group blocks
+ first_data_block = (EXT2_MIN_BLOCK_SIZE == blocksize);
+ ngroups = div_roundup(nblocks - first_data_block, blocks_per_group);
+ if (0 == ngroups)
+ bb_error_msg_and_die("ngroups");
+
+ {
+ // ninodes is the total number of inodes (files) in the file system
+ uint32_t ninodes = nblocks / (bytes_per_inode >> blocksize_log2);
+ if (ninodes < EXT2_GOOD_OLD_FIRST_INO+1)
+ ninodes = EXT2_GOOD_OLD_FIRST_INO+1;
+ ninodes_per_group = div_roundup(ninodes, ngroups);
+ // minimum number because the first EXT2_GOOD_OLD_FIRST_INO-1 are reserved
+ if (ninodes_per_group < 16)
+ ninodes_per_group = 16;
+ }
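+ // continuing the 1 GiB / 4 KiB example: bytes_per_inode = 16384, so
+ // ninodes = 262144 / (16384 >> 12) = 65536 and ninodes_per_group = 8192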
// TODO: 5?/5 WE MUST NOT DEPEND ON WHETHER DEVICE IS /dev/zero'ed OR NOT
// TODO: 3/5 refuse if mounted
sb->s_log_block_size = sb->s_log_frag_size = blocksize_log2 - EXT2_MIN_BLOCK_LOG_SIZE;
// first 1024 bytes of the device are for boot record. If block size is 1024 bytes, then
// the first block available for data is 1, otherwise 0
- first_data_block = sb->s_first_data_block = (EXT2_MIN_BLOCK_SIZE == blocksize);
+ sb->s_first_data_block = first_data_block; // 0 or 1
// block and inode bitmaps occupy no more than one block, so maximum number of blocks is
- // number of bits in one block, i.e. 8*blocksize
- nblocks_per_group = sb->s_blocks_per_group = sb->s_frags_per_group = sb->s_inodes_per_group = 8 * blocksize;
+ sb->s_blocks_per_group = sb->s_frags_per_group = blocks_per_group;
timestamp = time(NULL);
sb->s_mkfs_time = sb->s_wtime = sb->s_lastcheck = timestamp;
sb->s_state = 1;
sb->s_creator_os = EXT2_OS_LINUX;
- sb->s_max_mnt_count = EXT2_DFL_MAX_MNT_COUNT;
sb->s_checkinterval = 24*60*60 * 180; // 180 days
sb->s_errors = EXT2_ERRORS_DEFAULT;
sb->s_feature_compat = EXT2_FEATURE_COMPAT_SUPP
* don't check all the filesystems at the same time. We use a
* kludgy hack of using the UUID to derive a random jitter value.
*/
- sb->s_max_mnt_count += sb->s_uuid[ARRAY_SIZE(sb->s_uuid)-1] % EXT2_DFL_MAX_MNT_COUNT;
+ sb->s_max_mnt_count = EXT2_DFL_MAX_MNT_COUNT
+ + (sb->s_uuid[ARRAY_SIZE(sb->s_uuid)-1] % EXT2_DFL_MAX_MNT_COUNT);
sb->s_blocks_count = nblocks;
// reserve blocks for superuser
sb->s_r_blocks_count = ((uint64_t) nblocks * nreserved) / 100;
- // N.B. a block group can have no more than nblocks_per_group blocks
- ngroups = div_roundup(nblocks - first_data_block, nblocks_per_group);
- if (0 == ngroups)
- bb_error_msg_and_die("ngroups");
gdtsz = div_roundup(ngroups, EXT2_DESC_PER_BLOCK(sb));
/*
* From e2fsprogs: Calculate the number of GDT blocks to reserve for online
/* We set it at 1024x the current filesystem size, or
* the upper block count limit (2^32), whichever is lower.
*/
-#if ENABLE_FEATURE_MKFS_EXT2_RESERVED_GDT
- rgdtsz = 0xFFFFFFFF; // maximum block number
- if (nblocks < rgdtsz / 1024)
- rgdtsz = nblocks * 1024;
- rgdtsz = div_roundup(rgdtsz - first_data_block, nblocks_per_group);
- rgdtsz = div_roundup(rgdtsz, EXT2_DESC_PER_BLOCK(sb)) - gdtsz;
- if (rgdtsz > EXT2_ADDR_PER_BLOCK(sb))
- rgdtsz = EXT2_ADDR_PER_BLOCK(sb);
- sb->s_reserved_gdt_blocks = rgdtsz;
- //bb_info_msg("RSRVD[%u]", n);
-#else
- rgdtsz = 0;
-#endif
-
- // ninodes is the total number of inodes (files) in the file system
- if (!(opts & OPT_i)) {
- bytes_per_inode = 16384;
- if (nblocks < 512*1024)
- bytes_per_inode = 4096;
- if (nblocks < 3*1024)
- bytes_per_inode = 8192;
+ if (ENABLE_FEATURE_MKFS_EXT2_RESERVED_GDT) {
+ uint32_t rgdtsz = 0xFFFFFFFF; // maximum block number
+ if (nblocks < rgdtsz / 1024)
+ rgdtsz = nblocks * 1024;
+ rgdtsz = div_roundup(rgdtsz - first_data_block, blocks_per_group);
+ rgdtsz = div_roundup(rgdtsz, EXT2_DESC_PER_BLOCK(sb)) - gdtsz;
+ if (rgdtsz > EXT2_ADDR_PER_BLOCK(sb))
+ rgdtsz = EXT2_ADDR_PER_BLOCK(sb);
+ STORE_LE(sb->s_reserved_gdt_blocks, rgdtsz);
+ gdtsz += rgdtsz;
}
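+ // for the 1 GiB / 4 KiB example this reserves 63 GDT blocks (room to
+ // grow the fs roughly 1024x), bumping gdtsz from 1 to 64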
- ninodes = nblocks / (bytes_per_inode >> blocksize_log2);
- if (ninodes < EXT2_GOOD_OLD_FIRST_INO+1)
- ninodes = EXT2_GOOD_OLD_FIRST_INO+1;
- ninodes_per_group = div_roundup(ninodes, ngroups);
- if (ninodes_per_group < 16)
- ninodes_per_group = 16; // minimum number because the first EXT2_GOOD_OLD_FIRST_INO-1 are reserved
- // N.B. a block group can have no more than 8*blocksize = sb->s_inodes_per_group inodes
- if (ninodes_per_group > sb->s_inodes_per_group)
- ninodes_per_group = sb->s_inodes_per_group;
+
+ // N.B. a block group can have no more than 8*blocksize inodes
+ if (ninodes_per_group > blocks_per_group)
+ ninodes_per_group = blocks_per_group;
// adjust inodes per group so they completely fill the inode table blocks in the descriptor
ninodes_per_group = (div_roundup(ninodes_per_group * EXT2_INODE_SIZE(sb), blocksize) << blocksize_log2) / EXT2_INODE_SIZE(sb);
// make sure the number of inodes per group is a multiple of 8
ninodes_per_group &= ~7;
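+ // e.g. with 128-byte inodes and 4 KiB blocks, 100 inodes/group would need
+ // div_roundup(12800, 4096) = 4 table blocks, which hold 4*4096/128 = 128
+ // inodes; 128 & ~7 == 128, so the table blocks are filled exactly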
sb->s_inodes_per_group = ninodes_per_group;// = div_roundup(ninodes_per_group * sb->s_inode_size, blocksize);
// total ninodes
- ninodes = sb->s_inodes_count = ninodes_per_group * ngroups;
+ sb->s_inodes_count = ninodes_per_group * ngroups;
itsz = ninodes_per_group * sb->s_inode_size / blocksize;
sb->s_free_inodes_count = sb->s_inodes_count - EXT2_GOOD_OLD_FIRST_INO;
"%u inodes, %u blocks\n"
"%u blocks (%u%%) reserved for the super user\n"
"First data block=%u\n"
-// "Maximum filesystem blocks=%lu\n"
+ "Maximum filesystem blocks=%u\n"
"%u block groups\n"
"%u blocks per group, %u fragments per group\n"
"%u inodes per group"
, sb->s_inodes_count, sb->s_blocks_count
, sb->s_r_blocks_count, nreserved
, first_data_block
-// , (rgdtsz + gdtsz) * EXT2_DESC_PER_BLOCK(sb) * nblocks_per_group
+ , gdtsz * EXT2_DESC_PER_BLOCK(sb) * blocks_per_group
, ngroups
- , nblocks_per_group, nblocks_per_group
+ , blocks_per_group, blocks_per_group
, ninodes_per_group
);
{
- const char *fmt = "\nSuperblock backups stored on blocks: %u";
+ const char *fmt = "\nSuperblock backups stored on blocks:\n"
+ "\t%u";
pos = first_data_block;
for (i = 1; i < ngroups; i++) {
- pos += nblocks_per_group;
+ pos += blocks_per_group;
if (has_super(i)) {
printf(fmt, (unsigned)pos);
fmt = ", %u";
#endif
if (opts & OPT_n)
- return EXIT_SUCCESS;
+ goto done;
// fill group descriptors
- gd = xzalloc((gdtsz + rgdtsz) * blocksize);
+ gd = xzalloc(gdtsz * blocksize);
sb->s_free_blocks_count = 0;
for (i = 0, pos = first_data_block, n = nblocks;
i < ngroups;
- i++, pos += nblocks_per_group, n -= nblocks_per_group
+ i++, pos += blocks_per_group, n -= blocks_per_group
) {
- uint32_t overhead = pos + (has_super(i) ? (1/*sb*/ + gdtsz + rgdtsz) : 0);
+ uint32_t overhead = pos + (has_super(i) ? (1/*sb*/ + gdtsz) : 0);
gd[i].bg_block_bitmap = overhead + 0;
gd[i].bg_inode_bitmap = overhead + 1;
gd[i].bg_inode_table = overhead + 2;
if (ngroups-1 == i) {
overhead += first_data_block;
}
- gd[i].bg_free_blocks_count = (n < nblocks_per_group ? n : nblocks_per_group) - overhead;
+ gd[i].bg_free_blocks_count = (n < blocks_per_group ? n : blocks_per_group) - overhead;
sb->s_free_blocks_count += gd[i].bg_free_blocks_count;
}
STORE_LE(sb->s_free_blocks_count, sb->s_free_blocks_count);
// dump filesystem skeleton structures
// printf("Writing superblocks and filesystem accounting information: ");
buf = xmalloc(blocksize);
- for (i = 0, pos = first_data_block; i < ngroups; i++, pos += nblocks_per_group) {
- uint32_t overhead = has_super(i) ? (1/*sb*/ + gdtsz + rgdtsz) : 0;
- uint32_t start;// = has_super(i) ? (1/*sb*/ + gdtsz + rgdtsz) : 0;
+ for (i = 0, pos = first_data_block; i < ngroups; i++, pos += blocks_per_group) {
+ uint32_t overhead = has_super(i) ? (1/*sb*/ + gdtsz) : 0;
+ uint32_t start;
uint32_t end;
// dump superblock and group descriptors and their backups
if (overhead) { // N.B. in fact, we want (has_super(i)) condition, but it is equal to (overhead != 0) and is cheaper
// N.B. 1024 byte blocks are special
PUT(((uint64_t)pos << blocksize_log2) + ((0 == i && 0 == first_data_block) ? 1024 : 0), sb, 1024);//blocksize);
- PUT(((uint64_t)pos << blocksize_log2) + blocksize, gd, (gdtsz + rgdtsz) * blocksize);
+ PUT(((uint64_t)pos << blocksize_log2) + blocksize, gd, gdtsz * blocksize);
}
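+ // (net effect: the primary superblock always sits at byte offset 1024,
+ // i.e. inside block 0 when blocksize > 1024 or in block 1 when blocksize
+ // == 1024; backup copies start at the first block of their group)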
start = overhead + 1/*bbmp*/ + 1/*ibmp*/ + itsz;
if (i == 0)
start += 2; // for "/" and "/lost+found"
- end = nblocks_per_group - (start + gd[i].bg_free_blocks_count);
+ end = blocks_per_group - (start + gd[i].bg_free_blocks_count);
// mark preallocated blocks as allocated
allocate(buf, blocksize, start, end);
// mark preallocated inodes as allocated
allocate(buf, blocksize,
ninodes_per_group - gd[i].bg_free_inodes_count,
- 8 * blocksize - ninodes_per_group
+ blocks_per_group - ninodes_per_group
);
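+ // allocate() (not shown in this hunk) presumably builds a fresh bitmap
+ // with the first 'start' bits and the trailing 'end' bits marked as in use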
// dump inode bitmap
//PUT((uint64_t)(pos + overhead + 1) * blocksize, buf, blocksize);
// bb_info_msg("done\n"
// "This filesystem will be automatically checked every %u mounts or\n"
-// "%u days, whichever comes first. Use tune2fs -c or -i to override.",
-// sb->s_max_mnt_count, sb->s_checkinterval / (3600 * 24)
+// "180 days, whichever comes first. Use tune2fs -c or -i to override.",
+// sb->s_max_mnt_count
// );
+ done:
// cleanup
if (ENABLE_FEATURE_CLEAN_UP) {
free(buf);