2 * unix_io.c --- This is the Unix (well, really POSIX) implementation
5 * Implements a one-block write-through cache.
7 * Includes support for Windows NT under Cygwin.
9 * Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
10 * 2002 by Theodore Ts'o.
13 * This file may be redistributed under the terms of the GNU Public
29 #include <sys/utsname.h>
35 #include <sys/types.h>
37 #include <sys/resource.h>
43 * For checking structure magic numbers...
46 #define EXT2_CHECK_MAGIC(struct, code) \
47 if ((struct)->magic != (code)) return (code)
/* Direct-I/O thresholds: reads/writes of more than this many blocks
 * bypass the one-block write-through cache entirely. */
58 #define WRITE_DIRECT_SIZE 4 /* Must be smaller than CACHE_SIZE */
59 #define READ_DIRECT_SIZE 4 /* Should be smaller than CACHE_SIZE */
/* Per-channel private state; NOTE(review): this listing elides lines, so
 * only the cache array member is visible here (dev/offset/magic/access_time
 * are referenced below but their declarations are not shown). */
61 struct unix_private_data {
67 struct unix_cache cache[CACHE_SIZE];
/* Forward declarations for the io_manager method table. */
70 static errcode_t unix_open(const char *name, int flags, io_channel *channel);
71 static errcode_t unix_close(io_channel channel);
72 static errcode_t unix_set_blksize(io_channel channel, int blksize);
73 static errcode_t unix_read_blk(io_channel channel, unsigned long block,
74 int count, void *data);
75 static errcode_t unix_write_blk(io_channel channel, unsigned long block,
76 int count, const void *data);
77 static errcode_t unix_flush(io_channel channel);
78 static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
79 int size, const void *data);
80 static errcode_t unix_set_option(io_channel channel, const char *option,
83 static void reuse_cache(io_channel channel, struct unix_private_data *data,
84 struct unix_cache *cache, unsigned long block);
86 /* __FreeBSD_kernel__ is defined by GNU/kFreeBSD - the FreeBSD kernel
87 * does not know buffered block devices - everything is raw. */
88 #if defined(__CYGWIN__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
89 #define NEED_BOUNCE_BUFFER
91 #undef NEED_BOUNCE_BUFFER
/* The io_manager method table exported as unix_io_manager; the
 * NEED_BOUNCE_BUFFER conditional presumably selects an alternate
 * entry (elided from this listing) -- TODO confirm against full source. */
94 static struct struct_io_manager struct_unix_manager = {
95 EXT2_ET_MAGIC_IO_MANAGER,
103 #ifdef NEED_BOUNCE_BUFFER
111 io_manager unix_io_manager = &struct_unix_manager;
114 * Here are the raw I/O functions
116 #ifndef NEED_BOUNCE_BUFFER
/*
 * Read count blocks (or, when count < 0, -count raw bytes) starting at
 * `block` from data->dev into buf.  Non-bounce-buffer variant.
 * NOTE(review): intermediate lines are elided in this listing.
 */
117 static errcode_t raw_read_blk(io_channel channel,
118 struct unix_private_data *data,
120 int count, void *buf)
124 ext2_loff_t location;
/* count < 0 encodes a byte count rather than a block count */
127 size = (count < 0) ? -count : count * channel->block_size;
/* data->offset lets the channel address an image embedded in a file */
128 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
129 if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
/* errno can legitimately be 0 here; fall back to a generic seek error */
130 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
133 actual = read(data->dev, buf, size);
134 if (actual != size) {
137 retval = EXT2_ET_SHORT_READ;
/* zero-fill the unread tail so the caller never sees stale bytes */
143 memset((char *) buf+actual, 0, size-actual);
/* let the channel's registered read_error hook override the result */
144 if (channel->read_error)
145 retval = (channel->read_error)(channel, block, count, buf,
146 size, actual, retval);
149 #else /* NEED_BOUNCE_BUFFER */
151 * Windows and FreeBSD block devices only allow sector alignment IO in offset and size
/*
 * Bounce-buffer variant: reads the sector-aligned prefix directly into
 * buf, then reads one whole sector into a local bounce buffer and copies
 * the trailing fragment out of it.
 */
153 static errcode_t raw_read_blk(io_channel channel,
154 struct unix_private_data *data,
156 int count, void *buf)
159 size_t size, alignsize, fragment;
160 ext2_loff_t location;
161 int total = 0, actual;
162 #define BLOCKALIGN 512
163 char sector[BLOCKALIGN];
165 size = (count < 0) ? -count : count * channel->block_size;
166 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
/* NOTE(review): leftover debug printf, and %d does not match size_t /
 * unsigned long arguments (UB per C99) -- should use %zu / %lu or be
 * removed; cannot tell from this listing whether it is #ifdef DEBUG'd. */
168 printf("count=%d, size=%d, block=%d, blk_size=%d, location=%lx\n",
169 count, size, block, channel->block_size, location);
171 if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
172 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
/* split the request into an aligned body and a sub-sector fragment */
175 fragment = size % BLOCKALIGN;
176 alignsize = size - fragment;
178 actual = read(data->dev, buf, alignsize);
179 if (actual != alignsize)
/* read a full sector, then copy just the fragment the caller wanted */
183 actual = read(data->dev, sector, BLOCKALIGN);
184 if (actual != BLOCKALIGN)
186 memcpy(buf+alignsize, sector, fragment);
193 retval = EXT2_ET_SHORT_READ;
/* zero-fill everything past the bytes actually transferred */
196 memset((char *) buf+total, 0, size-actual);
197 if (channel->read_error)
198 retval = (channel->read_error)(channel, block, count, buf,
199 size, actual, retval);
/*
 * Write count blocks (or, when count encodes a byte size -- the branch
 * structure here is partially elided) from buf to data->dev at `block`.
 */
204 static errcode_t raw_write_blk(io_channel channel,
205 struct unix_private_data *data,
207 int count, const void *buf)
210 ext2_loff_t location;
/* single-block fast path vs. multi-block size computation; the
 * conditional between these two assignments is elided in this listing */
215 size = channel->block_size;
220 size = count * channel->block_size;
223 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
224 if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
225 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
229 actual = write(data->dev, buf, size);
230 if (actual != size) {
231 retval = EXT2_ET_SHORT_WRITE;
/* give the channel's write_error hook the final say on the status */
237 if (channel->write_error)
238 retval = (channel->write_error)(channel, block, count, buf,
239 size, actual, retval);
245 * Here we implement the cache functions
248 /* Allocate the cache buffers */
/* Allocates one block_size buffer per cache slot and resets the LRU
 * clock; called from unix_open and again after a block-size change. */
249 static errcode_t alloc_cache(io_channel channel,
250 struct unix_private_data *data)
253 struct unix_cache *cache;
256 data->access_time = 0;
257 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
259 cache->access_time = 0;
/* buffer sized to the channel's current block size */
262 if ((retval = ext2fs_get_mem(channel->block_size,
269 /* Free the cache buffers */
/* Mirror of alloc_cache: releases every slot's buffer and resets the
 * LRU clock.  Dirty data is NOT flushed here -- callers flush first. */
270 static void free_cache(struct unix_private_data *data)
272 struct unix_cache *cache;
275 data->access_time = 0;
276 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
278 cache->access_time = 0;
281 ext2fs_free_mem(&cache->buf);
288 * Try to find a block in the cache. If the block is not found, and
289 * eldest is a non-zero pointer, then fill in eldest with the cache
290 * entry to that should be reused.
292 static struct unix_cache *find_cached_block(struct unix_private_data *data,
294 struct unix_cache **eldest)
296 struct unix_cache *cache, *unused_cache, *oldest_cache;
299 unused_cache = oldest_cache = 0;
/* single pass: look for a hit while tracking both a free slot and the
 * least-recently-used slot as eviction candidates */
300 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
301 if (!cache->in_use) {
303 unused_cache = cache;
306 if (cache->block == block) {
/* hit: bump this entry to most-recently-used before returning */
307 cache->access_time = ++data->access_time;
311 (cache->access_time < oldest_cache->access_time))
312 oldest_cache = cache;
/* prefer a never-used slot over evicting the oldest live one */
315 *eldest = (unused_cache) ? unused_cache : oldest_cache;
320 * Reuse a particular cache entry for another block.
322 static void reuse_cache(io_channel channel, struct unix_private_data *data,
323 struct unix_cache *cache, unsigned long block)
/* write back the evicted block first so its data is not lost;
 * NOTE(review): raw_write_blk's return value is discarded here */
325 if (cache->dirty && cache->in_use)
326 raw_write_blk(channel, data, cache->block, 1, cache->buf);
330 cache->block = block;
331 cache->access_time = ++data->access_time;
335 * Flush all of the blocks in the cache
/* Writes every dirty cache entry back to the device; the third
 * parameter (elided from this listing) presumably controls whether
 * entries are also invalidated -- TODO confirm against full source. */
337 static errcode_t flush_cached_blocks(io_channel channel,
338 struct unix_private_data *data,
342 struct unix_cache *cache;
/* retval2 accumulates the first failure while the loop keeps flushing
 * the remaining entries */
343 errcode_t retval, retval2;
347 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
357 retval = raw_write_blk(channel, data,
358 cache->block, 1, cache->buf);
366 #endif /* NO_IO_CACHE */
/*
 * Open `name` as a unix I/O channel.  Allocates the io_channel and its
 * private data, sets a default 1024-byte block size, allocates the block
 * cache, opens the device, and applies a 2.4.x-kernel RLIMIT_FSIZE
 * workaround.  Error paths (elided here) free everything allocated.
 */
368 static errcode_t unix_open(const char *name, int flags, io_channel *channel)
370 io_channel io = NULL;
371 struct unix_private_data *data = NULL;
380 return EXT2_ET_BAD_DEVICE_NAME;
381 retval = ext2fs_get_mem(sizeof(struct struct_io_channel), &io);
384 memset(io, 0, sizeof(struct struct_io_channel));
385 io->magic = EXT2_ET_MAGIC_IO_CHANNEL;
386 retval = ext2fs_get_mem(sizeof(struct unix_private_data), &data);
390 io->manager = unix_io_manager;
391 retval = ext2fs_get_mem(strlen(name)+1, &io->name);
395 strcpy(io->name, name);
396 io->private_data = data;
/* default block size until the caller does set_blksize */
397 io->block_size = 1024;
402 memset(data, 0, sizeof(struct unix_private_data));
403 data->magic = EXT2_ET_MAGIC_UNIX_IO_CHANNEL;
405 if ((retval = alloc_cache(io, data)))
408 open_flags = (flags & IO_FLAG_RW) ? O_RDWR : O_RDONLY;
/* open64 vs. open selected by an elided HAVE_OPEN64-style conditional */
410 data->dev = open64(io->name, open_flags);
412 data->dev = open(io->name, open_flags);
/* RLIM_INFINITY fix-up for platforms where the glibc value is wrong */
421 #if (defined(__alpha__) || ((defined(__sparc__) || defined(__mips__)) && (SIZEOF_LONG == 4)))
422 #define RLIM_INFINITY ((unsigned long)(~0UL>>1))
424 #define RLIM_INFINITY (~0UL)
427 * Work around a bug in 2.4.10-2.4.18 kernels where writes to
428 * block devices are wrongly getting hit by the filesize
429 * limit. This workaround isn't perfect, since it won't work
430 * if glibc wasn't built against 2.2 header files. (Sigh.)
/* NOTE(review): this matches releases "2.4.10".."2.4.17" only --
 * release[5] < '8' excludes 2.4.18 despite the comment above, and a
 * 2.4.1x check also matches e.g. 2.4.1 followed by any char; verify
 * against the upstream fix. */
433 if ((flags & IO_FLAG_RW) &&
435 ((ut.release[0] == '2') && (ut.release[1] == '.') &&
436 (ut.release[2] == '4') && (ut.release[3] == '.') &&
437 (ut.release[4] == '1') && (ut.release[5] >= '0') &&
438 (ut.release[5] < '8')) &&
439 (fstat(data->dev, &st) == 0) &&
440 (S_ISBLK(st.st_mode))) {
443 rlim.rlim_cur = rlim.rlim_max = (unsigned long) RLIM_INFINITY;
444 setrlimit(RLIMIT_FSIZE, &rlim);
/* re-read the limit: setrlimit may have clamped the requested value */
445 getrlimit(RLIMIT_FSIZE, &rlim);
446 if (((unsigned long) rlim.rlim_cur) <
447 ((unsigned long) rlim.rlim_max)) {
448 rlim.rlim_cur = rlim.rlim_max;
449 setrlimit(RLIMIT_FSIZE, &rlim);
/* cleanup path: free private data and the channel on failure */
459 ext2fs_free_mem(&data);
461 ext2fs_free_mem(&io);
/*
 * Drop one reference to the channel; on the last reference, flush the
 * cache, close the file descriptor, and free all channel memory.
 */
465 static errcode_t unix_close(io_channel channel)
467 struct unix_private_data *data;
468 errcode_t retval = 0;
470 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
471 data = (struct unix_private_data *) channel->private_data;
472 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
/* not the last reference: nothing else to tear down */
474 if (--channel->refcount > 0)
478 retval = flush_cached_blocks(channel, data, 0);
481 if (close(data->dev) < 0)
485 ext2fs_free_mem(&channel->private_data);
486 ext2fs_free_mem(&channel->name);
487 ext2fs_free_mem(&channel);
/*
 * Change the channel's block size.  Because the cache buffers are sized
 * to block_size, the cache must be flushed and reallocated around the
 * change.
 */
491 static errcode_t unix_set_blksize(io_channel channel, int blksize)
493 struct unix_private_data *data;
496 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
497 data = (struct unix_private_data *) channel->private_data;
498 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
500 if (channel->block_size != blksize) {
/* write back dirty blocks before their buffers are freed */
502 if ((retval = flush_cached_blocks(channel, data, 0)))
506 channel->block_size = blksize;
508 if ((retval = alloc_cache(channel, data)))
/*
 * Cached block read: serve hits from the cache, batch consecutive
 * misses into a single raw_read_blk call, then populate the cache with
 * the newly read blocks.
 */
515 static errcode_t unix_read_blk(io_channel channel, unsigned long block,
516 int count, void *buf)
518 struct unix_private_data *data;
519 struct unix_cache *cache, *reuse[READ_DIRECT_SIZE];
524 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
525 data = (struct unix_private_data *) channel->private_data;
526 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
/* NO_IO_CACHE build: always go straight to the device */
529 return raw_read_blk(channel, data, block, count, buf);
532 * If we're doing an odd-sized read or a very large read,
533 * flush out the cache and then do a direct read.
/* NOTE(review): the read path tests WRITE_DIRECT_SIZE, not
 * READ_DIRECT_SIZE; harmless while both are 4, but reuse[] is sized by
 * READ_DIRECT_SIZE -- verify these stay in sync. */
535 if (count < 0 || count > WRITE_DIRECT_SIZE) {
536 if ((retval = flush_cached_blocks(channel, data, 0)))
538 return raw_read_blk(channel, data, block, count, buf);
543 /* If it's in the cache, use it! */
544 if ((cache = find_cached_block(data, block, &reuse[0]))) {
/* NOTE(review): debug printf; %d mismatches unsigned long block */
546 printf("Using cached block %d\n", block);
548 memcpy(cp, cache->buf, channel->block_size);
551 cp += channel->block_size;
555 * Find the number of uncached blocks so we can do a
556 * single read request
558 for (i=1; i < count; i++)
559 if (find_cached_block(data, block+i, &reuse[i]))
/* NOTE(review): debug printf; %d mismatches unsigned long block */
562 printf("Reading %d blocks starting at %d\n", i, block);
564 if ((retval = raw_read_blk(channel, data, block, i, cp)))
567 /* Save the results in the cache */
568 for (j=0; j < i; j++) {
571 reuse_cache(channel, data, cache, block++);
572 memcpy(cache->buf, cp, channel->block_size);
573 cp += channel->block_size;
577 #endif /* NO_IO_CACHE */
/*
 * Cached block write: odd/large writes flush the cache and go direct;
 * moderate writes go to the device immediately in write-through mode
 * and are then copied into the cache (dirty only when write-back).
 */
580 static errcode_t unix_write_blk(io_channel channel, unsigned long block,
581 int count, const void *buf)
583 struct unix_private_data *data;
584 struct unix_cache *cache, *reuse;
585 errcode_t retval = 0;
589 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
590 data = (struct unix_private_data *) channel->private_data;
591 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
/* NO_IO_CACHE build: always go straight to the device */
594 return raw_write_blk(channel, data, block, count, buf);
597 * If we're doing an odd-sized write or a very large write,
598 * flush out the cache completely and then do a direct write.
600 if (count < 0 || count > WRITE_DIRECT_SIZE) {
/* flush with invalidate-flag 1, since the direct write below may
 * overwrite blocks currently sitting in the cache */
601 if ((retval = flush_cached_blocks(channel, data, 1)))
603 return raw_write_blk(channel, data, block, count, buf);
607 * For a moderate-sized multi-block write, first force a write
608 * if we're in write-through cache mode, and then fill the
609 * cache with the blocks.
611 writethrough = channel->flags & CHANNEL_FLAGS_WRITETHROUGH;
613 retval = raw_write_blk(channel, data, block, count, buf);
617 cache = find_cached_block(data, block, &reuse);
620 reuse_cache(channel, data, cache, block);
622 memcpy(cache->buf, cp, channel->block_size);
/* already on disk in write-through mode, so not dirty */
623 cache->dirty = !writethrough;
626 cp += channel->block_size;
629 #endif /* NO_IO_CACHE */
/*
 * Byte-granular write: flushes the entire cache (the target bytes may
 * straddle cached blocks), then seeks and writes directly.
 * NOTE(review): uses plain lseek rather than ext2fs_llseek, so offsets
 * past 2GB may be limited on 32-bit builds -- confirm upstream intent.
 */
632 static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
633 int size, const void *buf)
635 struct unix_private_data *data;
636 errcode_t retval = 0;
639 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
640 data = (struct unix_private_data *) channel->private_data;
641 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
645 * Flush out the cache completely
647 if ((retval = flush_cached_blocks(channel, data, 1)))
651 if (lseek(data->dev, offset + data->offset, SEEK_SET) < 0)
654 actual = write(data->dev, buf, size);
656 return EXT2_ET_SHORT_WRITE;
662 * Flush data buffers to disk.
/* Writes back all dirty cache entries without invalidating them
 * (flush flag 0); the fsync/fdatasync step, if any, is elided here. */
664 static errcode_t unix_flush(io_channel channel)
666 struct unix_private_data *data;
667 errcode_t retval = 0;
669 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
670 data = (struct unix_private_data *) channel->private_data;
671 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
674 retval = flush_cached_blocks(channel, data, 0);
680 static errcode_t unix_set_option(io_channel channel, const char *option,
683 struct unix_private_data *data;
687 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
688 data = (struct unix_private_data *) channel->private_data;
689 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
691 if (!strcmp(option, "offset")) {
693 return EXT2_ET_INVALID_ARGUMENT;
695 tmp = strtoul(arg, &end, 0);
697 return EXT2_ET_INVALID_ARGUMENT;
701 return EXT2_ET_INVALID_ARGUMENT;