/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit     __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

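/*
 * read_block_bitmap() - read one block of the space bitmap from disk.
 *
 * Maps "block" within the bitmap extent described by "bitmap" to a physical
 * block, reads it and caches the buffer head in slot "bitmap_nr" of
 * bitmap->s_block_bitmap.  Returns 0 on success, or -EIO if the read fails
 * (the slot is then left holding NULL).
 */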
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

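/*
 * __load_block_bitmap() - make sure the bitmap block for "block_group" is
 * cached in bitmap->s_block_bitmap.
 *
 * Returns the slot index (equal to "block_group") on success, or a negative
 * errno if the bitmap block could not be read.  A group number beyond
 * bitmap->s_nr_groups is only reported via udf_debug().
 */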
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%u) >= nr_groups (%d)\n",
                          block_group, nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;

        return block_group;
}

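/*
 * load_block_bitmap() - wrapper around __load_block_bitmap() that also
 * returns -EIO if the cached buffer head turns out to be missing.
 */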
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

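/*
 * udf_add_free_space() - adjust the free space count kept in the Logical
 * Volume Integrity Descriptor.
 *
 * Adds "cnt" (which may be negative) to the free space table entry for
 * "partition" and marks the LVID as updated.  Does nothing if the LVID
 * buffer is not available.
 */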
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

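/*
 * udf_bitmap_free_blocks() - free blocks tracked by a space bitmap.
 *
 * Sets the bits for "count" blocks starting at bloc + offset in the
 * partition's space bitmap (a set bit means the block is free), crossing
 * bitmap block boundaries as needed, and credits the freed blocks to the
 * LVID free space count.
 */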
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %lu already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((__u8 *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

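/*
 * udf_bitmap_prealloc_blocks() - allocate up to "block_count" contiguous
 * blocks starting at "first_block" from a space bitmap.
 *
 * Clears bits one by one until a bit is found that is already in use, the
 * requested count is reached, or the partition end is hit.  Returns the
 * number of blocks actually allocated.
 */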
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                nr_groups = udf_compute_nr_groups(sb, partition);
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

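/*
 * udf_bitmap_new_block() - allocate a single block from a space bitmap.
 *
 * Tries the "goal" block first, then a free bit within the same 64-bit
 * word, then the rest of the goal's bitmap block, and finally scans the
 * remaining bitmap blocks.  Once a free bit is found it walks back up to
 * seven bits into the preceding free run, clears the bit and returns the
 * block number, or returns 0 with *err set on failure.
 */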
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0;
        udf_pblk_t block;
        int block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        udf_pblk_t newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit(bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

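/*
 * udf_table_free_blocks() - return blocks to an unallocated space table.
 *
 * Tries to merge the freed range into an existing free extent that ends
 * right before it or starts right after it, respecting the 0x3FFFFFFF
 * extent length limit.  Any remainder is added as a new extent; if the
 * allocation descriptor area is full, one block of the freed range itself
 * is used to set up an indirect extent block.
 */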
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        /* Steal a block from the extent being free'd */
                        udf_setup_indirect_aext(table, eloc.logicalBlockNum,
                                                &epos);

                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen)
                        __udf_add_aext(table, &epos, &eloc, elen, 1);
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

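/*
 * udf_table_prealloc_blocks() - allocate contiguous blocks from a space
 * table.
 *
 * Searches the table for an extent starting exactly at "first_block" and,
 * if found, carves up to "block_count" blocks off its front, shrinking or
 * deleting the extent.  Returns the number of blocks allocated.
 */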
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%u, elen=%u, first_block=%u\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

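/*
 * udf_table_new_block() - allocate a single block from a space table.
 *
 * Walks all free extents looking for the one closest to "goal", then takes
 * the first block of that extent, shrinking it by one block or deleting it
 * if it becomes empty.  Returns the new block number, or 0 with *err set
 * on failure.
 */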
static udf_pblk_t udf_table_new_block(struct super_block *sb,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        udf_pblk_t newblock = 0;
        uint32_t adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

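/*
 * udf_free_blocks() - free blocks in a partition.
 *
 * Dispatches to the bitmap or table based free routine according to the
 * partition's space management flags and, if an inode is given, subtracts
 * the freed bytes from its block accounting.
 */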
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, map->s_uspace.s_table,
                                      bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                udf_table_free_blocks(sb, map->s_fspace.s_table,
                                      bloc, offset, count);
        }

        if (inode) {
                inode_sub_bytes(inode,
                                ((sector_t)count) << sb->s_blocksize_bits);
        }
}

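/*
 * udf_prealloc_blocks() - preallocate contiguous blocks starting at
 * "first_block".
 *
 * Dispatches to the bitmap or table based preallocation routine and, if an
 * inode is given, adds the allocated bytes to its block accounting.
 * Returns the number of blocks allocated, or 0 if none could be.
 */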
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        int allocated;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_uspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_uspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_fspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_fspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else
                return 0;

        if (inode && allocated > 0)
                inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
        return allocated;
}

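/*
 * udf_new_block() - allocate a single block as close to "goal" as possible.
 *
 * Dispatches to the bitmap or table based allocator and, on success, adds
 * one block to the inode's block accounting (if an inode is given).
 * Returns the new block number, or 0 with *err set on failure.
 */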
inline udf_pblk_t udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        udf_pblk_t block;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_uspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_uspace.s_table,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_fspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_fspace.s_table,
                                            partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
        if (inode && block)
                inode_add_bytes(inode, sb->s_blocksize);
        return block;
}