/*
 * CDE - Common Desktop Environment
 *
 * Copyright (c) 1993-2012, The Open Group. All rights reserved.
 *
 * These libraries and programs are free software; you can
 * redistribute them and/or modify them under the terms of the GNU
 * Lesser General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * These libraries and programs are distributed in the hope that
 * they will be useful, but WITHOUT ANY WARRANTY; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with these libraries and programs; if not, write
 * to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
 * Floor, Boston, MA 02110-1301 USA
 */
/* $XConsortium: vmbest.c /main/2 1996/05/08 20:01:03 drk $ */
/***************************************************************
*                                                              *
*                     AT&T - PROPRIETARY                       *
*                                                              *
*         THIS IS PROPRIETARY SOURCE CODE LICENSED BY          *
*                        AT&T CORP.                            *
*                                                              *
*                Copyright (c) 1995 AT&T Corp.                 *
*                    All Rights Reserved                       *
*                                                              *
*          This software is licensed by AT&T Corp.             *
*      under the terms and conditions of the license in        *
*      http://www.research.att.com/orgs/ssr/book/reuse         *
*                                                              *
*              This software was created by the                *
*          Software Engineering Research Department            *
*                   AT&T Bell Laboratories                     *
*                                                              *
*              For further information contact                 *
*                   gsf@research.att.com                       *
*                                                              *
***************************************************************/
/*	Best-fit allocation method. This is based on a best-fit strategy
**	using a splay tree for storage of lists of free blocks of the same
**	size. Recent free blocks may be cached for fast reuse.
**
**	Written by (Kiem-)Phong Vo, kpv@research.att.com, 01/16/94.
*/
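/* A rough standalone sketch (hypothetical types and names, not vmalloc's
** real layout) of the structure described above: a binary search tree
** keyed on block size, where each node also heads a linked list of free
** blocks of the same size. In the real code the LEFT/RIGHT/LINK fields
** live inside the free blocks themselves.
**
**	#include <stddef.h>
**
**	typedef struct fnode_s	Fnode_t;
**	struct fnode_s
**	{	size_t		size;	-- key: block size
**		Fnode_t*	link;	-- next free block of equal size
**		Fnode_t*	left;	-- subtree of smaller blocks
**		Fnode_t*	right;	-- subtree of larger blocks
**	};
**
**	-- b's link/left/right are assumed zeroed on entry
**	static Fnode_t* finsert(Fnode_t* root, Fnode_t* b)
**	{	if(!root)
**			return b;
**		if(b->size == root->size)
**		{	b->link = root->link; root->link = b; }
**		else if(b->size < root->size)
**			root->left = finsert(root->left,b);
**		else	root->right = finsert(root->right,b);
**		return root;
**	}
*/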
static int	N_free;		/* # of free calls			*/
static int	N_alloc;	/* # of alloc calls			*/
static int	N_resize;	/* # of resize calls			*/
static int	N_wild;		/* # allocated from the wild block	*/
static int	N_cache;	/* # allocated from cache		*/
static int	N_last;		/* # allocated from last free block	*/
static int	P_junk;		/* # of semi-free pieces		*/
static int	P_free;		/* # of free pieces			*/
static int	P_busy;		/* # of busy pieces			*/
static size_t	M_junk;		/* max size of a junk piece		*/
static size_t	M_free;		/* max size of a free piece		*/
static size_t	M_busy;		/* max size of a busy piece		*/
static size_t	S_free;		/* total free space			*/
static size_t	S_junk;		/* total junk space			*/
static int	Vmcheck = 0;	/* 1 if checking			*/
/* Check to see if a block is in the free tree */
static int vmintree(Block_t* node, Block_t* b)
{	reg Block_t*	t;

	for(t = node; t; t = LINK(t))
		if(t == b)
			return 1;
	if(LEFT(node) && vmintree(LEFT(node),b))
		return 1;
	return RIGHT(node) && vmintree(RIGHT(node),b);
}
/* check to see if the tree is in good shape */
static int vmchktree(Block_t* node)
{	reg Block_t*	t;

	for(t = LINK(node); t; t = LINK(t))	/* the equal-size list	*/
		/**/ASSERT(SIZE(t) == SIZE(node));
	if((t = LEFT(node)) )
	{ /**/ASSERT(SIZE(t) < SIZE(node));
		vmchktree(t);
	}
	if((t = RIGHT(node)) )
	{ /**/ASSERT(SIZE(t) > SIZE(node));
		vmchktree(t);
	}
	return 1;
}
/* check that the region is in good shape */
static int vmcheck(Vmdata_t*	vd,
		   size_t	size,	/* if > 0, check that no free block is >size	*/
		   int		wild)	/* if != 0, as above but wild may be >size	*/
	reg Block_t	*b, *endb, *t, *np;

	/**/ASSERT(size <= 0 || !vd->free);
	/**/ASSERT(!vd->root || vmchktree(vd->root));

	P_junk = P_free = P_busy = 0;
	M_junk = M_free = M_busy = S_free = 0;
	for(seg = vd->seg; seg; seg = seg->next)
	{	endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		np = (Block_t*)((uchar*)DATA(b) + s);
		if(!ISBUSY(SIZE(b)) )
		{ /**/ ASSERT(!ISJUNK(SIZE(b)));
		  /**/ ASSERT(!ISPFREE(SIZE(b)));
		  /**/ ASSERT(TINIEST(b) || SEG(b) == seg);
		  /**/ ASSERT(ISBUSY(SIZE(np)));
		  /**/ ASSERT(ISPFREE(SIZE(np)));
		  /**/ ASSERT(*SELF(b) == b);
		  /**/ ASSERT(size <= 0 || SIZE(b) < size ||
			      (wild && b == vd->wild));
			{	for(t = TINY(vd)[INDEX(s)]; t; t = LINK(t))
			{ /**/ASSERT(VMWILD(vd,b));
			if(vd->root && vmintree(vd->root,b))
		else if(ISJUNK(SIZE(b)) )
		{ /**/ ASSERT(ISBUSY(SIZE(b)));
		  /**/ ASSERT(!ISPFREE(SIZE(np)));
			{	for(t = CACHE(vd)[INDEX(s)]; t; t = LINK(t))
			for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
		{ /**/ ASSERT(!ISPFREE(SIZE(b)) || !ISBUSY(SIZE(LAST(b))));
		  /**/ ASSERT(SEG(b) == seg);
		  /**/ ASSERT(!ISPFREE(SIZE(np)));
/* Tree rotation functions */
#define RROTATE(x,y)	(LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
#define LROTATE(x,y)	(RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
#define RLINK(s,x)	((s) = LEFT(s) = (x))
#define LLINK(s,x)	((s) = RIGHT(s) = (x))
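/* To illustrate (a sketch, not code from this file): RROTATE(x,y)
** expects y == LEFT(x) and performs a right rotation, making y the
** subtree root with x as its right child:
**
**	        x                y
**	       / \              / \
**	      y   C    ==>     A   x
**	     / \                  / \
**	    A   B                B   C
**
** i.e., LEFT(x) = RIGHT(y); RIGHT(y) = x; x = y. LROTATE is the mirror
** image. RLINK(s,x) hangs x at the attachment point of the growing
** right accumulator tree and advances the hook s (LLINK symmetrically
** for the left tree); bestsearch() below uses all four during its
** top-down splay descent.
*/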
/* Find and delete a suitable element in the free tree. */
static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
{
	reg Block_t	*t, *root, *l, *r;
	/* extracting a tiniest block from its list */
	if((root = wanted) && size == TINYSIZE)
	{	if((r = LINK(root)) )
		else	TINY(vd)[0] = r;
		else for(;; seg = seg->next)
		{	if((uchar*)root > (uchar*)seg->addr && (uchar*)root < seg->baddr)

	/* find the right one to delete */
	if((root = vd->root) ) do
	{ /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
		if(size == (s = SIZE(root)) )
		{	if((t = LEFT(root)) )
			{	if(size <= (s = SIZE(t)) )
		{	if((t = RIGHT(root)) )
			{	if(size >= (s = SIZE(t)) )
	} while((root = t) );

	if(root)	/* found it, now isolate it */
	{	RIGHT(l) = LEFT(root);
		LEFT(r) = RIGHT(root);
	else		/* nothing exactly fit */
	{	LEFT(r) = NIL(Block_t*);
		RIGHT(l) = NIL(Block_t*);

		/* grab the least one from the right tree */
		if((root = LEFT(&link)) )
		{	while((t = LEFT(root)) )
			LEFT(&link) = RIGHT(root);

	if(root && (r = LINK(root)) )
	{	/* head of a link list, use next one for root */
		LEFT(r) = RIGHT(&link);
		RIGHT(r) = LEFT(&link);
	else if(!(r = LEFT(&link)) )
	else	/* graft left tree to right tree */
	{	while((t = LEFT(r)) )
		LEFT(r) = RIGHT(&link);

	/**/ ASSERT(!wanted || wanted == root);
/* Reclaim all delayed free blocks into the free tree */
static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
{
	reg Block_t	*fp, *np, *t, *list, **cache;
	{	LINK(fp) = *(cache = CACHE(vd) + C_INDEX(SIZE(fp)) );
		vd->free = NIL(Block_t*);

	LINK(&tree) = NIL(Block_t*);
	for(n = S_CACHE; n >= c; --n)
	{	list = *(cache = CACHE(vd) + n);
		*cache = NIL(Block_t*);
		{	/* Note that below here we allow ISJUNK blocks to be
			** forward-merged even though they are not removed from
			** the list immediately. In this way, the list is
			** scanned only once. It works because the LINK and SIZE
			** fields are not destroyed during the merging. This can
			** be seen by observing that a tiniest block has a 2-word
			** header and a 2-word body. Merging a tiniest block
			** (1seg) and the next block (2seg) looks like this:
			**	1seg  size  link  left  2seg  size  link  left ....
			**	1seg  size  link  left  rite  xxxx  xxxx .... self
			** After the merge, the 2seg word is replaced by the RIGHT
			** pointer of the new block and somewhere beyond the
			** two xxxx fields, the SELF pointer will replace some
			** other word. The important part is that the two xxxx
			** fields are kept intact.
			*/
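			/* A worked example of the merge arithmetic (hypothetical
			** sizes, assuming an 8-byte Head_t): a junked block fp
			** with SIZE(fp) == 32 followed by a free neighbor np with
			** SIZE(np) == 48 absorbs np's header too, so the merged
			** data size is
			**	(SIZE(fp)&~BITS) + SIZE(np) + sizeof(Head_t)
			**	= 32 + 48 + 8 = 88
			** while LINK(fp) and SIZE(fp), living in fp's first two
			** words, are never overwritten.
			*/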
			if(!ISJUNK(size))	/* already done */

			if(ISPFREE(size))	/* backward merge */
				DELETE(vd,fp,INDEX(s),t,bestsearch);
				size = (size&~BITS) + s + sizeof(Head_t);

			for(;;)	/* forward merge */
			{	np = (Block_t*)((uchar*)fp+size+sizeof(Head_t));
				s = SIZE(np);	/**/ASSERT(s > 0);
					vd->wild = NIL(Block_t*);
				else	DELETE(vd,np,INDEX(s),t,bestsearch);
				else if(ISJUNK(s) && C_INDEX(s) >= c )
				size += s + sizeof(Head_t);

			if(fp == wanted)	/* about to be consumed by bestresize */

			/* tell next block that this one is free */
			SETPFREE(SIZE(np));	/**/ ASSERT(ISBUSY(SIZE(np)) );

			if(np->body.data >= vd->seg->baddr)

			/* tiny block goes to tiny list */
				np = LINK(fp) = TINY(vd)[s];
				if(s == 0)	/* TINIEST block */
					TLEFT(fp) = NIL(Block_t*);
					LEFT(fp) = NIL(Block_t*);

			/* don't put in free tree yet because they may be merged soon */
			if((LINK(fp) = LINK(np)) )

	/* insert all free blocks into the free tree */
	for(list = LINK(&tree); list; )
		/**/ASSERT(!ISBITS(SIZE(fp)));
		/**/ASSERT(ISBUSY(SIZE(NEXT(fp))) );
		/**/ASSERT(ISPFREE(SIZE(NEXT(fp))) );
		LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
		if(!(np = vd->root) )	/* inserting into an empty tree */

		while(1)	/* leaf insertion */
		{	if((s = SIZE(np)) > size)
			{	if((t = LEFT(np)) )
			{	if((t = RIGHT(np)) )
			{	if((t = LINK(np)) )
static Void_t* bestalloc(Vmalloc_t*	vm,	/* region allocating from	*/
			 reg size_t	size)	/* desired block size		*/
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*tp, *np, **cache;
	if(!(local = vd->mode&VM_TRUST))
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd,local) )

	/**/ ASSERT(HEADSIZE == sizeof(Head_t));
	/**/ ASSERT(BODYSIZE == sizeof(Body_t));
	/**/ ASSERT((ALIGN%(BITS+1)) == 0 );
	/**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
	/**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
	/**/ ASSERT((TINYSIZE%ALIGN) == 0 );
	/**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );
	/* for the ANSI requirement that malloc(0) return a non-NULL pointer */
	size = size <= TINYSIZE ? TINYSIZE : ROUND(size,ALIGN);
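	/* For instance, with hypothetical values ALIGN == 8 and
	** TINYSIZE == 24: requests of 0, 1 and 24 bytes all become
	** TINYSIZE, and a 25-byte request becomes ROUND(25,8) == 32.
	** The non-zero floor is what lets malloc(0) hand back a unique
	** non-NULL block.
	*/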
	if(size < MAXCACHE && (tp = *(cache = CACHE(vd) + INDEX(size)) ) )

	if((tp = vd->free) )	/* allocate from last free piece */
	{ /**/ASSERT(ISBUSY(SIZE(tp)) );
	  /**/ASSERT(ISJUNK(SIZE(tp)) );
		vd->free = NIL(Block_t*);
		if((s = SIZE(tp)) < size)
		{	LINK(tp) = *(cache = CACHE(vd)+C_INDEX(s));
		{	if(s >= size + (sizeof(Head_t)+TINYSIZE) )
				SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;

	{	for(;;)	/* best-fit - more or less */
		{	for(s = INDEX(size); s < S_TINY; ++s)
			{	if((tp = TINY(vd)[s]) )
				{	DELETE(vd,tp,s,np,bestsearch);
					CLRPFREE(SIZE(NEXT(tp)));

			if(CACHE(vd)[S_CACHE])	/* reclaim big pieces */
				bestreclaim(vd,NIL(Block_t*),S_CACHE);
			if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
			if(bestreclaim(vd,NIL(Block_t*),0) == 0)

		/**/ASSERT(!vd->free);
		if((tp = vd->wild) && SIZE(tp) >= size)
		{ /**/ASSERT(vmcheck(vd,size,1));
			vd->wild = NIL(Block_t*);

		/**/ASSERT(vmcheck(vd,size,0) );
		if((tp = (*_Vmextend)(vm,size,bestsearch)) )
		else if(vd->mode&VM_AGAIN)
			vd->mode &= ~VM_AGAIN;

	/**/ ASSERT(!ISBITS(SIZE(tp)));
	/**/ ASSERT(SIZE(tp) >= size);
	/**/ ASSERT((SIZE(tp)%ALIGN) == 0);
	/**/ ASSERT(!vd->free);

	/* tell next block that we are no longer a free block */
	CLRPFREE(SIZE(NEXT(tp)));	/**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));

	if((s = SIZE(tp)-size) >= (sizeof(Head_t)+TINYSIZE) )
		SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;
	else if(VMWILD(vd,np))
		SETPFREE(SIZE(NEXT(np)));
	{	LINK(np) = *(cache = CACHE(vd) + C_INDEX(SIZE(np)));

	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm,NIL(uchar*),(uchar*)DATA(tp),orgsize);
static long bestaddr(Vmalloc_t*	vm,	/* region allocating from	*/
		     Void_t*	addr)	/* address to check		*/
{
	reg Block_t	*b, *endb;
	reg Vmdata_t*	vd = vm->data;
	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);

	for(seg = vd->seg; seg; seg = seg->next)
	{	endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((uchar*)addr > (uchar*)b && (uchar*)addr < (uchar*)endb)

	if(local && !(vd->mode&VM_TRUST) )	/* from bestfree or bestresize */
	{	if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
		if(offset != 0 && vm->disc->exceptf)
			(void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc);

	{	reg uchar*	data = (uchar*)DATA(b);
		reg size_t	size = SIZE(b)&~BITS;

		if((uchar*)addr >= data && (uchar*)addr < data+size)
		{	if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
			else	offset = (uchar*)addr - data;
		b = (Block_t*)((uchar*)DATA(b) + size);
static int bestfree(Vmalloc_t* vm, Void_t* data)
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*bp, *fp, **cache;
	if(!data)	/* ANSI-ism */

	if(!(local = vd->mode&VM_TRUST) )
	{	if(KPVADDR(vm,data,bestaddr) != 0 )

	bp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(bp)) && !ISJUNK(SIZE(bp)));
	if((s = SIZE(bp)) < MAXCACHE)
	{	LINK(bp) = *(cache = CACHE(vd) + INDEX(s));
	{	if((!(fp = vd->free) || VMWILD(vd,fp)) && !VMWILD(vd,bp) )
		{	LINK(fp) = *(cache = CACHE(vd) + C_INDEX(SIZE(fp)));

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
		(*_Vmtrace)(vm,(uchar*)data,NIL(uchar*),SIZE(BLOCK(data))&~BITS);
static Void_t* bestresize(Vmalloc_t*	vm,	/* region allocating from	*/
			  Void_t*	data,	/* old block of data		*/
			  reg size_t	size,	/* new size			*/
			  int		flags)	/* VM_RS*			*/
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*rp, *np, *t, **cache;
	reg int		local, *nd, *od;
	/**/ COUNT(N_resize);

	{	if((data = bestalloc(vm,size)) && (flags & VM_RSZERO))
		{	s = (size+sizeof(int)-1)/sizeof(int);
			for(nd = (int*)data; s-- > 0; )

	{	(void)bestfree(vm,data);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd,local) )
		if(!local && KPVADDR(vm,data,bestaddr) != 0 )

	orgdata = data;	/* for tracing */

	size = size <= TINYSIZE ? TINYSIZE : ROUND(size,ALIGN);
	rp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
	if((bs = SIZE(rp)) < size)

		do	/* forward merge as much as possible */
		{	vd->free = NIL(Block_t*);
			{	CPYBITS(SIZE(rp),bs);
				bestreclaim(vd,np,C_INDEX(s));
				vd->wild = NIL(Block_t*);
			else	DELETE(vd,np,INDEX(s),t,bestsearch);
			SIZE(rp) += (s += sizeof(Head_t));
			np = (Block_t*)((uchar*)np + s);
		} while(SIZE(rp) < size);

		if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
		{	s = (size - SIZE(rp)) + sizeof(Head_t);
			s = ROUND(s,vd->incr);
			if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
						vm->disc) == seg->addr )
				SIZE(NEXT(rp)) = BUSY;
		CPYBITS(SIZE(rp),bs);
	/* If a buffer is resized, it is likely to be resized again. So we
	** make the increment a reasonable size to reduce future work.
	*/
#define INCREMENT	128
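	/* A sketch of the effect (made-up sizes; vmresize is vmalloc's
	** public resize entry point): growing a buffer one byte at a time
	** would otherwise do real work on every call, but padding each
	** in-place extension with INCREMENT bytes of slack lets the next
	** ~128 one-byte growths succeed trivially.
	**
	**	for(n = 100; n < 400; ++n)
	**		p = vmresize(vm, p, n, VM_RSMOVE|VM_RSCOPY);
	*/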
	if((s = SIZE(rp)) >= (size + (INCREMENT+TINYSIZE+sizeof(Head_t))) )
		SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;

	{	if(!(flags & VM_RSFREE))

		if(size < (s&~BITS)+INCREMENT)
			size = (s&~BITS)+INCREMENT;
		if((data = KPVALLOC(vm,size,bestalloc)) )
		{	if(flags & VM_RSZERO)
			{	bs = (size-s+sizeof(int)-1)/sizeof(int);
				for(nd = (int*)((char*)data+s); bs-- > 0; )
			if(flags & VM_RSCOPY)
				for(s /= sizeof(int); s-- > 0; )
			if((np = vd->free) && VMWILD(vd,np) )
			cache = CACHE(vd) + C_INDEX(SIZE(rp));

	if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (uchar*)orgdata, (uchar*)data, orgsize);
static long bestsize(Vmalloc_t*	vm,	/* region allocating from	*/
		     Void_t*	addr)	/* address to check		*/
{
	reg Block_t	*b, *endb;
	reg Vmdata_t*	vd = vm->data;
	if(!(vd->mode&VM_TRUST) )

	for(seg = vd->seg; seg; seg = seg->next)
	{	endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((uchar*)addr <= (uchar*)b || (uchar*)addr >= (uchar*)endb)
		{	if(addr == DATA(b))
			{	if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
				else	size = (long)SIZE(b)&~BITS;
			else if((uchar*)addr <= (uchar*)b)
			b = (Block_t*)((uchar*)DATA(b) + (SIZE(b)&~BITS) );
static int bestcompact(Vmalloc_t* vm)
{
	reg Seg_t	*seg, *next;
	reg Vmdata_t*	vd = vm->data;
	if(!(vd->mode&VM_TRUST) )

	bestreclaim(vd,NIL(Block_t*),0);

	for(seg = vd->seg; seg; )
	{	bp = BLOCK(seg->baddr);
		if(!ISPFREE(SIZE(bp)) )

		bp = LAST(bp);	/**/ ASSERT(!ISBUSY(SIZE(bp)));
			vd->wild = NIL(Block_t*);
		else	DELETE(vd,bp,INDEX(size),t,bestsearch);
		CLRPFREE(SIZE(NEXT(bp)));

		size += sizeof(Head_t);

		if((*_Vmtruncate)(vm,seg,size,1) >= 0)
		{	if((size = (seg->baddr - ((uchar*)bp) - sizeof(Head_t))) > 0)
				SIZE(bp) = size - sizeof(Head_t);
			else	bp = NIL(Block_t*);

		{ /**/ ASSERT(SIZE(bp) >= TINYSIZE);
		  /**/ ASSERT(SEGWILD(bp));
			SIZE(bp) |= BUSY|JUNK;
			LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
			CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align)
{
	reg Block_t	*tp, *np;
	reg Vmdata_t*	vd = vm->data;
	if(size <= 0 || align <= 0)
		return NIL(Void_t*);

	if(!(vd->mode&VM_TRUST) )
		return NIL(Void_t*);

	align = MULTIPLE(align,ALIGN);
	s = (size <= TINYSIZE ? TINYSIZE : ROUND(size,ALIGN)) + align;
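	/* Worked example (hypothetical numbers, ALIGN == 8): for
	** size == 100 and align == 64 this over-allocates
	** s = ROUND(100,8) + 64 = 168 bytes. If the allocation below
	** returns data at 0x1008, then ULONG(data)%align == 8, and
	** stepping forward 64-8 == 56 bytes lands on 0x1040, a 64-byte
	** boundary with the rounded 104 bytes still in range.
	*/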
	if(!(data = (uchar*)KPVALLOC(vm,s,bestalloc)) )

	/* get an aligned address that we can live with */
	if((s = (size_t)(ULONG(data)%align)) != 0)
		if(((uchar*)np - (uchar*)tp) < sizeof(Block_t) )

	/* np is the usable block of data */
	s = (uchar*)np - (uchar*)tp;
	SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
	data = (uchar*)DATA(np);

	/* now free the left part */
	SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
	/**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
	{	LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;

	if(!(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) &&
	   VMETHOD(vd) == VM_MTBEST )
		(*_Vmtrace)(vm,NIL(uchar*),data,size);

	return (Void_t*)data;
/* The world knows us by this */

/* The heap region */
static Vmdata_t	_Vmdata =
{	VM_MTBEST|VM_TRUST,	/* mode	*/
	NIL(Seg_t*),		/* seg	*/
	NIL(Block_t*),		/* free	*/
	NIL(Block_t*),		/* wild	*/
	NIL(Block_t*),		/* root	*/

	Vmdcsbrk,		/* disc	*/

Vmalloc_t*	Vmregion = &_Vmheap;	/* region for malloc/free/realloc */