diff --git a/arch/m68k-all/include/aros/cpu.h b/arch/m68k-all/include/aros/cpu.h index 6d1109dafb..379087658a 100644 --- a/arch/m68k-all/include/aros/cpu.h +++ b/arch/m68k-all/include/aros/cpu.h @@ -23,7 +23,7 @@ #define AROS_PTRALIGN 2 /* Alignment for PTR */ #define AROS_IPTRALIGN 2 /* Alignment for IPTR */ #define AROS_DOUBLEALIGN 2 /* Alignment for double */ -#define AROS_WORSTALIGN 32 /* Worst case alignment - ApolloOS = 32 Bytes */ +#define AROS_WORSTALIGN 4 /* Worst case alignment - reduced from the former 32-byte ApolloOS boundary */ #define AROS_SLOWSTACKFORMAT 1 diff --git a/compiler/include/aros/cpu.h b/compiler/include/aros/cpu.h index e556b361f8..d7a5381721 100644 --- a/compiler/include/aros/cpu.h +++ b/compiler/include/aros/cpu.h @@ -50,7 +50,7 @@ #define AROS_PTRALIGN 8 /* Alignment for PTR */ #define AROS_IPTRALIGN 8 /* Alignment for IPTR */ #define AROS_DOUBLEALIGN 8 /* Alignment for double */ -#define AROS_WORSTALIGN 32 /* Worst case alignment */ +#define AROS_WORSTALIGN 4 /* Worst case alignment */ #define AROS_STACKALIGN 16 /* Clean stack alignment */ #elif defined __arm__ # if defined __thumb2__ diff --git a/compiler/include/exec/memory.h b/compiler/include/exec/memory.h index cb97060ffe..7855fbedfd 100644 --- a/compiler/include/exec/memory.h +++ b/compiler/include/exec/memory.h @@ -30,7 +30,7 @@ struct MemChunk }; /* Total size of struct MemChunk, including padding */ -#define MEMCHUNK_TOTAL 32 //[WD] ApolloOS is set to 32 Byte Memory Boundary from Core 11350+ | (AROS_WORSTALIGN > sizeof(struct MemChunk) ? AROS_WORSTALIGN : sizeof(struct MemChunk)) +#define MEMCHUNK_TOTAL (AROS_WORSTALIGN > sizeof(struct MemChunk) ? 
AROS_WORSTALIGN : sizeof(struct MemChunk)) /* Total size of struct MemHeader, including padding */ #define MEMHEADER_TOTAL (AROS_ROUNDUP2(sizeof(struct MemHeader), MEMCHUNK_TOTAL)) diff --git a/rom/exec/allocate.c b/rom/exec/allocate.c index 22ada6d323..aa3efa3013 100644 --- a/rom/exec/allocate.c +++ b/rom/exec/allocate.c @@ -84,6 +84,7 @@ { AROS_LIBFUNC_INIT +#ifdef HANDLE_MANAGED_MEM if ((freeList->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(freeList)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)freeList; @@ -94,7 +95,8 @@ return NULL; } else - { +#endif + { struct TraceLocation tp = CURRENT_LOCATION("Allocate"); APTR res; diff --git a/rom/exec/allocpooled.c b/rom/exec/allocpooled.c index badde62ab5..f6e4cba169 100644 --- a/rom/exec/allocpooled.c +++ b/rom/exec/allocpooled.c @@ -75,6 +75,7 @@ if(!memSize) return NULL; +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mhe)) { ULONG poolrequirements = (ULONG)(IPTR)mhe->mhe_MemHeader.mh_First; @@ -85,7 +86,8 @@ return NULL; } else - { +#endif + { struct TraceLocation tp = CURRENT_LOCATION("AllocPooled"); struct Pool *pool = poolHeader + MEMHEADER_TOTAL; diff --git a/rom/exec/allocvecpooled.c b/rom/exec/allocvecpooled.c index 95f945ee35..c7eb4908f3 100644 --- a/rom/exec/allocvecpooled.c +++ b/rom/exec/allocvecpooled.c @@ -56,7 +56,7 @@ /* 0-sized allocation results in returning NULL (API guarantee) */ if(!memSize) return NULL; - +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mhe)) { ULONG poolrequirements = (ULONG)(IPTR)mhe->mhe_MemHeader.mh_First; @@ -67,7 +67,8 @@ return NULL; } else - { +#endif + { IPTR *memory; if (poolHeader == NULL) return NULL; diff --git a/rom/exec/createpool.c b/rom/exec/createpool.c index 49d42ccb91..18ed4774fa 100644 --- a/rom/exec/createpool.c +++ b/rom/exec/createpool.c @@ -139,6 +139,7 @@ * If the pool is in managed memory, don't bother any further setup. The * pool should do the rest self. 
*/ +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(firstPuddle)) { D(bug("Managed pool\n");) @@ -152,7 +153,8 @@ firstPuddle->mh_First = (APTR)(IPTR)requirements; } else - { +#endif + { /* * Add the puddle to the list (yes, contained in itself). * This is the first puddle so it's safe to use AddTail() here. diff --git a/rom/exec/deallocate.c b/rom/exec/deallocate.c index c3ad9eb20f..ac57d52374 100644 --- a/rom/exec/deallocate.c +++ b/rom/exec/deallocate.c @@ -58,6 +58,7 @@ { AROS_LIBFUNC_INIT +#ifdef HANDLE_MANAGED_MEM if ((freeList->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(freeList)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)freeList; @@ -66,7 +67,8 @@ mhe->mhe_Free(mhe, memoryBlock, byteSize); } else - { +#endif + { struct TraceLocation tp = CURRENT_LOCATION("Deallocate"); /* If there is no memory free nothing */ diff --git a/rom/exec/deletepool.c b/rom/exec/deletepool.c index 4541ce6f2f..224602368c 100644 --- a/rom/exec/deletepool.c +++ b/rom/exec/deletepool.c @@ -61,12 +61,14 @@ { struct TraceLocation tp = CURRENT_LOCATION("DeletePool"); +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(poolHeader)) { /* Do nothing, everything is handled in FreeMemHeader */ } else - { +#endif + { struct Pool *pool = poolHeader + MEMHEADER_TOTAL; struct Node *p, *p2; /* Avoid casts */ diff --git a/rom/exec/freepooled.c b/rom/exec/freepooled.c index c2dd992f13..c77ad43f0e 100644 --- a/rom/exec/freepooled.c +++ b/rom/exec/freepooled.c @@ -61,13 +61,15 @@ if(!memSize || !memory) return; +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mhe)) { if (mhe->mhe_Free) mhe->mhe_Free(mhe, memory, memSize); } else - { +#endif + { struct TraceLocation tp = CURRENT_LOCATION("FreePooled"); InternalFreePooled(poolHeader, memory, memSize, &tp, SysBase); diff --git a/rom/exec/freevecpooled.c b/rom/exec/freevecpooled.c index 845692fa85..647d437764 100644 --- a/rom/exec/freevecpooled.c +++ b/rom/exec/freevecpooled.c @@ -57,13 +57,15 @@ if(!memory) return; +#ifdef HANDLE_MANAGED_MEM if 
(IsManagedMem(mhe)) { if (mhe->mhe_FreeVec) mhe->mhe_FreeVec(mhe, memory); } else - { +#endif + { if (memory != NULL) { IPTR *real = (IPTR *) memory; diff --git a/rom/exec/memory.c b/rom/exec/memory.c index 430c7edf3a..a0597430df 100644 --- a/rom/exec/memory.c +++ b/rom/exec/memory.c @@ -33,6 +33,7 @@ struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase) while (mh->mh_Node.ln_Succ != NULL) { +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -47,7 +48,8 @@ struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase) } } else - { +#endif + { /* Check if this MemHeader fits */ if (address >= mh->mh_Lower && address < mh->mh_Upper) { @@ -99,9 +101,9 @@ char *FormatMMContext(char *buffer, struct MMContext *ctx, struct ExecBase *SysB return buffer; } -/* #define NO_ALLOCATOR_CONTEXT */ +#define NO_ALLOCATOR_CONTEXT -#ifdef NO_ALLOCATOR_CONTEXT +#if defined(NO_ALLOCATOR_CONTEXT) struct MemHeaderAllocatorCtx * mhac_GetSysCtx(struct MemHeader * mh, struct ExecBase * SysBase) { @@ -379,8 +381,8 @@ void mhac_PoolMemHeaderSetup(struct MemHeader * mh, struct ProtectedPool * pool) #ifdef NO_CONSISTENCY_CHECKS -#define validateHeader(mh, op, addr, size, SysBase) TRUE -#define validateChunk(mc, prev, mh, op, addr, size, SysBase) TRUE +#define validateHeader(mh, op, addr, size, tp, SysBase) TRUE +#define validateChunk(mc, prev, mh, op, addr, size, tp, SysBase) TRUE #else @@ -444,10 +446,10 @@ static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struc UBYTE op, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase) { - if (((IPTR)p2->mc_Next & (MEMCHUNK_TOTAL-1)) || (p2->mc_Bytes == 0) || (p2->mc_Bytes & (MEMCHUNK_TOTAL-1)) || /* 1 */ - ((APTR)p2 + p2->mc_Bytes > mh->mh_Upper) || /* 2 */ - (p2->mc_Next && (((APTR)p2->mc_Next < (APTR)p2 + p2->mc_Bytes + MEMCHUNK_TOTAL) || /* 3 */ - ((APTR)p2->mc_Next > mh->mh_Upper - MEMCHUNK_TOTAL)))) + if (((IPTR)p2->mc_Next & 
(MEMCHUNK_TOTAL-1)) || (p2->mc_Bytes == 0) || (p2->mc_Bytes & (MEMCHUNK_TOTAL-1)) || /* 1 */ + ((APTR)p2 + p2->mc_Bytes > mh->mh_Upper) || /* 2 */ + (p2->mc_Next && (((APTR)p2->mc_Next < (APTR)p2 + p2->mc_Bytes + MEMCHUNK_TOTAL) || /* 3 */ + ((APTR)p2->mc_Next > mh->mh_Upper - MEMCHUNK_TOTAL)))) { if (tp) { @@ -471,6 +473,105 @@ static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struc #endif + #define MK_UBYTEPTR(a) ((UBYTE *)(a)) + +APTR stdAllocReverse(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR size, + ULONG requirements, struct TraceLocation *tp, struct ExecBase *SysBase) +{ + /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */ + IPTR byteSize = AROS_ROUNDUP2(size, MEMCHUNK_TOTAL); + struct MemChunk *mc=NULL, *p1, *p2; + + /* Validate MemHeader before doing anything. */ + if (!validateHeader(mh, MM_ALLOC, NULL, size, tp, SysBase)) + return NULL; + + /* Validate if there is even enough total free memory */ + if (mh->mh_Free < byteSize) + return NULL; + /* + * The free memory list is only single linked, i.e. to remove + * elements from the list I need the node's predecessor. For the + * first element I can use mh->mh_First instead of a real predecessor. + */ + p1 = mhac_GetBetterPrevMemChunk((struct MemChunk *)&mh->mh_First, size, mhac); + p2 = p1->mc_Next; + + /* + * Follow the memory list. p1 is the previous MemChunk, p2 is the current one. + * On 1st pass p1 points to mh->mh_First, so that changing p1->mc_Next actually + * changes mh->mh_First. + */ + while (p2 != NULL) + { + /* Validate the current chunk */ + if (!validateChunk(p2, p1, mh, MM_ALLOC, NULL, size, tp, SysBase)) + return NULL; + + /* Check if the current block is large enough */ + if (p2->mc_Bytes>=byteSize) + { + /* It is. */ + mc = p1; + } + /* Go to next block */ + p1 = p2; + p2 = p1->mc_Next; + } + + /* Something found? */ + if (mc != NULL) + { + /* Remember: MEMF_REVERSE is set p1 and p2 are now invalid. 
*/ + p1 = mc; + p2 = p1->mc_Next; + + mhac_MemChunkClaimed(p2, mhac); + + /* Remove the block from the list and return it. */ + if (p2->mc_Bytes == byteSize) + { + /* Fits exactly. Just relink the list. */ + p1->mc_Next = p2->mc_Next; + mc = p2; + } + else + { + struct MemChunk * pp = p1; + + /* Return the last bytes. */ + p1->mc_Next=p2; + mc = (struct MemChunk *)(MK_UBYTEPTR(p2)+p2->mc_Bytes-byteSize); + + p1 = p1->mc_Next; + p1->mc_Next = p2->mc_Next; + p1->mc_Bytes = p2->mc_Bytes-byteSize; + + mhac_MemChunkCreated(p1, pp, mhac); + } + + mh->mh_Free -= byteSize; + + /* Clear the block if requested */ + if (requirements & MEMF_CLEAR) + memset(mc, 0, byteSize); + } + + else + { + if (!mhac_IsIndexEmpty(mhac)) + { + /* + * Since chunks created during deallocation are not returned to index, + * retry with cleared index. + */ + mhac_ClearIndex(mhac); + mc = stdAlloc(mh, mhac, size, requirements, tp, SysBase); + } + } + return mc; +} + /* * Allocate block from the given MemHeader in a specific way. * This routine can be called with SysBase = NULL. @@ -479,6 +580,7 @@ static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struc * However if it was passed once for a given MemHeader it needs to be passed * in all consecutive calls. */ + APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR size, ULONG requirements, struct TraceLocation *tp, struct ExecBase *SysBase) { @@ -486,6 +588,7 @@ APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR siz * The check has to be done for the second time. Exec uses stdAlloc on memheader * passed upon startup. This is bad, very bad. 
So here a temporary hack :) */ +#ifdef HANDLE_MANAGED_MEM if ((mh->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -498,8 +601,13 @@ APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR siz return NULL; } else +#endif { - /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */ + if(requirements & MEMF_REVERSE) + { + return stdAllocReverse(mh, mhac, size, requirements, tp, SysBase); + } + /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */ IPTR byteSize = AROS_ROUNDUP2(size, MEMCHUNK_TOTAL); struct MemChunk *mc=NULL, *p1, *p2; @@ -508,10 +616,9 @@ APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR siz return NULL; /* Validate if there is even enough total free memory */ - if (mh->mh_Free < byteSize) + if (mh->mh_Free < byteSize) return NULL; - /* * The free memory list is only single linked, i.e. to remove * elements from the list I need the node's predecessor. For the @@ -536,11 +643,7 @@ APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR siz { /* It is. */ mc = p1; - - /* Use this one if MEMF_REVERSE is not set.*/ - if (!(requirements & MEMF_REVERSE)) - break; - /* Else continue - there may be more to come. */ + break; } /* Go to next block */ @@ -568,21 +671,12 @@ APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR siz { struct MemChunk * pp = p1; - if (requirements & MEMF_REVERSE) - { - /* Return the last bytes. */ - p1->mc_Next=p2; - mc = (struct MemChunk *)((UBYTE *)p2+p2->mc_Bytes-byteSize); - } - else - { - /* Return the first bytes. */ - p1->mc_Next=(struct MemChunk *)((UBYTE *)p2+byteSize); - mc=p2; - } + /* Return the first bytes. 
*/ + p1->mc_Next=(struct MemChunk *)(MK_UBYTEPTR(p2)+byteSize); + mc=p2; - p1 = p1->mc_Next; - p1->mc_Next = p2->mc_Next; + p1 = p1->mc_Next; + p1->mc_Next = p2->mc_Next; p1->mc_Bytes = p2->mc_Bytes-byteSize; mhac_MemChunkCreated(p1, pp, mhac); @@ -606,7 +700,6 @@ APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR siz mc = stdAlloc(mh, mhac, size, requirements, tp, SysBase); } } - return mc; } } @@ -623,6 +716,7 @@ void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, struct MemChunk *p1, *p2, *p3; UBYTE *p4; +#ifdef HANDLE_MANAGED_MEM if ((freeList->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(freeList)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)freeList; @@ -631,7 +725,8 @@ void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, mhe->mhe_Free(mhe, addr, size); } else - { +#endif + { /* Make sure the MemHeader is OK */ if (!validateHeader(freeList, MM_FREE, addr, size, tp, SysBase)) return; @@ -684,7 +779,7 @@ void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, If the memory to be freed overlaps with the current block something must be wrong. */ - if (p4>(UBYTE *)p2) + if (p4 > MK_UBYTEPTR(p2)) { bug("[MM] Chunk allocator error\n"); bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList); @@ -709,18 +804,17 @@ void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, { #if !defined(NO_CONSISTENCY_CHECKS) /* Check if they overlap. 
*/ - if ((UBYTE *)p1 + p1->mc_Bytes > (UBYTE *)p3) + if (MK_UBYTEPTR(p1) + p1->mc_Bytes > MK_UBYTEPTR(p3)) { bug("[MM] Chunk allocator error\n"); - bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList); - bug("[MM] Block overlaps (2) with chunk 0x%p (%u bytes)\n", p1, p1->mc_Bytes); - + bug("[MM] Attempt to free 0x%08x bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList); + bug("[MM] Block overlaps (2) with chunk 0x%p (0x%08x bytes)\n", p1, p1->mc_Bytes); Alert(AN_FreeTwice); return; } #endif /* Merge if possible */ - if ((UBYTE *)p1 + p1->mc_Bytes == (UBYTE *)p3) + if ((MK_UBYTEPTR(p1) + p1->mc_Bytes) == MK_UBYTEPTR(p3)) { mhac_MemChunkClaimed(p1, mhac); p3 = p1; @@ -740,7 +834,7 @@ void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, p1->mc_Next = p3; /* Try to merge with next block (if there is one ;-) ). */ - if (p4 == (UBYTE *)p2 && p2 != NULL) + if (p4 == MK_UBYTEPTR(p2) && p2 != NULL) { /* Overlap checking already done. Doing it here after @@ -752,7 +846,7 @@ void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, } /* relink the list and return. 
*/ p3->mc_Next = p2; - p3->mc_Bytes = p4 - (UBYTE *)p3; + p3->mc_Bytes = p4 - MK_UBYTEPTR(p3); freeList->mh_Free += byteSize; if (p1->mc_Next==p3) mhac_MemChunkCreated(p3, p1, mhac); } @@ -795,7 +889,8 @@ APTR AllocMemHeader(IPTR size, ULONG flags, struct TraceLocation *loc, struct Ex { struct MemHeader *orig = FindMem(mh, SysBase); - if (IsManagedMem(orig)) +#ifdef HANDLE_MANAGED_MEM + if (IsManagedMem(orig)) { struct MemHeaderExt *mhe_orig = (struct MemHeaderExt *)orig; struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -837,7 +932,8 @@ APTR AllocMemHeader(IPTR size, ULONG flags, struct TraceLocation *loc, struct Ex mhe->mhe_InitPool(mhe, size, size - header_size); } else - { +#endif + { size -= MEMHEADER_TOTAL; /* @@ -868,12 +964,13 @@ void FreeMemHeader(APTR addr, struct TraceLocation *loc, struct ExecBase *SysBas IPTR size = (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)addr; +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mhe)) { if (mhe->mhe_DestroyPool) mhe->mhe_DestroyPool(mhe); } - +#endif DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size, addr)); nommu_FreeMem(addr, size, loc, SysBase); } diff --git a/rom/exec/memory.h b/rom/exec/memory.h index 69ba623a52..7eaba5fd2d 100644 --- a/rom/exec/memory.h +++ b/rom/exec/memory.h @@ -13,6 +13,10 @@ #include #include +#define NO_CONSISTENCY_CHECKS 1 +/* #define HANDLE_MANAGED_MEM */ /* guards below use #ifdef, so defining this to ANY value (even 0) compiles managed-memory support back in; leave undefined to disable it */ + + #if defined(__AROSEXEC_SMP__) #define MEM_LOCK do { Forbid(); EXEC_SPINLOCK_LOCK(&PrivExecBase(SysBase)->MemListSpinLock, NULL, SPINLOCK_MODE_WRITE); } while(0) #define MEM_LOCK_SHARED do { Forbid(); EXEC_SPINLOCK_LOCK(&PrivExecBase(SysBase)->MemListSpinLock, NULL, SPINLOCK_MODE_READ); } while(0) diff --git a/rom/exec/memory_nommu.c b/rom/exec/memory_nommu.c index 937e2f51ea..a909651ee9 100644 --- a/rom/exec/memory_nommu.c +++ b/rom/exec/memory_nommu.c @@ -36,10 +36,10 @@ APTR nommu_AllocMem(IPTR byteSize, ULONG flags, struct TraceLocation *loc, struc * The requirements are OK if there's no bit in the * 'attributes' that isn't 
set in the 'mh->mh_Attributes'. */ - if ((requirements & ~mh->mh_Attributes) - || mh->mh_Free < byteSize) + if ((requirements & ~mh->mh_Attributes) || mh->mh_Free < byteSize) continue; +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -48,7 +48,8 @@ APTR nommu_AllocMem(IPTR byteSize, ULONG flags, struct TraceLocation *loc, struc res = mhe->mhe_Alloc(mhe, byteSize, &flags); } else - { +#endif + { res = stdAlloc(mh, mhac_GetSysCtx(mh, SysBase), byteSize, flags, loc, SysBase); } if (res) @@ -72,6 +73,7 @@ APTR nommu_AllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase) /* Loop over MemHeader structures */ ForeachNode(&SysBase->MemList, mh) { +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -88,7 +90,8 @@ APTR nommu_AllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase) } } else - if (mh->mh_Lower <= location && mh->mh_Upper >= endlocation) +#endif + if (mh->mh_Lower <= location && mh->mh_Upper >= endlocation) break; } @@ -211,6 +214,7 @@ void nommu_FreeMem(APTR memoryBlock, IPTR byteSize, struct TraceLocation *loc, s ForeachNode(&SysBase->MemList, mh) { +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -224,7 +228,8 @@ void nommu_FreeMem(APTR memoryBlock, IPTR byteSize, struct TraceLocation *loc, s } else - { +#endif + { /* Test if the memory belongs to this MemHeader. */ if (mh->mh_Lower > memoryBlock || mh->mh_Upper < blockEnd) continue; @@ -275,7 +280,7 @@ IPTR nommu_AvailMem(ULONG attributes, struct ExecBase *SysBase) D(bug("[MM] Skipping (mh_Attributes = 0x%08X\n", mh->mh_Attributes);) continue; } - +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -295,7 +300,7 @@ IPTR nommu_AvailMem(ULONG attributes, struct ExecBase *SysBase) continue; } } - +#endif /* Find largest chunk? 
*/ if (attributes & MEMF_LARGEST) { diff --git a/rom/exec/prepareexecbase.c b/rom/exec/prepareexecbase.c index a009d35bc6..8b0317eeb2 100644 --- a/rom/exec/prepareexecbase.c +++ b/rom/exec/prepareexecbase.c @@ -120,6 +120,7 @@ static APTR allocmem(struct MemHeader *mh, ULONG size, ULONG attributes) { APTR ret; +#ifdef HANDLE_MANAGED_MEM if (IsManagedMem(mh)) { struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh; @@ -130,7 +131,8 @@ static APTR allocmem(struct MemHeader *mh, ULONG size, ULONG attributes) ret = 0; } else - { +#endif + { ret = stdAlloc(mh, NULL, size, attributes, NULL, NULL); }