/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "VMKernelAddressSpace.h"
#include <stdlib.h>
#include <KernelExport.h>
#include <heap.h>
#include <slab/Slab.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
#ifdef TRACE_VM
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
#ifdef PARANOIA_CHECKS
# define PARANOIA_CHECK_STRUCTURES() _CheckStructures()
#else
# define PARANOIA_CHECK_STRUCTURES() do {} while (false)
#endif
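// Returns the index of the most significant set bit in "value", i.e. the
// floor of its binary logarithm (e.g. ld(4096) == 12). Returns -1 for 0.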
static int
ld(size_t value)
{
int index = -1;
while (value > 0) {
value >>= 1;
index++;
}
return index;
}
/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
&& alignedBase + (size - 1) <= limit);
}
VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
:
VMAddressSpace(id, base, size, "kernel address space"),
fAreaObjectCache(NULL),
fRangesObjectCache(NULL)
{
}
VMKernelAddressSpace::~VMKernelAddressSpace()
{
panic("deleting the kernel aspace!\n");
}
status_t
VMKernelAddressSpace::InitObject()
{
fAreaObjectCache = create_object_cache("kernel areas",
sizeof(VMKernelArea), 0);
if (fAreaObjectCache == NULL)
return B_NO_MEMORY;
fRangesObjectCache = create_object_cache("kernel address ranges",
sizeof(Range), 0);
if (fRangesObjectCache == NULL)
return B_NO_MEMORY;
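	// one free list per power-of-two size class, from B_PAGE_SIZE up to the
	// size of the whole address space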
size_t size = fEndAddress - fBase + 1;
fFreeListCount = ld(size) - PAGE_SHIFT + 1;
fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
if (fFreeLists == NULL)
return B_NO_MEMORY;
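	// the address space initially consists of a single free range spanning
	// it completely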
Range* range = new(fRangesObjectCache, 0) Range(fBase, size,
Range::RANGE_FREE);
if (range == NULL)
return B_NO_MEMORY;
_InsertRange(range);
TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
fFreeListCount);
return B_OK;
}
inline VMArea*
VMKernelAddressSpace::FirstArea() const
{
Range* range = fRangeList.Head();
while (range != NULL && range->type != Range::RANGE_AREA)
range = fRangeList.GetNext(range);
return range != NULL ? range->area : NULL;
}
inline VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
Range* range = static_cast<VMKernelArea*>(_area)->Range();
do {
range = fRangeList.GetNext(range);
} while (range != NULL && range->type != Range::RANGE_AREA);
return range != NULL ? range->area : NULL;
}
VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
uint32 protection, uint32 allocationFlags)
{
return VMKernelArea::Create(this, name, wiring, protection,
fAreaObjectCache, allocationFlags);
}
void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
object_cache_delete(fAreaObjectCache, area);
}
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
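	// get the range that starts at or before the given address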
Range* range = fRangeTree.FindClosest(address, true);
if (range == NULL || range->type != Range::RANGE_AREA)
return NULL;
VMKernelArea* area = range->area;
return area->ContainsAddress(address) ? area : NULL;
}
VMArea*
VMKernelAddressSpace::FindClosestArea(addr_t address, bool less) const
{
Range* range = fRangeTree.FindClosest(address, less);
while (range != NULL && range->type != Range::RANGE_AREA)
range = less ? fRangeTree.Previous(range) : fRangeTree.Next(range);
return range != NULL ? range->area : NULL;
}
/*!	Inserts the given area into this address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
const virtual_address_restrictions* addressRestrictions,
uint32 allocationFlags, void** _address)
{
TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
", %p \"%s\")\n", addressRestrictions->address,
addressRestrictions->address_specification, size, _area, _area->name);
ASSERT_WRITE_LOCKED_RW_LOCK(&fLock);
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
Range* range;
status_t error = _AllocateRange(addressRestrictions, size,
addressRestrictions->address_specification == B_EXACT_ADDRESS,
allocationFlags, range);
if (error != B_OK)
return error;
range->type = Range::RANGE_AREA;
range->area = area;
area->SetRange(range);
area->SetBase(range->base);
area->SetSize(range->size);
if (_address != NULL)
*_address = (void*)area->Base();
fFreeSpace -= area->Size();
PARANOIA_CHECK_STRUCTURES();
return B_OK;
}
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);
ASSERT_WRITE_LOCKED_RW_LOCK(&fLock);
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
_FreeRange(area->Range(), allocationFlags);
fFreeSpace += area->Size();
PARANOIA_CHECK_STRUCTURES();
}
bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
Range* range = static_cast<VMKernelArea*>(area)->Range();
if (newSize <= range->size)
return true;
Range* nextRange = fRangeList.GetNext(range);
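	// The area can only grow into an immediately following free range, or
	// into reserved space belonging to the reservation the area itself was
	// carved from (i.e. with reserved.base <= the area's base).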
if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
return false;
if (nextRange->type == Range::RANGE_RESERVED
&& nextRange->reserved.base > range->base) {
return false;
}
return newSize - range->size <= nextRange->size;
}
status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
uint32 allocationFlags)
{
TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
newSize);
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
Range* range = area->Range();
if (newSize == range->size)
return B_OK;
Range* nextRange = fRangeList.GetNext(range);
if (newSize < range->size) {
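		// the area shrinks -- hand the cut-off tail over to the following
		// free range, or create a new free range for it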
if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
_FreeListRemoveRange(nextRange, nextRange->size);
nextRange->size += range->size - newSize;
nextRange->base = range->base + newSize;
_FreeListInsertRange(nextRange, nextRange->size);
} else {
nextRange = new(fRangesObjectCache, allocationFlags) Range(
range->base + newSize, range->size - newSize,
Range::RANGE_FREE);
if (nextRange == NULL)
return B_NO_MEMORY;
_InsertRange(nextRange);
}
} else {
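		// the area grows -- consume (part of) the following free or reserved
		// range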
		if (nextRange == NULL || nextRange->type == Range::RANGE_AREA
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
size_t sizeDiff = newSize - range->size;
if (sizeDiff > nextRange->size)
return B_BAD_VALUE;
if (sizeDiff == nextRange->size) {
_RemoveRange(nextRange);
object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
} else {
if (nextRange->type == Range::RANGE_FREE)
_FreeListRemoveRange(nextRange, nextRange->size);
nextRange->size -= sizeDiff;
nextRange->base = range->base + newSize;
if (nextRange->type == Range::RANGE_FREE)
_FreeListInsertRange(nextRange, nextRange->size);
}
}
range->size = newSize;
area->SetSize(newSize);
IncrementChangeCount();
PARANOIA_CHECK_STRUCTURES();
return B_OK;
}
status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
uint32 allocationFlags)
{
TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
newSize);
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
Range* range = area->Range();
if (newSize == range->size)
return B_OK;
if (newSize > range->size)
return B_BAD_VALUE;
Range* previousRange = fRangeList.GetPrevious(range);
size_t sizeDiff = range->size - newSize;
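	// If the preceding range is free, let it absorb the cut-off head;
	// otherwise turn the head into a free range of its own.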
if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
_FreeListRemoveRange(previousRange, previousRange->size);
previousRange->size += sizeDiff;
_FreeListInsertRange(previousRange, previousRange->size);
range->base += sizeDiff;
range->size = newSize;
} else {
previousRange = new(fRangesObjectCache, allocationFlags) Range(
range->base, sizeDiff, Range::RANGE_FREE);
if (previousRange == NULL)
return B_NO_MEMORY;
range->base += sizeDiff;
range->size = newSize;
_InsertRange(previousRange);
}
area->SetBase(range->base);
area->SetSize(range->size);
IncrementChangeCount();
PARANOIA_CHECK_STRUCTURES();
return B_OK;
}
status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags)
{
return ResizeArea(area, newSize, allocationFlags);
}
status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
const virtual_address_restrictions* addressRestrictions,
uint32 flags, uint32 allocationFlags, void** _address)
{
TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
addressRestrictions->address_specification, size, flags);
if (fDeleting)
return B_BAD_TEAM_ID;
Range* range;
status_t error = _AllocateRange(addressRestrictions, size, false,
allocationFlags, range);
if (error != B_OK)
return error;
range->type = Range::RANGE_RESERVED;
range->reserved.base = range->base;
range->reserved.flags = flags;
if (_address != NULL)
*_address = (void*)range->base;
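	// hold a reference to the address space while the reservation exists; it
	// is released again when the range is unreserved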
Get();
PARANOIA_CHECK_STRUCTURES();
return B_OK;
}
status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
uint32 allocationFlags)
{
TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
B_PRIxSIZE ")\n", address, size);
if (fDeleting)
return B_BAD_TEAM_ID;
addr_t endAddress = address + (size - 1);
Range* range = fRangeTree.FindClosest(address, false);
while (range != NULL && range->base + (range->size - 1) <= endAddress) {
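		// Determine the next non-free range up front: freeing the current
		// range coalesces it with neighboring free ranges, which would
		// otherwise invalidate our iteration pointer.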
Range* nextRange = fRangeList.GetNext(range);
while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
nextRange = fRangeList.GetNext(nextRange);
if (range->type == Range::RANGE_RESERVED) {
_FreeRange(range, allocationFlags);
Put();
}
range = nextRange;
}
PARANOIA_CHECK_STRUCTURES();
return B_OK;
}
void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
Range* range = fRangeList.Head();
while (range != NULL) {
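		// Determine the next non-free range up front: freeing the current
		// range coalesces it with neighboring free ranges, which would
		// otherwise invalidate our iteration pointer.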
Range* nextRange = fRangeList.GetNext(range);
while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
nextRange = fRangeList.GetNext(nextRange);
if (range->type == Range::RANGE_RESERVED) {
_FreeRange(range, allocationFlags);
Put();
}
range = nextRange;
}
PARANOIA_CHECK_STRUCTURES();
}
void
VMKernelAddressSpace::Dump() const
{
VMAddressSpace::Dump();
kprintf("range list:\n");
for (RangeList::ConstIterator it = fRangeList.GetIterator();
Range* range = it.Next();) {
switch (range->type) {
case Range::RANGE_AREA:
{
VMKernelArea* area = range->area;
kprintf(" area %" B_PRId32 ": ", area->id);
kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
kprintf("size = %#" B_PRIxSIZE " ", area->Size());
kprintf("name = '%s' ", area->name);
kprintf("protection = %#" B_PRIx32 "\n", area->protection);
break;
}
case Range::RANGE_RESERVED:
kprintf(" reserved: base_addr = %#" B_PRIxADDR
" reserved_base = %#" B_PRIxADDR " size = %#"
B_PRIxSIZE " flags = %#" B_PRIx32 "\n", range->base,
range->reserved.base, range->size, range->reserved.flags);
break;
case Range::RANGE_FREE:
kprintf(" free: base_addr = %#" B_PRIxADDR " size = %#"
B_PRIxSIZE "\n", range->base, range->size);
break;
}
}
}
inline void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
TRACE(" VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
range->size, range->type, size, ld(size) - PAGE_SHIFT);
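	// free list i holds the ranges with sizes in
	// [2^(PAGE_SHIFT + i), 2^(PAGE_SHIFT + i + 1))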
fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}
inline void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
TRACE(" VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
range->size, range->type, size, ld(size) - PAGE_SHIFT);
fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}
void
VMKernelAddressSpace::_InsertRange(Range* range)
{
TRACE(" VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);
Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
fRangeList.InsertBefore(
insertBeforeRange != NULL
? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
range);
fRangeTree.Insert(range);
if (range->type == Range::RANGE_FREE)
_FreeListInsertRange(range, range->size);
}
void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
TRACE(" VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);
fRangeTree.Remove(range);
fRangeList.Remove(range);
if (range->type == Range::RANGE_FREE)
_FreeListRemoveRange(range, range->size);
}
status_t
VMKernelAddressSpace::_AllocateRange(
const virtual_address_restrictions* addressRestrictions,
size_t size, bool allowReservedRange, uint32 allocationFlags,
Range*& _range)
{
TRACE(" VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
addressRestrictions->address, size,
addressRestrictions->address_specification, allowReservedRange);
addr_t address = (addr_t)addressRestrictions->address;
size = ROUNDUP(size, B_PAGE_SIZE);
size_t alignment = addressRestrictions->alignment != 0
? addressRestrictions->alignment : B_PAGE_SIZE;
switch (addressRestrictions->address_specification) {
case B_EXACT_ADDRESS:
{
if (address % B_PAGE_SIZE != 0)
return B_BAD_VALUE;
break;
}
case B_BASE_ADDRESS:
address = ROUNDUP(address, B_PAGE_SIZE);
break;
case B_ANY_KERNEL_BLOCK_ADDRESS:
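			// align the memory to the next power of two of the size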
while (alignment < size)
alignment <<= 1;
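			// fall through...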
case B_ANY_ADDRESS:
case B_ANY_KERNEL_ADDRESS:
address = fBase;
break;
default:
return B_BAD_VALUE;
}
Range* range = _FindFreeRange(address, size, alignment,
addressRestrictions->address_specification, allowReservedRange,
address);
if (range == NULL) {
return addressRestrictions->address_specification == B_EXACT_ADDRESS
? B_BAD_VALUE : B_NO_MEMORY;
}
TRACE(" VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
range->type);
size_t rangeSize = range->size;
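	// We found a range. If it is larger than the allocation, split it so
	// that the requested part becomes a range of its own.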
if (address == range->base) {
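		// allocation at the beginning of the range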
if (range->size > size) {
Range* leftOverRange = new(fRangesObjectCache, allocationFlags)
Range(address + size, range->size - size, range);
if (leftOverRange == NULL)
return B_NO_MEMORY;
range->size = size;
_InsertRange(leftOverRange);
}
} else if (address + size == range->base + range->size) {
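		// allocation at the end of the range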
Range* leftOverRange = new(fRangesObjectCache, allocationFlags) Range(
range->base, range->size - size, range);
if (leftOverRange == NULL)
return B_NO_MEMORY;
range->base = address;
range->size = size;
_InsertRange(leftOverRange);
} else {
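		// allocation in the middle of the range -- we need two leftover
		// ranges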
Range* leftOverRange1 = new(fRangesObjectCache, allocationFlags) Range(
range->base, address - range->base, range);
if (leftOverRange1 == NULL)
return B_NO_MEMORY;
Range* leftOverRange2 = new(fRangesObjectCache, allocationFlags) Range(
address + size, range->size - size - leftOverRange1->size, range);
if (leftOverRange2 == NULL) {
object_cache_delete(fRangesObjectCache, leftOverRange1,
allocationFlags);
return B_NO_MEMORY;
}
range->base = address;
range->size = size;
_InsertRange(leftOverRange1);
_InsertRange(leftOverRange2);
}
if (range->type == Range::RANGE_FREE)
_FreeListRemoveRange(range, rangeSize);
IncrementChangeCount();
TRACE(" VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
B_PRIxSIZE ")\n", range, range->base, range->size);
_range = range;
return B_OK;
}
VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
size_t alignment, uint32 addressSpec, bool allowReservedRange,
addr_t& _foundAddress)
{
TRACE(" VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
addressSpec, allowReservedRange);
switch (addressSpec) {
case B_BASE_ADDRESS:
{
Range* range = fRangeTree.FindClosest(start, true);
while (range != NULL) {
if (range->type == Range::RANGE_FREE) {
addr_t alignedBase = ROUNDUP(range->base, alignment);
if (is_valid_spot(start, alignedBase, size,
range->base + (range->size - 1))) {
_foundAddress = alignedBase;
return range;
}
}
range = fRangeList.GetNext(range);
}
start = fBase;
addressSpec = B_ANY_ADDRESS;
}
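			// nothing suitable from the base address on -- fall through and
			// retry as an anywhere-allocation from the bottom of the space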
case B_ANY_ADDRESS:
case B_ANY_KERNEL_ADDRESS:
case B_ANY_KERNEL_BLOCK_ADDRESS:
{
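			// Start with the first free list whose ranges are all guaranteed
			// to be at least "size" bytes large ("size * 2 - 1" rounds the
			// size class up).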
int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);
for (int32 i = freeListIndex; i < fFreeListCount; i++) {
RangeFreeList& freeList = fFreeLists[i];
if (freeList.IsEmpty())
continue;
for (RangeFreeList::Iterator it = freeList.GetIterator();
Range* range = it.Next();) {
addr_t alignedBase = ROUNDUP(range->base, alignment);
if (is_valid_spot(start, alignedBase, size,
range->base + (range->size - 1))) {
_foundAddress = alignedBase;
return range;
}
}
}
if (!allowReservedRange)
return NULL;
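			// no free range is large enough -- carve the allocation out of
			// a reserved range instead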
Range* range = fRangeTree.FindClosest(start, true);
while (range != NULL) {
if (range->type == Range::RANGE_RESERVED) {
addr_t alignedBase = ROUNDUP(range->base, alignment);
if (is_valid_spot(start, alignedBase, size,
range->base + (range->size - 1))) {
if ((range->reserved.flags & RESERVED_AVOID_BASE)
!= 0) {
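							// The reservation wants allocations to avoid its
							// base, so place the allocation at the end of
							// the range instead.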
alignedBase = ROUNDDOWN(
range->base + (range->size - size), alignment);
}
_foundAddress = alignedBase;
return range;
}
}
range = fRangeList.GetNext(range);
}
return NULL;
}
case B_EXACT_ADDRESS:
{
Range* range = fRangeTree.FindClosest(start, true);
TRACE(" B_EXACT_ADDRESS: range: %p\n", range);
if (range == NULL || range->type == Range::RANGE_AREA
|| range->base + (range->size - 1) < start + (size - 1)) {
TRACE(" -> no suitable range\n");
return NULL;
}
			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
TRACE(" -> reserved range not allowed\n");
return NULL;
}
_foundAddress = start;
return range;
}
default:
return NULL;
}
}
void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
TRACE(" VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);
Range* previousRange = fRangeList.GetPrevious(range);
Range* nextRange = fRangeList.GetNext(range);
if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
_FreeListRemoveRange(previousRange, previousRange->size);
_RemoveRange(range);
_RemoveRange(nextRange);
previousRange->size += range->size + nextRange->size;
object_cache_delete(fRangesObjectCache, range, allocationFlags);
object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
_FreeListInsertRange(previousRange, previousRange->size);
} else {
_FreeListRemoveRange(previousRange, previousRange->size);
_RemoveRange(range);
previousRange->size += range->size;
object_cache_delete(fRangesObjectCache, range, allocationFlags);
_FreeListInsertRange(previousRange, previousRange->size);
}
} else {
if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
_RemoveRange(nextRange);
range->size += nextRange->size;
object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
}
range->type = Range::RANGE_FREE;
_FreeListInsertRange(range, range->size);
}
IncrementChangeCount();
}
#ifdef PARANOIA_CHECKS
void
VMKernelAddressSpace::_CheckStructures() const
{
fRangeTree.CheckTree();
size_t spaceSize = fEndAddress - fBase + 1;
addr_t nextBase = fBase;
Range* previousRange = NULL;
int previousRangeType = Range::RANGE_AREA;
uint64 freeRanges = 0;
RangeList::ConstIterator listIt = fRangeList.GetIterator();
RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();
while (true) {
Range* range = listIt.Next();
Range* treeRange = treeIt.Next();
if (range != treeRange) {
panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
"mismatch: %p vs %p", range, treeRange);
}
if (range == NULL)
break;
if (range->base != nextBase) {
panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
}
if (range->size == 0) {
panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
range);
}
if (range->size % B_PAGE_SIZE != 0) {
panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
range->base, range->size);
}
if (range->size > spaceSize - (range->base - fBase)) {
panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
B_PRIxADDR, range->base, range->size, fEndAddress);
}
if (range->type == Range::RANGE_FREE) {
freeRanges++;
if (previousRangeType == Range::RANGE_FREE) {
panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
previousRange->base, previousRange->size, range,
range->base, range->size);
}
}
previousRange = range;
nextBase = range->base + range->size;
previousRangeType = range->type;
}
if (nextBase - 1 != fEndAddress) {
panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
nextBase - 1, fEndAddress);
}
uint64 freeListRanges = 0;
for (int i = 0; i < fFreeListCount; i++) {
RangeFreeList& freeList = fFreeLists[i];
if (freeList.IsEmpty())
continue;
for (RangeFreeList::Iterator it = freeList.GetIterator();
Range* range = it.Next();) {
if (range->type != Range::RANGE_FREE) {
panic("VMKernelAddressSpace::_CheckStructures(): non-free "
"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
"free list %d", range, range->base, range->size,
range->type, i);
}
if (fRangeTree.Find(range->base) != range) {
panic("VMKernelAddressSpace::_CheckStructures(): unknown "
"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
"free list %d", range, range->base, range->size,
range->type, i);
}
if (ld(range->size) - PAGE_SHIFT != i) {
panic("VMKernelAddressSpace::_CheckStructures(): "
"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
"wrong free list %d", range, range->base, range->size,
range->type, i);
}
freeListRanges++;
}
}
}
#endif