* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#include <OS.h>
#include <string.h>
#include <algorithm>
#include <kernel.h>
#include <boot/stage2.h>
#include <boot/platform.h>
// Size of one heap chunk requested from the platform at a time by
// kernel_args_malloc().
static const size_t kChunkSize = 16 * B_PAGE_SIZE;

kernel_args gKernelArgs;
KMessage gBootParams;

// Bump-allocator state for the kernel args heap (see kernel_args_malloc()):
static void* sFirstFree;	// next free byte in the current chunk (NULL until
							// the first chunk has been allocated)
static void* sLast;			// start address of the most recent allocation;
							// kernel_args_free() can only free this one
static size_t sFree = kChunkSize;	// bytes still free in the current chunk
/*!	Registers the given memory range as part of the kernel args, so that the
	kernel knows it must preserve it. Returns B_OK, or B_ENTRY_NOT_FOUND if
	the kernel_args_range array is already full.
*/
static status_t
add_kernel_args_range(void* start, size_t size)
{
	return insert_address_range(gKernelArgs.kernel_args_range,
		&gKernelArgs.num_kernel_args_ranges, MAX_KERNEL_ARGS_RANGE,
		(addr_t)start, size);
}
/*!	Removes the entry at \a index from the \a ranges array, shifting any
	following entries down by one slot and decrementing \a numRanges.
*/
static void
remove_range_index(addr_range* ranges, uint32& numRanges, uint32 index)
{
	// Number of entries that follow the removed slot; zero when the last
	// entry is removed, in which case memmove() degenerates to a no-op.
	uint32 tailCount = numRanges - index - 1;
	if (tailCount > 0) {
		memmove(&ranges[index], &ranges[index + 1],
			tailCount * sizeof(addr_range));
	}

	numRanges--;
}
/*!	Inserts the given (start, size) range into the given
	addr_range array.
It will extend existing ranges in order to have as little
ranges in the array as possible.
Returns B_OK on success, or B_ENTRY_NOT_FOUND if there was
no free array entry available anymore.
*/
extern "C" status_t
insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
uint64 start, uint64 size)
{
uint32 numRanges = *_numRanges;
start = ROUNDDOWN(start, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
uint64 end = start + size;
for (uint32 i = 0; i < numRanges; i++) {
uint64 rangeStart = ranges[i].start;
uint64 rangeEnd = rangeStart + ranges[i].size;
if (end < rangeStart || start > rangeEnd) {
continue;
}
if (start >= rangeStart && end <= rangeEnd) {
return B_OK;
}
if (start < rangeStart) {
ranges[i].start = start;
ranges[i].size += rangeStart - start;
}
if (end > ranges[i].start + ranges[i].size) {
ranges[i].size = end - ranges[i].start;
}
for (uint32 j = 0; j < numRanges; j++) {
if (i == j)
continue;
rangeStart = ranges[i].start;
rangeEnd = rangeStart + ranges[i].size;
uint64 joinStart = ranges[j].start;
uint64 joinEnd = joinStart + ranges[j].size;
if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
if (joinStart < rangeStart) {
ranges[i].size += rangeStart - joinStart;
ranges[i].start = joinStart;
}
remove_range_index(ranges, numRanges, j--);
} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
ranges[i].size += joinEnd - rangeEnd;
remove_range_index(ranges, numRanges, j--);
}
}
*_numRanges = numRanges;
return B_OK;
}
if (numRanges >= maxRanges)
return B_ENTRY_NOT_FOUND;
ranges[numRanges].start = (uint64)start;
ranges[numRanges].size = size;
(*_numRanges)++;
return B_OK;
}
extern "C" status_t
remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
uint64 start, uint64 size)
{
uint32 numRanges = *_numRanges;
uint64 end = ROUNDUP(start + size, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
for (uint32 i = 0; i < numRanges; i++) {
uint64 rangeStart = ranges[i].start;
uint64 rangeEnd = rangeStart + ranges[i].size;
if (start <= rangeStart) {
if (end <= rangeStart) {
} else if (end >= rangeEnd) {
remove_range_index(ranges, numRanges, i);
i--;
} else {
ranges[i].start = end;
ranges[i].size = rangeEnd - end;
}
} else if (end >= rangeEnd) {
if (start < rangeEnd) {
ranges[i].size = start - rangeStart;
}
} else {
ranges[i].size = start - rangeStart;
return insert_address_range(ranges, _numRanges, maxRanges, end,
rangeEnd - end);
}
}
*_numRanges = numRanges;
return B_OK;
}
/*!	Searches for a free spot of at least \a size bytes, starting at \a base,
	that does not intersect any of the (allocated) \a ranges. On success
	stores the found base address in \a _rangeBase and returns true; returns
	false if the search overflowed the address space.
*/
bool
get_free_address_range(addr_range* ranges, uint32 numRanges, uint64 base,
	uint64 size, uint64* _rangeBase)
{
	uint64 end = base + size - 1;
	if (end < base)	// overflow -- request doesn't fit the address space
		return false;

	for (uint32 i = 0; i < numRanges;) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = ranges[i].start + ranges[i].size - 1;

		if (base <= rangeEnd && rangeStart <= end) {
			// candidate [base, end] collides with entry i: move the
			// candidate just past this entry and rescan all entries,
			// since the ranges are not necessarily sorted
			base = rangeEnd + 1;
			end = rangeEnd + size;
			if (base == 0 || end < base)	// wrapped around -- give up
				return false;

			i = 0;
			continue;
		}

		i++;
	}

	*_rangeBase = base;
	return true;
}
/*!	Returns whether [base, base + size) is completely covered by the given
	ranges, i.e. every byte of it lies inside at least one entry.
*/
bool
is_address_range_covered(addr_range* ranges, uint32 numRanges, uint64 base,
	uint64 size)
{
	uint32 i = 0;
	while (i < numRanges) {
		const uint64 entryStart = ranges[i].start;
		const uint64 entrySize = ranges[i].size;

		// Does this entry cover the current position `base`?
		if (entryStart <= base && base - entryStart < entrySize) {
			// Consume the covered prefix, then rescan from the beginning,
			// since the array is not necessarily sorted.
			uint64 covered = std::min(entryStart + entrySize - base, size);
			base += covered;
			size -= covered;
			if (size == 0)
				return true;

			i = 0;
		} else
			i++;
	}

	return false;
}
extern "C" uint64
total_address_ranges_size(addr_range* ranges, uint32 numRanges)
{
uint64 total = 0;
for (uint32 i = 0; i < numRanges; i++)
total += ranges[i].size;
return total;
}
/*!	Sorts the given array by ascending start address, in place.
	A simple bubble sort suffices: the arrays involved are small.
*/
void
sort_address_ranges(addr_range* ranges, uint32 numRanges)
{
	bool swapped = true;
	while (swapped) {
		swapped = false;
		for (uint32 i = 1; i < numRanges; i++) {
			if (ranges[i].start < ranges[i - 1].start) {
				std::swap(ranges[i], ranges[i - 1]);
				swapped = true;
			}
		}
	}
}
/*!	Registers a range of physical memory with the kernel args,
	merging it with adjacent or overlapping entries where possible. */
status_t
insert_physical_memory_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.physical_memory_range,
		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}
/*!	Removes a range from the kernel args' physical memory ranges. */
status_t
remove_physical_memory_range(uint64 start, uint64 size)
{
	return remove_address_range(gKernelArgs.physical_memory_range,
		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}
/*!	Returns the total size of all registered physical memory ranges. */
uint64
total_physical_memory()
{
	return total_address_ranges_size(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
}
/*!	Marks a range of physical memory as allocated in the kernel args. */
status_t
insert_physical_allocated_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.physical_allocated_range,
		&gKernelArgs.num_physical_allocated_ranges,
		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
}
/*!	Removes a range from the kernel args' allocated physical ranges. */
status_t
remove_physical_allocated_range(uint64 start, uint64 size)
{
	return remove_address_range(gKernelArgs.physical_allocated_range,
		&gKernelArgs.num_physical_allocated_ranges, MAX_PHYSICAL_ALLOCATED_RANGE,
		start, size);
}
/*!	Marks a range of virtual address space as allocated in the kernel args. */
status_t
insert_virtual_allocated_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.virtual_allocated_range,
		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}
/*!	Removes a range from the kernel args' allocated virtual ranges. */
status_t
remove_virtual_allocated_range(uint64 start, uint64 size)
{
	return remove_address_range(gKernelArgs.virtual_allocated_range,
		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}
#if B_HAIKU_PHYSICAL_BITS > 32

/*!	Drops or truncates all physical memory ranges that lie (partially)
	above the 4 GB boundary, accounting the lost bytes in
	gKernelArgs.ignored_physical_memory. The ranges are sorted first, so
	the scan can walk backwards and stop at the first range that begins
	below the limit.
*/
void
ignore_physical_memory_ranges_beyond_4gb()
{
	// sort, so that we can drop higher ranges faster
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);

	static const uint64 kLimit = (uint64)1 << 32;

	// Walk from the highest range downwards.
	for (uint32 i = gKernelArgs.num_physical_memory_ranges; i > 0; i--) {
		addr_range& range = gKernelArgs.physical_memory_range[i - 1];
		if (range.start >= kLimit) {
			// the complete range is beyond the limit -- drop it entirely
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", range.start,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory += range.size;
			gKernelArgs.num_physical_memory_ranges = i - 1;
			continue;
		}

		if ((range.start + range.size) > kLimit) {
			// the range is partially beyond the limit -- truncate it
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", kLimit,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory
				+= range.size - (kLimit - range.start);
			range.size = kLimit - range.start;
		}

		// all remaining (lower) ranges are below the limit
		break;
	}
}

#endif
/*!	Convenience function that copies the given string into the
	kernel args heap.
*/
/*!	Duplicates \a string onto the kernel args heap, including the
	terminating NUL. Returns NULL for a NULL or empty input, or when the
	heap allocation fails.
*/
char*
kernel_args_strdup(const char* string)
{
	// reject NULL and empty strings outright
	if (string == NULL || string[0] == '\0')
		return NULL;

	size_t length = strlen(string) + 1;
		// + 1 for the terminating NUL

	char* copy = (char*)kernel_args_malloc(length);
	if (copy == NULL)
		return NULL;

	return (char*)memcpy(copy, string, length);
}
/*!	This function is used to allocate memory for data that
	is going to be passed over to the kernel. For example, the preloaded_image
structures are allocated this way.
The boot loader heap doesn't make it into the kernel!
*/
void*
kernel_args_malloc(size_t size, uint8 alignment)
{
#define ALIGN(addr, align) (((addr) + align - 1) & ~(align - 1))

	// over-allocate so the returned address can be aligned within the block
	size_t alignedSize = size + alignment - 1;

	if (sFirstFree != NULL && alignedSize <= sFree) {
		// the allocation fits in the current chunk -- bump-allocate from it
		void* address = (void*)ALIGN((addr_t)sFirstFree, alignment);
		sFirstFree = (void*)((addr_t)sFirstFree + alignedSize);
		sLast = address;
		sFree -= alignedSize;

		return address;
	}

	if (alignedSize > kChunkSize / 2 && sFree < alignedSize) {
		// The request is large: give it a dedicated platform region instead
		// of wasting most of a fresh chunk on it. Note that such a block is
		// never recorded in sLast, so kernel_args_free() ignores it.
		void* block = NULL;
		if (platform_allocate_region(&block, alignedSize,
				B_READ_AREA | B_WRITE_AREA) != B_OK) {
			return NULL;
		}

		// register the block with the kernel args, using its kernel address
		addr_t translated_block;
		platform_bootloader_address_to_kernel_address(block, &translated_block);
		// NOTE(review): the range is registered with `size`, not
		// `alignedSize`; insert_address_range() rounds up to page
		// granularity, but for alignments larger than a page the tail
		// could remain unregistered -- confirm intended.
		if (add_kernel_args_range((void *)translated_block, size) != B_OK)
			panic("kernel_args max range too low!\n");

		return (void*)ALIGN((addr_t)block, alignment);
	}

	// Start a new chunk; whatever was left of the previous chunk is
	// abandoned.
	void* block = NULL;
	if (platform_allocate_region(&block, kChunkSize, B_READ_AREA | B_WRITE_AREA) != B_OK)
		return NULL;

	// the allocation occupies the beginning of the new chunk
	sFirstFree = (void*)((addr_t)block + alignedSize);
	sLast = block;
	sFree = kChunkSize - alignedSize;

	// register the whole chunk with the kernel args
	addr_t translated_block;
	platform_bootloader_address_to_kernel_address(block, &translated_block);
	if (add_kernel_args_range((void *)translated_block, kChunkSize) != B_OK)
		panic("kernel_args max range too low!\n");

	return (void*)ALIGN((addr_t)block, alignment);
}
/*!	This function can be used to free a kernel_args_malloc() allocation.
	It's very simple; it can only free the last allocation. It's
enough for its current usage in the boot loader, though.
*/
/*!	Frees \a block, but only if it is the most recent allocation made by
	kernel_args_malloc(); anything else is silently ignored (best effort,
	which is all the boot loader needs).
*/
void
kernel_args_free(void* block)
{
	if (sLast != block) {
		// sorry, we're dumb -- only the last allocation can be freed
		return;
	}

	// Return the freed bytes to the current chunk and rewind the bump
	// pointer to the start of the freed block. This must *add* to sFree:
	// plain assignment would discard the space that was still free before
	// this call, shrinking the chunk on every free.
	sFree += (addr_t)sFirstFree - (addr_t)sLast;
	sFirstFree = block;
	sLast = NULL;
}