#include "arch-specific.h"
#include "heap.h"
#include <OS.h>
#include <Debug.h>
#include <syscalls.h>
#include <libroot_private.h>
#include <stdlib.h>
#include <unistd.h>
#ifdef TRACE_CHUNKS
# define CTRACE(x) debug_printf x
#else
# define CTRACE(x) ;
#endif
using namespace BPrivate;
// Header placed at the start of every run of memory handed back via
// hoardUnsbrk(); the chunks form a singly linked list (sFreeChunks)
// kept sorted by ascending size (see insert_chunk()).
struct free_chunk {
	free_chunk *next;	// next free chunk in the size-sorted list, NULL at end
	size_t size;		// total size of this chunk in bytes, header included
};
// Size of the heap area created at startup and the granularity by which
// the area is grown later on.
static const size_t kInitialHeapSize = 64 * B_PAGE_SIZE;
static const size_t kHeapIncrement = 16 * B_PAGE_SIZE;

// Preferred address range reserved up front in __init_heap(), so that the
// heap area can later be extended in place (values differ between 64-bit
// and 32-bit targets).
#if B_HAIKU_64_BIT
static const addr_t kHeapReservationBase = 0x100100000000;
static const addr_t kHeapReservationSize = 0x1000000000;
#else
static const addr_t kHeapReservationBase = 0x18000000;
static const addr_t kHeapReservationSize = 0x48000000;
#endif

static area_id sHeapArea;		// area hoardSbrk() currently allocates from
static hoardLockType sHeapLock;	// guards the free list and size bookkeeping
static void *sHeapBase;			// start of the reserved range, NULL if none
static addr_t sFreeHeapBase;	// base address of the current heap area
static size_t sFreeHeapSize, sHeapAreaSize;	// bytes handed out / area size
static free_chunk *sFreeChunks;	// size-sorted list of freed chunks
/*!	Called in the child after a fork(): the heap area exists at the same
	address but under a new ID, so re-resolve it. Exits the process if the
	area cannot be found, since the allocator would be unusable.
*/
void
__init_after_fork(void)
{
	// find the heap area at the address the allocator currently uses
	sHeapArea = area_for((void*)sFreeHeapBase);
	if (sHeapArea < 0) {
		// Report the address that was actually looked up (sFreeHeapBase);
		// the previous message printed sHeapBase, which diverges from the
		// current heap base once the heap has grown into a follow-up area.
		debug_printf("hoard: init_after_fork(): thread %" B_PRId32 ", Heap "
			"area not found! Base address: %p\n", find_thread(NULL),
			(void*)sFreeHeapBase);
		exit(1);
	}
}
/*!	Creates the initial heap area and initializes the allocator's lock.
	Returns \c B_OK on success, or the create_area() error on failure.
*/
extern "C" status_t
__init_heap(void)
{
	hoardHeap::initNumProcs();

	// Try to reserve a large address range up front, so that later growth
	// can stay contiguous with the initial area. If the reservation fails,
	// sHeapBase is cleared and a randomized base is used instead.
	sHeapBase = (void *)kHeapReservationBase;
	status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase,
		B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize);
	if (status != B_OK)
		sHeapBase = NULL;

	// Binaries built against a pre-GCC2-Haiku ABI get an executable heap
	// (presumably for BeOS compatibility -- see __gABIVersion).
	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;
	sHeapArea = create_area("heap", (void **)&sHeapBase,
		status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS,
		kInitialHeapSize, B_NO_LOCK, protection);
	if (sHeapArea < B_OK)
		return sHeapArea;

	sFreeHeapBase = (addr_t)sHeapBase;
	sHeapAreaSize = kInitialHeapSize;

	hoardLockInit(sHeapLock, "heap");
	return B_OK;
}
/*!	Counterpart to __init_heap() for process teardown. Intentionally empty:
	nothing needs to be torn down here -- the heap areas go away with the
	team.
*/
extern "C" void
__heap_terminate_after()
{
}
/*!	Links \a newChunk into the free list (sFreeChunks), preserving the
	ascending-size sort order.
	The caller must hold sHeapLock.
*/
static void
insert_chunk(free_chunk *newChunk)
{
	// Advance to the link slot of the first chunk that is at least as
	// large as the new one, then splice the new chunk in right there.
	free_chunk **link = &sFreeChunks;
	while (*link != NULL && (*link)->size < newChunk->size)
		link = &(*link)->next;

	newChunk->next = *link;
	*link = newChunk;
}
namespace BPrivate {
/*!	Hands out \a size bytes of memory to hoard. The request is first
	served from the free chunk list, then from the remainder of the heap
	area, growing (or replacing) the area as needed.
	Returns NULL only when memory or address space is exhausted.
*/
void *
hoardSbrk(long size)
{
	assert(size > 0);
	CTRACE(("sbrk: size = %ld\n", size));

	// round the request up to the allocator's alignment
	size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);

	// Use the same protection as the initial area (see __init_heap():
	// pre-GCC2-Haiku ABI binaries get an executable heap).
	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;

	hoardLock(sHeapLock);

	// Look for a chunk in the free list; it is sorted by ascending size,
	// so the first chunk that fits is also the smallest that does.
	free_chunk *chunk = sFreeChunks, *last = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		CTRACE((" chunk %p (%ld)\n", chunk, chunk->size));
		if (chunk->size < (size_t)size) {
			last = chunk;
			continue;
		}

		SERIAL_PRINT(("HEAP-%" B_PRId32 ": "
			"found free chunk to hold %ld bytes\n", find_thread(NULL), size));

		void *address = (void *)chunk;

		if (chunk->size > (size_t)size + sizeof(free_chunk)) {
			// The chunk is large enough to split: the tail stays a free
			// chunk. Since its size shrank it may have to move backwards
			// in the sorted list, hence the insert_chunk() below.
			size_t newSize = chunk->size - size;
			free_chunk *next = chunk->next;
			chunk = (free_chunk *)((addr_t)chunk + size);
			chunk->next = next;
			chunk->size = newSize;

			if (last != NULL) {
				last->next = next;
				insert_chunk(chunk);
			} else
				sFreeChunks = chunk;
		} else {
			// Take the whole chunk; any slack beyond the requested size
			// is simply handed out along with it.
			chunk = chunk->next;

			if (last != NULL)
				last->next = chunk;
			else
				sFreeChunks = chunk;
		}

		hoardUnlock(sHeapLock);
		return address;
	}

	// No free chunk fit -- allocate from the tail of the heap area.

	size_t oldHeapSize = sFreeHeapSize;
	sFreeHeapSize += size;

	// round the new total up to the next heap increment
	size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1)
		& ~(kHeapIncrement - 1);

	if (incrementAlignedSize <= sHeapAreaSize) {
		// the area is already large enough
		SERIAL_PRINT(("HEAP-%" B_PRId32 ": heap area large enough for %ld\n",
			find_thread(NULL), size));
		hoardUnlock(sHeapLock);
		return (void *)(sFreeHeapBase + oldHeapSize);
	}

	// The area has to be grown.

	SERIAL_PRINT(("HEAP-%" B_PRId32 ": need to resize heap area to %ld "
		"(%ld requested)\n", find_thread(NULL), incrementAlignedSize, size));

	status_t status = resize_area(sHeapArea, incrementAlignedSize);
	if (status != B_OK) {
		// roll back the size bookkeeping before handling the error
		sFreeHeapSize = oldHeapSize;

		if (status == B_NO_MEMORY) {
			// truly out of memory -- give up
			hoardUnlock(sHeapLock);
			return NULL;
		}

		// Something else is in the way of the old area, so a new area has
		// to be created. Only the current request is sized into it; the
		// unused tail of the old area is abandoned.
		size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement
			* kHeapIncrement;

		// First try directly after the current heap, but only while that
		// still lies within the range reserved in __init_heap() (if any).
		void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
		area_id area = -1;
		if (sHeapBase != NULL
			&& base >= sHeapBase
			&& (addr_t)base + newHeapSize
				<= (addr_t)sHeapBase + kHeapReservationSize) {
			area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize,
				B_NO_LOCK, protection);

			if (area == B_NO_MEMORY) {
				hoardUnlock(sHeapLock);
				return NULL;
			}
		}

		// ... otherwise let the kernel pick a (randomized) base address
		if (area < 0) {
			base = (void*)(sFreeHeapBase + sHeapAreaSize);
			area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
				newHeapSize, B_NO_LOCK, protection);
		}

		if (area < 0) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		// from now on allocations come from the new area
		sHeapArea = area;
		sFreeHeapBase = (addr_t)base;
		sHeapAreaSize = newHeapSize;
		sFreeHeapSize = size;
		oldHeapSize = 0;
	} else
		sHeapAreaSize = incrementAlignedSize;

	hoardUnlock(sHeapLock);
	return (void *)(sFreeHeapBase + oldHeapSize);
}
/*!	Returns the \a size bytes starting at \a ptr to the allocator.
	The memory is not given back to the system; it is put into the free
	chunk list, merging with at most one directly adjacent chunk.
	NOTE(review): chunks are never unmapped, so the heap area only grows.
*/
void
hoardUnsbrk(void *ptr, long size)
{
	CTRACE(("unsbrk: %p, %ld!\n", ptr, size));
	hoardLock(sHeapLock);

	free_chunk *chunk = (free_chunk *)sFreeChunks, *last = NULL, *smaller = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		if ((addr_t)chunk + chunk->size == (addr_t)ptr
			|| (addr_t)ptr + size == (addr_t)chunk) {
			// This chunk borders the freed run on one side: unlink it,
			// merge the two, and re-insert below since the size changed.
			CTRACE((" found adjacent chunks: %p, %ld\n", chunk, chunk->size));
			if (last)
				last->next = chunk->next;
			else
				sFreeChunks = chunk->next;

			if ((addr_t)chunk < (addr_t)ptr)
				chunk->size += size;
			else {
				// the freed run precedes the chunk, so the merged chunk's
				// header lives at ptr
				free_chunk *newChunk = (free_chunk *)ptr;
				newChunk->next = chunk->next;
				newChunk->size = size + chunk->size;
				chunk = newChunk;
			}

			insert_chunk(chunk);
			hoardUnlock(sHeapLock);
			return;
		}

		last = chunk;
		// remember the last chunk smaller than the freed run, so a new
		// chunk can be spliced in (ascending) size order below
		if (chunk->size < (size_t)size)
			smaller = chunk;
	}

	// no adjacent chunk found: insert a fresh chunk, sorted by size
	free_chunk *newChunk = (free_chunk *)ptr;
	newChunk->size = size;

	if (smaller) {
		newChunk->next = smaller->next;
		smaller->next = newChunk;
	} else {
		newChunk->next = sFreeChunks;
		sFreeChunks = newChunk;
	}

	hoardUnlock(sHeapLock);
}
/*!	Initializes \a lock as a mutex named \a name.
	MUTEX_FLAG_ADAPTIVE: presumably spins briefly before blocking in the
	kernel -- see the mutex implementation.
*/
void
hoardLockInit(hoardLockType &lock, const char *name)
{
	mutex_init_etc(&lock, name, MUTEX_FLAG_ADAPTIVE);
}
//! Acquires \a lock, blocking until it is available.
void
hoardLock(hoardLockType &lock)
{
	mutex_lock(&lock);
}
//! Releases \a lock previously acquired via hoardLock().
void
hoardUnlock(hoardLockType &lock)
{
	mutex_unlock(&lock);
}
//! Gives up the CPU to let other threads run.
void
hoardYield(void)
{
	_kern_thread_yield();
}
}