/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
* Copyright 2007, Hugo Santos. All Rights Reserved.
* Distributed under the terms of the MIT License.
*/
#include "slab_private.h"
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <debug.h>
#include <heap.h>
#include <kernel.h>
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include "ObjectCache.h"
#include "MemoryManager.h"
#if USE_SLAB_ALLOCATOR_FOR_MALLOC
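
// Allocation size classes; each entry is backed by its own object cache in
// sBlockCaches below.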
static const size_t kBlockSizes[] = {
16, 24, 32,
48, 64, 80, 96, 112, 128,
160, 192, 224, 256,
320, 384, 448, 512,
640, 768, 896, 1024,
1280, 1536, 1792, 2048,
2560, 3072, 3584, 4096,
5120, 6144, 7168, 8192,
10240, 12288, 14336, 16384,
};
static const size_t kNumBlockSizes = B_COUNT_OF(kBlockSizes);
static object_cache* sBlockCaches[kNumBlockSizes];
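
// Bootstrap memory for block_alloc_early(): a raw chunk that is handed out
// linearly before the object caches exist.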
static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;
RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)
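
// Maps an allocation size to the index of the smallest sufficiently large
// kBlockSizes entry, or returns -1 if the size exceeds the largest class.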
static int
size_to_index(size_t size)
{
if (size <= 16)
return 0;
if (size <= 32)
return 1 + (size - 16 - 1) / 8;
if (size <= 128)
return 3 + (size - 32 - 1) / 16;
if (size <= 256)
return 9 + (size - 128 - 1) / 32;
if (size <= 512)
return 13 + (size - 256 - 1) / 64;
if (size <= 1024)
return 17 + (size - 512 - 1) / 128;
if (size <= 2048)
return 21 + (size - 1024 - 1) / 256;
if (size <= 4096)
return 25 + (size - 2048 - 1) / 512;
if (size <= 8192)
return 29 + (size - 4096 - 1) / 1024;
if (size <= 16384)
return 33 + (size - 8192 - 1) / 2048;
return -1;
}
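
// Common allocation path: sizes that map onto a block cache are served from
// it, larger requests go straight to the memory manager. Alignments above
// kMinObjectAlignment are honored by rounding the size up to a power of two.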
static void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size a power of two that is >= both the requested size and the
		// alignment. That is sufficient, since the power-of-two block caches
		// are size-aligned (cf. block_allocator_init_boot()).
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;

		// Too large for the block caches -- the memory manager has to align
		// the raw allocation on its size.
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}
int index = size_to_index(size);
if (index >= 0)
return object_cache_alloc(sBlockCaches[index], flags);
void* block;
if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
return NULL;
return block;
}
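
// Early-boot allocation: uses a block cache if one already exists for the
// size, asks the memory manager directly for anything larger than a small
// chunk, and otherwise carves the request out of a bootstrap chunk. Bootstrap
// allocations are never freed.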
void*
block_alloc_early(size_t size)
{
int index = size_to_index(size);
if (index >= 0 && sBlockCaches[index] != NULL)
return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);
	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// Sufficiently large request -- ask the memory manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;
		return block;
	}

	// A small request, but its object cache does not exist yet: carve it out
	// of the bootstrap chunk, fetching a new chunk when the current one is
	// exhausted.
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}

	// Round up so that subsequent bootstrap allocations stay aligned.
	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;

	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;
	return block;
}
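
// Returns `block` to its object cache, or lets the memory manager free it if
// it was a raw allocation. NULL is silently ignored.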
static void
block_free(void* block, uint32 flags)
{
if (block == NULL)
return;
ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
if (cache != NULL) {
ASSERT(cache->object_size >= kBlockSizes[0]);
ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
object_cache_free(cache, block, flags);
}
}
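
// Creates one object cache per kBlockSizes entry. Power-of-two sizes get a
// size-aligned cache (relied upon by block_alloc()); caches larger than
// 2048 bytes are created with CACHE_NO_DEPOT.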
void
block_allocator_init_boot()
{
for (size_t index = 0; index < kNumBlockSizes; index++) {
char name[32];
snprintf(name, sizeof(name), "block allocator: %lu",
kBlockSizes[index]);
uint32 flags = CACHE_DURING_BOOT;
size_t size = kBlockSizes[index];
size_t alignment = (size & (size - 1)) == 0 ? size : 0;
if (size > 2048)
flags |= CACHE_NO_DEPOT;
sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
0, 0, flags, NULL, NULL, NULL, NULL);
if (sBlockCaches[index] == NULL)
panic("allocator: failed to init block cache");
}
}
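
// Optionally exercises every block cache once the caches are up; only active
// when TEST_ALL_CACHES_DURING_BOOT is defined.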
void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	// Allocate and free one object from every block cache as a smoke test.
	for (size_t index = 0; index < kNumBlockSizes; index++)
		block_free(block_alloc(kBlockSizes[index], 0, 0), 0);
#endif
}
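
// Public kernel heap interface.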
void*
memalign(size_t alignment, size_t size)
{
return block_alloc(size, alignment, 0);
}
void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}
int
posix_memalign(void** _pointer, size_t alignment, size_t size)
{
if ((alignment & (sizeof(void*) - 1)) != 0 || _pointer == NULL)
return B_BAD_VALUE;
*_pointer = block_alloc(size, alignment, 0);
return 0;
}
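
// Frees `address`, deferring the operation via deferred_free() when the
// caller must not lock the kernel address space.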
void
free_etc(void *address, uint32 flags)
{
if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
deferred_free(address);
return;
}
block_free(address, flags & CACHE_ALLOC_FLAGS);
}
void*
malloc(size_t size)
{
return block_alloc(size, 0, 0);
}
void
free(void* address)
{
block_free(address, 0);
}
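
// Resizes the allocation at `address`. A NULL address behaves like malloc(),
// a newSize of 0 like free(). Unless the size is unchanged, a new block is
// allocated and the contents are copied over.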
void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
if (newSize == 0) {
block_free(address, flags);
return NULL;
}
if (address == NULL)
return block_alloc(newSize, 0, flags);
size_t oldSize;
ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("realloc_etc(): allocation %p not known", address);
		return NULL;
	}
if (oldSize == newSize)
return address;
void* newBlock = block_alloc(newSize, 0, flags);
if (newBlock == NULL)
return NULL;
memcpy(newBlock, address, std::min(oldSize, newSize));
block_free(address, flags);
return newBlock;
}
void*
realloc(void* address, size_t newSize)
{
return realloc_etc(address, newSize, 0);
}
#else
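
// The slab allocator is not used for the kernel heap in this configuration;
// only stubs for the boot hooks are provided.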
void*
block_alloc_early(size_t size)
{
panic("block allocator not enabled!");
return NULL;
}
void
block_allocator_init_boot()
{
}
void
block_allocator_init_rest()
{
}
#endif
RANGE_MARKER_FUNCTION_END(slab_allocator)