/*
 * Copyright 2008-2010, François Revol, revol@free.fr. All rights reserved.
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
* Based on code written by Travis Geiselbrecht for NewOS.
*
* Distributed under the terms of the MIT License.
*/
#include "atari_memory_map.h"
#include "toscalls.h"
#include "mmu.h"
#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>
#include <OS.h>
#include <string.h>
/*!	For reference, the (physical) memory layout of the x86 boot loader is
 *	as follows:
 *	 0x0500 - 0x10000	protected mode stack
 *	 0x0500 - 0x09000	real mode stack
 *	0x10000 - ?		code (up to ~500 kB)
 *	0x90000			1st temporary page table (identity maps 0-4 MB)
 *	0x91000			2nd (4-8 MB)
 *	0x92000 - 0x92000	further page tables
 *	0x9e000 - 0xa0000	SMP trampoline code
 *	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
 *	0x100000		page directory
 *	     ...		boot loader heap (32 kB)
 *	     ...		free physical memory
 *
 *	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
 *	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
 *	loader (kernel args, modules, driver settings, ...) comes after
 *	0x81000000 which means that there is currently only 1 MB reserved for
 *	the kernel itself (see kMaxKernelSize).
 */
/*!	The (physical) memory layout of the m68k boot loader is currently as
 *	follows:
 *	 0x0800 - 0x10000	supervisor mode stack (1) XXX: more? x86 starts at 0x500
 *	0x10000 - ?		code (up to ~500 kB)
 *	0x100000 or FAST_RAM_BASE if any:
 *	     ...		page root directory
 *	     ...		interrupt vectors (VBR)
 *	     ...		page directory
 *	     ...		boot loader heap (32 kB)
 *	     ...		free physical memory
 *	0xdNNNNN		video buffer usually there, as per v_bas_ad
 *				(= Logbase(), but Physbase() is better)
 *
 *	The first 32 MB (2) are identity mapped (0x0 - 0x2000000); paging
 *	is turned on. The kernel is mapped at 0x80000000; all other stuff
 *	mapped by the loader (kernel args, modules, driver settings, ...)
 *	comes after 0x80200000, which means that there is currently only
 *	2 MB reserved for the kernel itself (see kMaxKernelSize).
 *
 *	(1) no need for a user stack, we are already in supervisor mode in the
 *	loader.
 *	(2) maps the whole regular ST space; transparent translation registers
 *	have larger granularity anyway.
 */
#warning M68K: check for Physbase() < ST_RAM_TOP
#define TRACE_MMU
#ifdef TRACE_MMU
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
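/* The interrupt vector table (pointed to by the VBR) is stored in the same
 * physical page as the page root directory, VBR_PAGE_OFFSET bytes in; see
 * init_page_directory() and mmu_init().
 */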
#define VBR_PAGE_OFFSET 1024
static const uint32 kDefaultPageTableFlags = 0x07;
static const size_t kMaxKernelSize = 0x200000;
addr_t gPageRoot = 0;
static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_LOAD_BASE;
#if 0
static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
#endif
static const struct boot_mmu_ops *gMMUOps;
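/*!	The following functions implement a simple bump allocator over the
 *	loader's virtual and physical address space: they hand out the next
 *	free range and advance a cursor; nothing is ever given back, except
 *	for the last-allocation rewinds in mmu_free() and
 *	platform_free_heap_region().
 */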
static addr_t
get_next_virtual_address(size_t size)
{
addr_t address = sNextVirtualAddress;
sNextVirtualAddress += size;
TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
return address;
}
static addr_t
get_next_physical_address(size_t size)
{
addr_t address = sNextPhysicalAddress;
sNextPhysicalAddress += size;
TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
return address;
}
static addr_t
get_next_virtual_page()
{
TRACE(("%s\n", __FUNCTION__));
return get_next_virtual_address(B_PAGE_SIZE);
}
static addr_t
get_next_physical_page()
{
TRACE(("%s\n", __FUNCTION__));
return get_next_physical_address(B_PAGE_SIZE);
}
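/*!	Hands out a fresh physical page to hold page tables, presumably called
 *	back from the per-MMU-type code behind gMMUOps. Note that the page is
 *	not zeroed here (the clearing loop below is disabled).
 */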
extern "C" addr_t
mmu_get_next_page_tables()
{
#if 0
TRACE(("mmu_get_next_page_tables, sNextPageTableAddress %p, kPageTableRegionEnd %p\n",
sNextPageTableAddress, kPageTableRegionEnd));
addr_t address = sNextPageTableAddress;
if (address >= kPageTableRegionEnd)
return (uint32 *)get_next_physical_page();
sNextPageTableAddress += B_PAGE_SIZE;
return (uint32 *)address;
#endif
addr_t tbl = get_next_physical_page();
if (!tbl)
return tbl;
#if 0
uint32 *p = (uint32 *)tbl;
for (int32 i = 0; i < 1024; i++)
p[i] = 0;
#endif
return tbl;
}
#if 0
static void
add_page_table(addr_t base)
{
TRACE(("add_page_table(base = %p)\n", (void *)base));
#if 0
uint32 *pageTable = mmu_get_next_page_tables();
if (pageTable > (uint32 *)(8 * 1024 * 1024))
panic("tried to add page table beyond the indentity mapped 8 MB region\n");
gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;
for (int32 i = 0; i < 1024; i++)
pageTable[i] = 0;
gPageRoot[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
#endif
}
#endif
static void
unmap_page(addr_t virtualAddress)
{
gMMUOps->unmap_page(virtualAddress);
}
/*!	Creates an entry to map the specified virtualAddress to the given
 *	physicalAddress.
 *	If the mapping goes beyond the current page table, it will allocate
 *	a new one. If it cannot map the requested page, it panics.
 */
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));
if (virtualAddress < KERNEL_LOAD_BASE)
panic("map_page: asked to map invalid page %p!\n", (void *)virtualAddress);
gMMUOps->add_page_table(virtualAddress);
#if 0
if (virtualAddress >= sMaxVirtualAddress) {
gMMUOps->add_page_table(sMaxVirtualAddress);
sMaxVirtualAddress += B_PAGE_SIZE * 64;
if (virtualAddress >= sMaxVirtualAddress)
panic("map_page: asked to map a page to %p\n", (void *)virtualAddress);
}
#endif
physicalAddress &= ~(B_PAGE_SIZE - 1);
gMMUOps->map_page(virtualAddress, physicalAddress, flags);
}
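/*!	Allocates the page root directory (which also hosts the interrupt
 *	vectors, see VBR_PAGE_OFFSET), loads it into the MMU's root pointer,
 *	lets the MMU-specific code build the kernel page directories, enables
 *	paging, and adds the first page table for KERNEL_LOAD_BASE.
 */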
static void
init_page_directory(void)
{
TRACE(("init_page_directory\n"));
gPageRoot = get_next_physical_page();
gKernelArgs.arch_args.phys_pgroot = (uint32)gPageRoot;
gKernelArgs.arch_args.phys_vbr = (uint32)gPageRoot + VBR_PAGE_OFFSET;
gMMUOps->load_rp(gPageRoot);
gMMUOps->allocate_kernel_pgdirs();
gMMUOps->enable_paging();
gMMUOps->add_page_table(KERNEL_LOAD_BASE);
#if 0
for (int32 i = 0; i < 1024; i++) {
gPageRoot[i] = 0;
}
uint32 *pageTable = mmu_get_next_page_tables();
for (int32 i = 0; i < 1024; i++) {
pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
}
gPageRoot[0] = (uint32)pageTable | kDefaultPageFlags;
pageTable = mmu_get_next_page_tables();
for (int32 i = 0; i < 1024; i++) {
pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
}
gPageRoot[1] = (uint32)pageTable | kDefaultPageFlags;
gKernelArgs.arch_args.num_pgtables = 0;
add_page_table(KERNEL_LOAD_BASE);
asm("movl %0, %%eax;"
"movl %%eax, %%cr3;" : : "m" (gPageRoot) : "eax");
asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
#endif
}
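/*!	Maps the given physical address range into the loader's virtual
 *	address space and returns the virtual address, preserving the in-page
 *	offset. A hypothetical use, mapping the current screen buffer (the
 *	Physbase() call and the 32 kB size are illustrative only, not taken
 *	from this file):
 *
 *		addr_t fb = mmu_map_physical_memory((addr_t)Physbase(),
 *			32 * 1024, kDefaultPageFlags);
 */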
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
addr_t address = sNextVirtualAddress;
addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);
physicalAddress -= pageOffset;
for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
map_page(get_next_virtual_page(), physicalAddress + offset, flags);
}
return address + pageOffset;
}
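/*!	Allocates and maps `size' bytes backed by fresh physical pages. If
 *	virtualAddress is non-NULL, it is used as-is and must lie within the
 *	kernel's reserved range; otherwise the next free virtual pages are
 *	used.
 */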
extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: %ld\n",
virtualAddress, sNextVirtualAddress, size));
size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
if (virtualAddress != NULL) {
addr_t address = (addr_t)virtualAddress;
if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
>= KERNEL_LOAD_BASE + kMaxKernelSize)
return NULL;
for (uint32 i = 0; i < size; i++) {
map_page(address, get_next_physical_page(), kDefaultPageFlags);
address += B_PAGE_SIZE;
}
TRACE(("mmu_allocate(KERNEL, %d): done\n", size));
return virtualAddress;
}
void *address = (void *)sNextVirtualAddress;
for (uint32 i = 0; i < size; i++) {
map_page(get_next_virtual_page(), get_next_physical_page(), kDefaultPageFlags);
}
TRACE(("mmu_allocate(NULL, %d): %p\n", size, address));
return address;
}
/*!	This will unmap the allocated chunk of memory from the virtual
 *	address space. It might not actually free memory (as its implementation
 *	is very simple), but it might.
 */
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));
addr_t address = (addr_t)virtualAddress;
addr_t pageOffset = address % B_PAGE_SIZE;
address -= pageOffset;
size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
(void *)address, size);
}
for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
unmap_page(address);
address += B_PAGE_SIZE;
}
if (address == sNextVirtualAddress) {
sNextVirtualAddress -= size;
}
}
/*!	Finalizes the physical and virtual allocation ranges in the kernel
 *	args and sorts them for the kernel's use.
 *	BIOS calls won't work any longer after this function has
 *	been called.
 */
extern "C" void
mmu_init_for_kernel(void)
{
TRACE(("mmu_init_for_kernel\n"));
#if 0
{
struct gdt_idt_descr idtDescriptor;
uint32 *idt;
idt = (uint32 *)get_next_physical_page();
gKernelArgs.arch_args.phys_idt = (uint32)idt;
TRACE(("idt at %p\n", idt));
gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);
uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
virtualIDT[i] = 0;
}
idtDescriptor.limit = IDT_LIMIT - 1;
idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;
asm("lidt %0;"
: : "m" (idtDescriptor));
TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
}
{
struct gdt_idt_descr gdtDescriptor;
segment_descriptor *gdt;
gdt = (segment_descriptor *)get_next_physical_page();
gKernelArgs.arch_args.phys_gdt = (uint32)gdt;
TRACE(("gdt at %p\n", gdt));
gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);
segment_descriptor* virtualGDT
= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
clear_segment_descriptor(&virtualGDT[0]);
set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
DPL_KERNEL);
set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
DPL_KERNEL);
set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
DPL_USER);
set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
DPL_USER);
gdtDescriptor.limit = GDT_LIMIT - 1;
gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;
asm("lgdt %0;"
: : "m" (gdtDescriptor));
TRACE(("gdt at virtual address %p\n", (void *)gKernelArgs.arch_args.vir_gdt));
}
#endif
gKernelArgs.physical_allocated_range[0].size
	= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;
gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE;
gKernelArgs.virtual_allocated_range[0].size
	= sNextVirtualAddress - KERNEL_LOAD_BASE;
gKernelArgs.num_virtual_allocated_ranges = 1;
sort_address_ranges(gKernelArgs.physical_memory_range,
gKernelArgs.num_physical_memory_ranges);
sort_address_ranges(gKernelArgs.physical_allocated_range,
gKernelArgs.num_physical_allocated_ranges);
sort_address_ranges(gKernelArgs.virtual_allocated_range,
gKernelArgs.num_virtual_allocated_ranges);
#ifdef TRACE_MMU
{
uint32 i;
dprintf("phys memory ranges:\n");
for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
dprintf(" base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
gKernelArgs.physical_memory_range[i].start,
gKernelArgs.physical_memory_range[i].size);
}
dprintf("allocated phys memory ranges:\n");
for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
dprintf(" base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
gKernelArgs.physical_allocated_range[i].start,
gKernelArgs.physical_allocated_range[i].size);
}
dprintf("allocated virt memory ranges:\n");
for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
dprintf(" base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
gKernelArgs.virtual_allocated_range[i].start,
gKernelArgs.virtual_allocated_range[i].size);
}
}
#endif
}
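/*!	Sets everything up for the loader to use the MMU: selects the ops for
 *	the detected MMU type, enables transparent translation of the ST-RAM
 *	and shadow areas, builds the initial mapping via init_page_directory(),
 *	allocates the kernel stack, and registers the physical memory ranges
 *	(ST-RAM, plus TT fast RAM if present) as well as the video buffer as
 *	already allocated.
 */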
extern "C" void
mmu_init(void)
{
TRACE(("mmu_init\n"));
switch (gKernelArgs.arch_args.mmu_type) {
#if 0
case 68851:
gMMUOps = &k851MMUOps;
break;
#endif
case 68030:
gMMUOps = &k030MMUOps;
break;
case 68040:
gMMUOps = &k040MMUOps;
break;
#if 0
case 68060:
gMMUOps = &k060MMUOps;
break;
#endif
default:
panic("unknown mmu type %d\n", gKernelArgs.arch_args.mmu_type);
}
gMMUOps->initialize();
addr_t fastram_top = 0;
if (*TOSVARramvalid == TOSVARramvalid_MAGIC)
fastram_top = *TOSVARramtop;
if (fastram_top) {
sNextPhysicalAddress = ATARI_FASTRAM_BASE;
}
gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
gKernelArgs.physical_allocated_range[0].size = 0;
gKernelArgs.num_physical_allocated_ranges = 1;
TRACE(("mmu_init: enabling transparent translation\n"));
gMMUOps->set_tt(0, ATARI_CHIPRAM_BASE, 0x10000000, 0);
gMMUOps->set_tt(1, ATARI_SHADOW_BASE, 0x01000000, 0);
TRACE(("mmu_init: init rtdir\n"));
init_page_directory();
#if 0
gPageRoot[1023] = (uint32)gPageRoot | kDefaultPageFlags;
#endif
gKernelArgs.arch_args.vir_pgroot = get_next_virtual_page();
map_page(gKernelArgs.arch_args.vir_pgroot, (uint32)gPageRoot, kDefaultPageFlags);
gKernelArgs.arch_args.vir_vbr = gKernelArgs.arch_args.vir_pgroot
+ VBR_PAGE_OFFSET;
gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
gKernelArgs.physical_memory_range[0].start = ATARI_CHIPRAM_BASE;
gKernelArgs.physical_memory_range[0].size = *TOSVARphystop - ATARI_CHIPRAM_BASE;
gKernelArgs.num_physical_memory_ranges = 1;
if (fastram_top) {
gKernelArgs.physical_memory_range[1].start =
ATARI_FASTRAM_BASE;
gKernelArgs.physical_memory_range[1].size =
fastram_top - ATARI_FASTRAM_BASE;
gKernelArgs.num_physical_memory_ranges++;
}
addr_t video_base = *TOSVAR_memtop;
video_base &= ~(B_PAGE_SIZE-1);
gKernelArgs.physical_allocated_range[
	gKernelArgs.num_physical_allocated_ranges].start = video_base;
gKernelArgs.physical_allocated_range[
	gKernelArgs.num_physical_allocated_ranges].size
	= *TOSVARphystop - video_base;
gKernelArgs.num_physical_allocated_ranges++;
gKernelArgs.arch_args.plat_args.atari.nat_feat.nf_page
	= get_next_physical_page();
}
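/*!	Platform wrapper around mmu_allocate(); note that the protection
 *	argument is currently ignored.
 */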
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection)
{
void *address = mmu_allocate(*_address, size);
if (address == NULL)
return B_NO_MEMORY;
*_address = address;
return B_OK;
}
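/*!	Platform wrapper around mmu_free().
 */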
extern "C" status_t
platform_free_region(void *address, size_t size)
{
mmu_free(address, size);
return B_OK;
}
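/*!	Carves the loader heap directly out of identity-mapped physical
 *	memory, so heap pointers remain valid whether or not paging is on.
 *	Panics if the region would end beyond the 32 MB identity-mapped area.
 */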
ssize_t
platform_allocate_heap_region(size_t size, void **_base)
{
size = ROUNDUP(size, B_PAGE_SIZE);
addr_t base = get_next_physical_address(size);
if (base == 0)
return B_NO_MEMORY;
if ((base + size) > (32 * 1024 * 1024))
panic("platform_allocate_heap_region: region end is beyond identity map");
*_base = (void*)base;
return size;
}
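/*!	Returns a heap region; the physical space is only actually reclaimed
 *	when the region happens to be the most recent allocation.
 */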
void
platform_free_heap_region(void *_base, size_t size)
{
addr_t base = (addr_t)_base;
remove_physical_allocated_range(base, size);
if (sNextPhysicalAddress == (base + size))
sNextPhysicalAddress -= size;
}
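/*!	Loader allocations live in identity-mapped low memory that the kernel
 *	sees at the same addresses (assuming it preserves that mapping), so
 *	both conversions below are the identity.
 */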
extern "C" status_t
platform_bootloader_address_to_kernel_address(void *address, addr_t *_result)
{
TRACE(("%s: called\n", __func__));
*_result = (addr_t)address;
return B_OK;
}
extern "C" status_t
platform_kernel_address_to_bootloader_address(addr_t address, void **_result)
{
TRACE(("%s: called\n", __func__));
*_result = (void*)address;
return B_OK;
}