/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/classic/PPCPagingMethodClassic.h"
#include <stdlib.h>
#include <string.h>
#include <new>
#include <AutoDeleter.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include "paging/classic/PPCPagingStructuresClassic.h"
#include "paging/classic/PPCVMTranslationMapClassic.h"
#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"
#ifdef TRACE_PPC_PAGING_METHOD_CLASSIC
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
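// 64 MB of I/O space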
#define IOSPACE_SIZE (64*1024*1024)
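// granularity at which the generic physical page mapper maps I/O space
// chunks (16 small pages)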
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
static addr_t sIOSpaceBase;
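
// Maps one chunk of the I/O space. Called back by the generic physical page
// mapper; va must lie within the I/O space.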
static status_t
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
{
	pa &= ~(B_PAGE_SIZE - 1);	// make sure the physical address is page aligned
	va &= ~(B_PAGE_SIZE - 1);	// make sure the virtual address is page aligned
if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}
PPCPagingMethodClassic::PPCPagingMethodClassic()
/*
	:
	fPageHole(NULL),
	fPageHolePageDir(NULL),
	fKernelPhysicalPageDirectory(0),
	fKernelVirtualPageDirectory(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
*/
{
}
PPCPagingMethodClassic::~PPCPagingMethodClassic()
{
}
status_t
PPCPagingMethodClassic::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
TRACE("PPCPagingMethodClassic::Init(): entry\n");
fPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
fPageTableSize = args->arch_args.page_table.size;
fPageTableHashMask = fPageTableSize / sizeof(page_table_entry_group) - 1;
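
	// init physical page mapper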
status_t error = generic_vm_physical_page_mapper_init(args,
map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
if (error != B_OK)
return error;
new(&fPhysicalPageMapper) GenericVMPhysicalPageMapper;
*_physicalPageMapper = &fPhysicalPageMapper;
return B_OK;
#if 0	// disabled leftovers from the x86 version of this method
fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
args->arch_args.vir_pgdir;
#ifdef TRACE_PPC_PAGING_METHOD_CLASSIC
TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif
PPCPagingStructuresClassic::StaticInit();
PhysicalPageSlotPool* pool
= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
PhysicalPageSlotPool;
status_t error = pool->InitInitial(args);
if (error != B_OK) {
panic("PPCPagingMethodClassic::Init(): Failed to create initial pool "
"for physical page mapper!");
return error;
}
large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
fKernelPhysicalPageMapper);
if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
}
TRACE("PPCPagingMethodClassic::Init(): done\n");
*_physicalPageMapper = fPhysicalPageMapper;
return B_OK;
#endif
}
status_t
PPCPagingMethodClassic::InitPostArea(kernel_args* args)
{
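	// If the boot loader put the page table outside the kernel address
	// space, remap it.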
if (!IS_KERNEL_ADDRESS(fPageTable)) {
addr_t newAddress = (addr_t)fPageTable;
status_t error = ppc_remap_address_range(&newAddress, fPageTableSize,
false);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to remap "
"the page table!");
return error;
}
addr_t oldVirtualBase = (addr_t)(fPageTable);
fPageTable = (page_table_entry_group*)newAddress;
ppc_unmap_address_range(oldVirtualBase, fPageTableSize);
}
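
	// create an area to cover the page table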
	fPageTableArea = create_area("page_table", (void **)&fPageTable,
		B_EXACT_ADDRESS, fPageTableSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (fPageTableArea < B_OK)
		return fPageTableArea;
status_t error = generic_vm_physical_page_mapper_init_post_area(args);
if (error != B_OK)
return error;
return B_OK;
#if 0	// disabled leftovers from the x86 version of this method
void *temp;
status_t error;
area_id area;
fKernelVirtualPageDirectory[1023] = 0;
fPageHolePageDir = NULL;
fPageHole = NULL;
temp = (void*)fKernelVirtualPageDirectory;
area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
error = PhysicalPageSlotPool::sInitialPhysicalPagePool
.InitInitialPostArea(args);
if (error != B_OK)
return error;
return B_OK;
#endif
}
status_t
PPCPagingMethodClassic::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
PPCVMTranslationMapClassic* map = new(std::nothrow) PPCVMTranslationMapClassic;
if (map == NULL)
return B_NO_MEMORY;
status_t error = map->Init(kernel);
if (error != B_OK) {
delete map;
return error;
}
*_map = map;
return B_OK;
}
status_t
PPCPagingMethodClassic::MapEarly(kernel_args* args, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
page_num_t (*get_free_page)(kernel_args*))
{
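	// look up the virtual segment ID from the segment register for this
	// address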
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
page_table_entry_group *group = &fPageTable[hash & fPageTableHashMask];
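
	// search the eight entries of the primary hash group for a free slot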
for (int32 i = 0; i < 8; i++) {
if (group->entry[i].valid)
continue;
FillPageTableEntry(&group->entry[i], virtualSegmentID,
virtualAddress, physicalAddress, PTE_READ_WRITE, 0, false);
return B_OK;
}
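
	// the primary group is full; retry with the secondary hash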
hash = page_table_entry::SecondaryHash(hash);
group = &fPageTable[hash & fPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
if (group->entry[i].valid)
continue;
FillPageTableEntry(&group->entry[i], virtualSegmentID,
virtualAddress, physicalAddress, PTE_READ_WRITE, 0, true);
return B_OK;
}
return B_ERROR;
}
bool
PPCPagingMethodClassic::IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection)
{
VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
PPCVMTranslationMapClassic* map = static_cast<PPCVMTranslationMapClassic*>(
addressSpace->TranslationMap());
phys_addr_t physicalAddress;
uint32 flags;
if (map->Query(virtualAddress, &physicalAddress, &flags) != B_OK)
return false;
if ((flags & PAGE_PRESENT) == 0)
return false;
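
	// the page is present; it is accessible unless write access is
	// requested while the page is not writable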
return (protection & B_KERNEL_WRITE_AREA) == 0
|| (flags & B_KERNEL_WRITE_AREA) != 0;
}
void
PPCPagingMethodClassic::FillPageTableEntry(page_table_entry *entry,
uint32 virtualSegmentID, addr_t virtualAddress, phys_addr_t physicalAddress,
uint8 protection, uint32 memoryType, bool secondaryHash)
{
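	// fill in the second word of the PTE first; the entry only takes effect
	// once the valid bit in the first word is set below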
entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
entry->_reserved0 = 0;
entry->referenced = false;
entry->changed = false;
entry->write_through = (memoryType == B_MTR_UC) || (memoryType == B_MTR_WT);
entry->caching_inhibited = (memoryType == B_MTR_UC);
entry->memory_coherent = false;
entry->guarded = false;
entry->_reserved1 = 0;
entry->page_protection = protection & 0x3;
eieio();
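		// ensure the second word is written out before the entry is
		// validated

	// now set the first word, with the valid bit set very last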
entry->virtual_segment_id = virtualSegmentID;
entry->secondary_hash = secondaryHash;
entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
entry->valid = true;
ppc_sync();
}
#if 0	// unused page directory code inherited from the x86 version
void
PPCPagingMethodClassic::PutPageTableInPageDir(page_directory_entry* entry,
phys_addr_t pgtablePhysical, uint32 attributes)
{
*entry = (pgtablePhysical & PPC_PDE_ADDRESS_MASK)
| PPC_PDE_PRESENT
| PPC_PDE_WRITABLE
| PPC_PDE_USER;
}
void
PPCPagingMethodClassic::PutPageTableEntryInTable(page_table_entry* entry,
phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
bool globalPage)
{
page_table_entry page = (physicalAddress & PPC_PTE_ADDRESS_MASK)
| PPC_PTE_PRESENT | (globalPage ? PPC_PTE_GLOBAL : 0)
| MemoryTypeToPageTableEntryFlags(memoryType);
if ((attributes & B_USER_PROTECTION) != 0) {
page |= PPC_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
page |= PPC_PTE_WRITABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
page |= PPC_PTE_WRITABLE;
*(volatile page_table_entry*)entry = page;
}
void
PPCPagingMethodClassic::_EarlyPreparePageTables(page_table_entry* pageTables,
addr_t address, size_t size)
{
memset(pageTables, 0, B_PAGE_SIZE * (size / (B_PAGE_SIZE * 1024)));
{
addr_t virtualTable = (addr_t)pageTables;
page_directory_entry* pageHolePageDir
= PPCPagingMethodClassic::Method()->PageHolePageDir();
for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
i++, virtualTable += B_PAGE_SIZE) {
phys_addr_t physicalTable = 0;
_EarlyQuery(virtualTable, &physicalTable);
page_directory_entry* entry = &pageHolePageDir[
(address / (B_PAGE_SIZE * 1024)) + i];
PutPageTableInPageDir(entry, physicalTable,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
}
}
}
status_t
PPCPagingMethodClassic::_EarlyQuery(addr_t virtualAddress,
phys_addr_t *_physicalAddress)
{
PPCPagingMethodClassic* method = PPCPagingMethodClassic::Method();
int index = VADDR_TO_PDENT(virtualAddress);
if ((method->PageHolePageDir()[index] & PPC_PDE_PRESENT) == 0) {
return B_ERROR;
}
page_table_entry* entry = method->PageHole() + virtualAddress / B_PAGE_SIZE;
if ((*entry & PPC_PTE_PRESENT) == 0) {
return B_ERROR;
}
*_physicalAddress = *entry & PPC_PTE_ADDRESS_MASK;
return B_OK;
}
#endif