/*
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef UNUSED_VNODES_H
#define UNUSED_VNODES_H
#include <algorithm>
#include <util/AutoLock.h>
#include <util/list.h>
#include <low_resource_manager.h>
#include "Vnode.h"
const static uint32 kMaxUnusedVnodes = 8192;
/*!	Guards sUnusedVnodeList and sUnusedVnodes.
	Must have at least a read-lock of sHotVnodesLock when acquiring!
*/
static spinlock sUnusedVnodesLock = B_SPINLOCK_INITIALIZER;
typedef DoublyLinkedList<Vnode, DoublyLinkedListMemberGetLink<Vnode, &Vnode::unused_link> >
UnusedVnodeList;
static UnusedVnodeList sUnusedVnodeList;
static uint32 sUnusedVnodes = 0;
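// Vnodes that have recently become unused ("hot" vnodes) are first recorded
// in the fixed-size sHotVnodes array, which can be appended to with only a
// read lock of sHotVnodesLock held. The array is flushed to sUnusedVnodeList
// in batches, which keeps contention on sUnusedVnodesLock low.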
static const int32 kMaxHotVnodes = 1024;
static rw_lock sHotVnodesLock = RW_LOCK_INITIALIZER("hot vnodes");
static Vnode* sHotVnodes[kMaxHotVnodes];
static int32 sNextHotVnodeIndex = 0;
static const int32 kUnusedVnodesCheckInterval = 64;
static int32 sUnusedVnodesCheckCount = 0;
/*!	Moves all still-unused vnodes from the hot vnodes array to the unused
	vnode list and empties the array.
	Must be called with sHotVnodesLock write-locked.
*/
static void
flush_hot_vnodes_locked()
{
InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
int32 count = std::min(sNextHotVnodeIndex, kMaxHotVnodes);
for (int32 i = 0; i < count; i++) {
Vnode* vnode = sHotVnodes[i];
if (vnode == NULL)
continue;
if (vnode->IsHot()) {
if (vnode->IsUnused()) {
sUnusedVnodeList.Add(vnode);
sUnusedVnodes++;
}
vnode->SetHot(false);
}
sHotVnodes[i] = NULL;
}
unusedLocker.Unlock();
sNextHotVnodeIndex = 0;
}
/*!	Marks the given vnode as unused and registers it in the hot vnodes array.
	Must be called with sVnodeLock at least read-locked and the vnode locked.
\param vnode The vnode.
\return \c true, if the caller should trigger unused vnode freeing.
*/
static bool
vnode_unused(Vnode* vnode)
{
ReadLocker hotReadLocker(sHotVnodesLock);
vnode->SetUnused(true);
bool result = false;
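	// Only check for too many unused vnodes once the check counter reaches
	// kUnusedVnodesCheckInterval. If there are indeed too many while the
	// system is low on pages or memory, tell the caller to trigger freeing;
	// otherwise reset the counter. (When freeing is triggered,
	// unused_vnodes_check_done() resets the counter later.)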
int32 checkCount = atomic_add(&sUnusedVnodesCheckCount, 1);
if (checkCount == kUnusedVnodesCheckInterval) {
uint32 unusedCount = atomic_get((int32*)&sUnusedVnodes);
if (unusedCount > kMaxUnusedVnodes
&& low_resource_state(
B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY)
!= B_NO_LOW_RESOURCE) {
result = true;
} else {
atomic_set(&sUnusedVnodesCheckCount, 0);
}
}
if (vnode->IsHot())
return result;
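	// Try to claim a slot in the hot vnodes array while holding only the read
	// lock. If the array turns out to be full, fall back to the write lock
	// below and flush the array first.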
int32 index = atomic_add(&sNextHotVnodeIndex, 1);
if (index < kMaxHotVnodes) {
vnode->SetHot(true);
sHotVnodes[index] = vnode;
return result;
}
hotReadLocker.Unlock();
WriteLocker hotWriteLocker(sHotVnodesLock);
if (sNextHotVnodeIndex >= kMaxHotVnodes)
flush_hot_vnodes_locked();
index = sNextHotVnodeIndex++;
vnode->SetHot(true);
sHotVnodes[index] = vnode;
return result;
}
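
/*	Illustrative only: a minimal sketch of how a caller releasing the last
	reference to a vnode might use vnode_unused(). The function name and the
	surrounding logic are hypothetical, not the actual VFS code.

	static void
	example_last_reference_released(Vnode* vnode)
	{
		// sVnodeLock is read-locked and the vnode is locked here
		bool freeUnusedVnodes = vnode_unused(vnode);

		// ... unlock the vnode and sVnodeLock ...

		if (freeUnusedVnodes) {
			// Too many vnodes are unused while memory is tight -- the caller
			// is now expected to trigger freeing some of the oldest unused
			// vnodes.
		}
	}
*/
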
/*!	Marks the given vnode as used again, removing it from the unused vnode
	list, if necessary.
	Must be called with sVnodeLock at least read-locked and the vnode locked.
\param vnode The vnode.
*/
static void
vnode_used(Vnode* vnode)
{
ReadLocker hotReadLocker(sHotVnodesLock);
if (!vnode->IsUnused())
return;
vnode->SetUnused(false);
if (!vnode->IsHot()) {
InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
sUnusedVnodeList.Remove(vnode);
sUnusedVnodes--;
}
}
/*!	To be called when the given vnode is about to be freed. Unregisters it
	from the hot vnodes array or the unused vnode list, wherever it is
	currently registered.
	Must be called with sVnodeLock at least read-locked and the vnode locked.
\param vnode The vnode.
*/
static void
vnode_to_be_freed(Vnode* vnode)
{
ReadLocker hotReadLocker(sHotVnodesLock);
if (vnode->IsHot()) {
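		// The vnode is still registered in the hot vnodes array -- clear its
		// slot. The read lock suffices here: each vnode occupies at most one
		// slot, no other thread rewrites that particular entry, and the array
		// is only flushed/reset with the write lock held.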
int32 count = atomic_get(&sNextHotVnodeIndex);
count = std::min(count, kMaxHotVnodes);
for (int32 i = 0; i < count; i++) {
if (sHotVnodes[i] == vnode) {
sHotVnodes[i] = NULL;
break;
}
}
} else if (vnode->IsUnused()) {
InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
sUnusedVnodeList.Remove(vnode);
sUnusedVnodes--;
}
vnode->SetUnused(false);
}
static inline void
flush_hot_vnodes()
{
WriteLocker hotWriteLocker(sHotVnodesLock);
flush_hot_vnodes_locked();
}
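/*!	To be called before a check/free run for unused vnodes starts. Pushes the
	check counter past the interval, so that vnode_unused() won't ask further
	callers to trigger freeing while the run is still in progress.
*/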
static inline void
unused_vnodes_check_started()
{
atomic_set(&sUnusedVnodesCheckCount, kUnusedVnodesCheckInterval + 1);
}
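/*!	To be called when a check/free run for unused vnodes is done. Resets the
	check counter.
*/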
static inline void
unused_vnodes_check_done()
{
atomic_set(&sUnusedVnodesCheckCount, 0);
}
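
/*	Illustrative only: a rough sketch of how a routine that frees unused
	vnodes might use the helpers above. The function name and the actual
	freeing steps are hypothetical.

	static void
	example_free_some_unused_vnodes(uint32 count)
	{
		// Keep vnode_unused() from asking other callers to free vnodes, too,
		// while this run is in progress.
		unused_vnodes_check_started();

		for (uint32 i = 0; i < count; i++) {
			// Make sure recently unused vnodes have been moved from the hot
			// array to the unused list.
			flush_hot_vnodes();

			InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
			Vnode* vnode = sUnusedVnodeList.Head();
			unusedLocker.Unlock();
			if (vnode == NULL)
				break;

			// ... re-reference and release the vnode via the regular VFS
			// paths, which call vnode_used()/vnode_to_be_freed() as
			// appropriate ...
		}

		unused_vnodes_check_done();
	}
*/
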
#endif	// UNUSED_VNODES_H