/*
 * Copyright 2002-2011, Haiku. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <timer.h>
#include <OS.h>
#include <string.h>
#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>
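// Per-CPU timer state: the list of scheduled events (sorted by ascending
// schedule time), the event whose hook is currently executing (if any), and
// the real time offset used for B_TIMER_REAL_TIME_BASE timers.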
struct per_cpu_timer_data {
spinlock lock;
timer* events;
timer* current_event;
int32 current_event_in_progress;
bigtime_t real_time_offset;
};
static per_cpu_timer_data sPerCPU[SMP_MAX_CPUS];
#ifdef TRACE_TIMER
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
/*! Sets the hardware timer to fire at the given absolute time.
\param scheduleTime The absolute system time for the timer expiration.
\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}
/*! Sets the hardware timer to fire at the given absolute time, using the
current system time as reference.
\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
set_hardware_timer(scheduleTime, system_time());
}
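/*! Inserts \a event into \a list, keeping the list sorted by ascending
schedule_time.
*/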
static void
add_event_to_list(timer* event, timer** list)
{
timer* next;
timer* previous = NULL;
for (next = *list; next != NULL; previous = next, next = previous->next) {
if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
break;
}
event->next = next;
if (previous != NULL)
previous->next = event;
else
*list = event;
}
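/*! Called on each CPU (via call_all_cpus()) after the real-time clock has
been changed. Shifts the schedule times of all absolute real-time base timers
on this CPU by the offset change and reprograms the hardware timer if the
head of the event list changed.
*/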
static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
per_cpu_timer_data& cpuData = sPerCPU[cpu];
SpinLocker cpuDataLocker(cpuData.lock);
bigtime_t realTimeOffset = rtc_boot_time();
if (realTimeOffset == cpuData.real_time_offset)
return;
bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
cpuData.real_time_offset = realTimeOffset;
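// Unlink all absolute timers with a real-time base from the event list and
// collect them in affectedTimers for rescheduling below.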
timer* affectedTimers = NULL;
timer** it = &cpuData.events;
timer* firstEvent = *it;
while (timer* event = *it) {
uint32 flags = event->flags;
if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
it = &event->next;
continue;
}
*it = event->next;
event->next = affectedTimers;
affectedTimers = event;
}
bool firstEventChanged = cpuData.events != firstEvent;
firstEvent = cpuData.events;
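// Re-add the affected timers with adjusted schedule times, clamping values
// that would overflow (to B_INFINITE_TIMEOUT) or become negative (to 0).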
while (affectedTimers != NULL) {
timer* event = affectedTimers;
affectedTimers = event->next;
bigtime_t oldTime = event->schedule_time;
event->schedule_time += timeDiff;
if (timeDiff >= 0) {
if (event->schedule_time < oldTime)
event->schedule_time = B_INFINITE_TIMEOUT;
} else {
if (event->schedule_time < 0)
event->schedule_time = 0;
}
add_event_to_list(event, &cpuData.events);
}
firstEventChanged |= cpuData.events != firstEvent;
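// If the head of the event list changed, the hardware timer must be
// reprogrammed to the new earliest expiration.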
if (firstEventChanged)
set_hardware_timer(cpuData.events->schedule_time);
}
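/*! Debugger command "timers": lists all scheduled timer events per CPU,
including schedule time, period, flags, and the hook's symbol, if known.
*/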
static int
dump_timers(int argc, char** argv)
{
int32 cpuCount = smp_get_num_cpus();
for (int32 i = 0; i < cpuCount; i++) {
kprintf("CPU %" B_PRId32 ":\n", i);
if (sPerCPU[i].events == NULL) {
kprintf(" no timers scheduled\n");
continue;
}
for (timer* event = sPerCPU[i].events; event != NULL;
event = event->next) {
kprintf(" [%9lld] %p: ", (long long)event->schedule_time, event);
if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
kprintf("periodic %9lld, ", (long long)event->period);
else
kprintf("one shot, ");
kprintf("flags: %#x, user data: %p, callback: %p ",
event->flags, event->user_data, event->hook);
const char* symbol;
const char* imageName;
bool exactMatch;
status_t error = elf_debug_lookup_symbol_address(
(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
if (error == B_OK && exactMatch) {
if (const char* slash = strrchr(imageName, '/'))
imageName = slash + 1;
kprintf(" %s:%s", imageName, symbol);
}
kprintf("\n");
}
}
kprintf("current time: %lld\n", (long long)system_time());
return 0;
}
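/*! Initializes the architecture-specific hardware timer and registers the
"timers" debugger command.
*/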
status_t
timer_init(kernel_args* args)
{
TRACE(("timer_init: entry\n"));
if (arch_init_timer(args) != B_OK)
panic("arch_init_timer() failed");
add_debugger_command_etc("timers", &dump_timers, "List all timers",
"\n"
"Prints a list of all scheduled timers.\n", 0);
return B_OK;
}
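/*! Called after the real time clock has been initialized; records the boot
time (real time offset) for each CPU, which is needed to convert real-time
base timers to system time.
*/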
void
timer_init_post_rtc(void)
{
bigtime_t realTimeOffset = rtc_boot_time();
int32 cpuCount = smp_get_num_cpus();
for (int32 i = 0; i < cpuCount; i++)
sPerCPU[i].real_time_offset = realTimeOffset;
}
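/*! Called when the real-time clock has been changed; has every CPU adjust
its real-time base timers via per_cpu_real_time_clock_changed().
*/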
void
timer_real_time_clock_changed()
{
call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}
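/*! Called by the architecture-specific hardware timer interrupt handler.
Invokes the hooks of all expired events on the current CPU, reschedules
periodic events, and programs the hardware timer for the next expiration.
*/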
int32
timer_interrupt()
{
per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
int32 rc = B_HANDLED_INTERRUPT;
TRACE(("timer_interrupt: time %" B_PRIdBIGTIME ", cpu %" B_PRId32 "\n",
system_time(), smp_get_current_cpu()));
spinlock* spinlock = &cpuData.lock;
acquire_spinlock(spinlock);
timer* event = cpuData.events;
while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
int mode = event->flags;
cpuData.events = event->next;
cpuData.current_event = event;
atomic_set(&cpuData.current_event_in_progress, 1);
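// The hook is invoked with the lock released; current_event and
// current_event_in_progress let cancel_timer() synchronize with us.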
release_spinlock(spinlock);
TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
event));
if (event->hook)
rc = event->hook(event);
atomic_set(&cpuData.current_event_in_progress, 0);
acquire_spinlock(spinlock);
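// If the event is periodic and wasn't cancelled while its hook ran (in which
// case current_event would have been reset to NULL), reschedule it.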
if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
&& cpuData.current_event != NULL) {
event->schedule_time += event->period;
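// If the new schedule time is a full period or more in the past, skip the
// missed ticks instead of firing back-to-back to catch up.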
bigtime_t now = system_time();
if (now >= event->schedule_time + event->period) {
event->schedule_time = now
- (now - event->schedule_time) % event->period;
}
add_event_to_list(event, &cpuData.events);
}
cpuData.current_event = NULL;
event = cpuData.events;
}
if (cpuData.events != NULL)
set_hardware_timer(cpuData.events->schedule_time);
release_spinlock(spinlock);
return rc;
}
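/*! Schedules \a event to invoke \a hook. Depending on \a flags, \a period is
a relative interval in microseconds (relative and periodic timers) or an
absolute time (B_ONE_SHOT_ABSOLUTE_TIMER); with
B_TIMER_USE_TIMER_STRUCT_TIMES the schedule_time and period already stored in
\a event are used as-is. A minimal usage sketch (sEvent and my_hook are
hypothetical):
\code
static timer sEvent;
static int32 my_hook(timer*) { return B_HANDLED_INTERRUPT; }
// fire once per second on the current CPU
add_timer(&sEvent, &my_hook, 1000000LL, B_PERIODIC_TIMER);
\endcode
*/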
status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
const bigtime_t currentTime = system_time();
if (event == NULL || hook == NULL || period < 0)
return B_BAD_VALUE;
TRACE(("add_timer: event %p\n", event));
if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) == 0) {
bigtime_t scheduleTime = period;
if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
scheduleTime += currentTime;
event->schedule_time = (int64)scheduleTime;
event->period = period;
}
event->hook = hook;
event->flags = flags;
InterruptsLocker interruptsLocker;
const int currentCPU = smp_get_current_cpu();
per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
SpinLocker locker(&cpuData.lock);
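// For absolute timers with a real-time base, convert the schedule time from
// real time to system time.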
if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
if (event->schedule_time > cpuData.real_time_offset)
event->schedule_time -= cpuData.real_time_offset;
else
event->schedule_time = 0;
}
add_event_to_list(event, &cpuData.events);
event->cpu = currentCPU;
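// If the new event is the first to expire, reprogram the hardware timer.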
if (event == cpuData.events)
set_hardware_timer(event->schedule_time, currentTime);
return B_OK;
}
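/*! Cancels the scheduled \a event. Returns \c true if the timer's hook has
already been invoked or is currently running, \c false if the event was
removed before it could fire.
*/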
bool
cancel_timer(timer* event)
{
TRACE(("cancel_timer: event %p\n", event));
InterruptsLocker _;
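// Lock the per-CPU data of the CPU the event is scheduled on. Since the cpu
// field may change while we're acquiring the lock, re-check it afterwards.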
int cpu = event->cpu;
SpinLocker spinLocker;
while (true) {
if (cpu >= SMP_MAX_CPUS)
return false;
spinLocker.SetTo(sPerCPU[cpu].lock, false);
if (cpu == event->cpu)
break;
spinLocker.Unlock();
cpu = event->cpu;
}
per_cpu_timer_data& cpuData = sPerCPU[cpu];
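// If the event's hook isn't currently being executed, just unlink the event
// from this CPU's list.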
if (event != cpuData.current_event) {
timer* current = cpuData.events;
timer* previous = NULL;
while (current != NULL) {
if (current == event) {
if (previous == NULL)
cpuData.events = current->next;
else
previous->next = current->next;
current->next = NULL;
break;
}
previous = current;
current = current->next;
}
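// Not found in the list: the timer has already fired (or was never
// scheduled).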
if (current == NULL)
return true;
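// invalidate the cpu field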
event->cpu = 0xffff;
if (cpu == smp_get_current_cpu()) {
if (cpuData.events == NULL)
arch_timer_clear_hardware_timer();
else
set_hardware_timer(cpuData.events->schedule_time);
}
return false;
}
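// The hook is currently executing: clear current_event so that
// timer_interrupt() won't reschedule a periodic timer, and, if the hook runs
// on another CPU, wait for it to complete.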
cpuData.current_event = NULL;
if (cpu != smp_get_current_cpu()) {
spinLocker.Unlock();
while (atomic_get(&cpuData.current_event_in_progress) == 1)
cpu_wait(&cpuData.current_event_in_progress, 0);
}
return true;
}
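/*! Busy-waits until at least \a microseconds have passed, without blocking
or rescheduling the calling thread.
*/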
void
spin(bigtime_t microseconds)
{
bigtime_t target = system_time() + microseconds;
while (system_time() < target)
cpu_pause();
}