author	Ingo Weinhold <ingo_weinhold@gmx.de>	2011-06-12 00:00:23 (GMT)
committer	Ingo Weinhold <ingo_weinhold@gmx.de>	2011-06-12 00:00:23 (GMT)
commit	24df65921befcd0ad0c5c7866118f922da61cb96 (patch)
tree	687a69fc5176fd3231063951b71c39312034faa1
parent	ccd31b93f10b5c8f7e34d4385a6c0c82be5295c3 (diff)
Merged signals-merge branch into trunk with the following changes: (hrev42116)
* Reorganized the kernel locking related to threads and teams.
* We now discriminate correctly between process and thread signals. Signal
  handlers have been moved to teams. Fixes #5679.
* Implemented real-time signal support, including signal queuing, SA_SIGINFO
  support, sigqueue(), sigwaitinfo(), sigtimedwait(), waitid(), and the
  addition of the real-time signal range. Closes #1935 and #2695.
* Gave SIGBUS a separate signal number. Fixes #6704.
* Implemented <time.h> clock and timer support, and fixed/completed alarm()
  and [set]itimer(). Closes #5682.
* Implemented support for thread cancellation. Closes #5686.
* Moved send_signal() from <signal.h> to <OS.h>. Fixes #7554.
* Lots of smaller, more or less related changes.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@42116 a95241bf-73f2-0310-859d-f6bbb57e9c96
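
As a quick illustration of the user-visible side of this change, here is a minimal sketch (not part of the commit, error handling omitted) exercising the newly implemented real-time signal interfaces declared in <signal.h>: a signal is queued to the own process with sigqueue() and fetched, together with its siginfo_t, via sigwaitinfo().

/* Minimal sketch (not part of the commit): queue a real-time signal with
 * sigqueue() and fetch it, including its user value, with sigwaitinfo(). */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main()
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);

	/* block the signal so it stays pending until sigwaitinfo() picks it up */
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* send ourselves SIGRTMIN, carrying a user-defined value */
	union sigval value;
	value.sival_int = 42;
	sigqueue(getpid(), SIGRTMIN, value);

	/* synchronously retrieve the queued signal and its siginfo_t */
	siginfo_t info;
	if (sigwaitinfo(&set, &info) == SIGRTMIN) {
		printf("got SIGRTMIN: si_code %d, value %d\n", info.si_code,
			info.si_value.sival_int);
	}

	return 0;
}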
-rw-r--r--build/jam/MainBuildRules5
-rw-r--r--headers/os/kernel/OS.h7
-rw-r--r--headers/os/kernel/debugger.h4
-rw-r--r--headers/posix/arch/x86/signal.h12
-rw-r--r--headers/posix/limits.h6
-rw-r--r--headers/posix/pthread.h61
-rw-r--r--headers/posix/setjmp.h3
-rw-r--r--headers/posix/signal.h361
-rw-r--r--headers/posix/sys/types.h64
-rw-r--r--headers/posix/sys/wait.h8
-rw-r--r--headers/posix/time.h37
-rw-r--r--headers/posix/unistd.h18
-rw-r--r--headers/private/kernel/DPC.h114
-rw-r--r--headers/private/kernel/UserEvent.h109
-rw-r--r--headers/private/kernel/UserTimer.h273
-rw-r--r--headers/private/kernel/arch/thread.h10
-rw-r--r--headers/private/kernel/arch/x86/arch_cpu.h27
-rw-r--r--headers/private/kernel/arch/x86/arch_thread.h3
-rw-r--r--headers/private/kernel/condition_variable.h20
-rw-r--r--headers/private/kernel/cpu.h1
-rw-r--r--headers/private/kernel/elf.h7
-rw-r--r--headers/private/kernel/kscheduler.h62
-rw-r--r--headers/private/kernel/ksignal.h237
-rw-r--r--headers/private/kernel/lock.h10
-rw-r--r--headers/private/kernel/real_time_clock.h4
-rw-r--r--headers/private/kernel/team.h20
-rw-r--r--headers/private/kernel/thread.h258
-rw-r--r--headers/private/kernel/thread_types.h615
-rw-r--r--headers/private/kernel/timer.h21
-rw-r--r--headers/private/kernel/user_debugger.h60
-rw-r--r--headers/private/kernel/usergroup.h1
-rw-r--r--headers/private/kernel/util/AutoLock.h7
-rw-r--r--headers/private/kernel/util/KernelReferenceable.h29
-rw-r--r--headers/private/libroot/libroot_private.h3
-rw-r--r--headers/private/libroot/pthread_private.h22
-rw-r--r--headers/private/libroot/signal_private.h118
-rw-r--r--headers/private/libroot/time_private.h88
-rw-r--r--headers/private/libroot/times_private.h23
-rw-r--r--headers/private/libroot/unistd_private.h23
-rw-r--r--headers/private/shared/syscall_utils.h32
-rw-r--r--headers/private/system/arch/x86/arch_commpage_defs.h4
-rw-r--r--headers/private/system/signal_defs.h43
-rw-r--r--headers/private/system/syscalls.h41
-rw-r--r--headers/private/system/thread_defs.h23
-rw-r--r--headers/private/system/tls.h1
-rw-r--r--headers/private/system/user_thread_defs.h12
-rw-r--r--headers/private/system/user_timer_defs.h50
-rw-r--r--src/add-ons/kernel/debugger/invalidate_on_exit/invalidate_on_exit.cpp4
-rw-r--r--src/add-ons/kernel/debugger/run_on_exit/run_on_exit.cpp4
-rw-r--r--src/apps/terminal/TermApp.cpp2
-rw-r--r--src/bin/debug/profile/profile.cpp2
-rw-r--r--src/bin/debug/scheduling_recorder/scheduling_recorder.cpp2
-rw-r--r--src/bin/debug/strace/MemoryReader.cpp43
-rw-r--r--src/bin/debug/strace/MemoryReader.h21
-rw-r--r--src/bin/debug/strace/strace.cpp244
-rw-r--r--src/bin/gdb/libiberty/config.h2
-rw-r--r--src/kits/network/socket.cpp22
-rw-r--r--src/libs/posix_error_mapper/Jamfile1
-rw-r--r--src/libs/posix_error_mapper/pthread_thread.cpp3
-rw-r--r--src/libs/posix_error_mapper/signal.cpp7
-rw-r--r--src/libs/posix_error_mapper/time.cpp23
-rw-r--r--src/system/kernel/DPC.cpp327
-rw-r--r--src/system/kernel/Jamfile3
-rw-r--r--src/system/kernel/TeamThreadTables.h184
-rw-r--r--src/system/kernel/UserEvent.cpp243
-rw-r--r--src/system/kernel/UserTimer.cpp1786
-rw-r--r--src/system/kernel/arch/arm/arch_thread.cpp47
-rw-r--r--src/system/kernel/arch/m68k/arch_debug.cpp2
-rw-r--r--src/system/kernel/arch/m68k/arch_int.cpp8
-rw-r--r--src/system/kernel/arch/m68k/arch_thread.cpp15
-rw-r--r--src/system/kernel/arch/mipsel/arch_thread.cpp13
-rw-r--r--src/system/kernel/arch/ppc/arch_debug.cpp2
-rw-r--r--src/system/kernel/arch/ppc/arch_int.cpp8
-rw-r--r--src/system/kernel/arch/ppc/arch_thread.cpp15
-rw-r--r--src/system/kernel/arch/x86/Jamfile4
-rw-r--r--src/system/kernel/arch/x86/arch_commpage.cpp6
-rw-r--r--src/system/kernel/arch/x86/arch_debug.cpp6
-rw-r--r--src/system/kernel/arch/x86/arch_int.cpp64
-rw-r--r--src/system/kernel/arch/x86/arch_interrupts.S74
-rw-r--r--src/system/kernel/arch/x86/arch_thread.cpp440
-rw-r--r--src/system/kernel/arch/x86/arch_user_debugger.cpp14
-rw-r--r--src/system/kernel/arch/x86/arch_x86.S21
-rw-r--r--src/system/kernel/arch/x86/asm_offsets.cpp19
-rw-r--r--src/system/kernel/arch/x86/x86_signals.cpp125
-rw-r--r--src/system/kernel/arch/x86/x86_signals.h16
-rw-r--r--src/system/kernel/arch/x86/x86_signals_asm.S71
-rw-r--r--src/system/kernel/arch/x86/x86_syscalls.cpp4
-rw-r--r--src/system/kernel/arch/x86/x86_syscalls.h2
-rw-r--r--src/system/kernel/condition_variable.cpp27
-rw-r--r--src/system/kernel/cpu.cpp25
-rw-r--r--src/system/kernel/debug/debug_heap.cpp6
-rw-r--r--src/system/kernel/debug/system_profiler.cpp182
-rw-r--r--src/system/kernel/debug/user_debugger.cpp476
-rw-r--r--src/system/kernel/device_manager/IOSchedulerSimple.cpp3
-rw-r--r--src/system/kernel/elf.cpp14
-rw-r--r--src/system/kernel/fs/Vnode.cpp4
-rw-r--r--src/system/kernel/fs/fd.cpp9
-rw-r--r--src/system/kernel/fs/fifo.cpp2
-rw-r--r--src/system/kernel/fs/vfs.cpp103
-rw-r--r--src/system/kernel/image.cpp78
-rw-r--r--src/system/kernel/kernel_versions3
-rw-r--r--src/system/kernel/lib/Jamfile6
-rw-r--r--src/system/kernel/lib/kernel_lib.h15
-rw-r--r--src/system/kernel/locks/lock.cpp36
-rw-r--r--src/system/kernel/main.cpp4
-rw-r--r--src/system/kernel/port.cpp725
-rw-r--r--src/system/kernel/posix/xsi_message_queue.cpp8
-rw-r--r--src/system/kernel/posix/xsi_semaphore.cpp7
-rw-r--r--src/system/kernel/real_time_clock.cpp27
-rw-r--r--src/system/kernel/scheduler/scheduler.cpp25
-rw-r--r--src/system/kernel/scheduler/scheduler_affine.cpp49
-rw-r--r--src/system/kernel/scheduler/scheduler_common.h86
-rw-r--r--src/system/kernel/scheduler/scheduler_simple.cpp41
-rw-r--r--src/system/kernel/scheduler/scheduler_simple_smp.cpp41
-rw-r--r--src/system/kernel/sem.cpp179
-rw-r--r--src/system/kernel/signal.cpp2121
-rw-r--r--src/system/kernel/syscalls.cpp11
-rw-r--r--src/system/kernel/team.cpp3148
-rw-r--r--src/system/kernel/thread.cpp2277
-rw-r--r--src/system/kernel/timer.cpp263
-rw-r--r--src/system/kernel/usergroup.cpp54
-rw-r--r--src/system/kernel/util/Jamfile1
-rw-r--r--src/system/kernel/util/KernelReferenceable.cpp19
-rw-r--r--src/system/kernel/vm/VMCache.cpp2
-rw-r--r--src/system/kernel/vm/vm.cpp22
-rw-r--r--src/system/kernel/vm/vm_page.cpp2
-rw-r--r--src/system/libroot/libroot_init.c14
-rw-r--r--src/system/libroot/libroot_versions3
-rw-r--r--src/system/libroot/os/thread.c23
-rw-r--r--src/system/libroot/os/time.cpp41
-rw-r--r--src/system/libroot/posix/arch/generic/longjmp_return.c17
-rw-r--r--src/system/libroot/posix/arch/generic/setjmp_save_sigs.c19
-rw-r--r--src/system/libroot/posix/errno.c2
-rw-r--r--src/system/libroot/posix/fcntl.cpp31
-rw-r--r--src/system/libroot/posix/glibc/include/bits/types.h2
-rw-r--r--src/system/libroot/posix/glibc/include/bits/typesizes.h2
-rw-r--r--src/system/libroot/posix/poll.c16
-rw-r--r--src/system/libroot/posix/pthread/Jamfile2
-rw-r--r--src/system/libroot/posix/pthread/pthread.cpp (renamed from src/system/libroot/posix/pthread/pthread.c)115
-rw-r--r--src/system/libroot/posix/pthread/pthread_cancel.cpp99
-rw-r--r--src/system/libroot/posix/pthread/pthread_cond.cpp10
-rw-r--r--src/system/libroot/posix/pthread/pthread_once.cpp111
-rw-r--r--src/system/libroot/posix/semaphore.cpp9
-rw-r--r--src/system/libroot/posix/signal/Jamfile25
-rw-r--r--src/system/libroot/posix/signal/kill.c20
-rw-r--r--src/system/libroot/posix/signal/psiginfo.cpp14
-rw-r--r--src/system/libroot/posix/signal/psignal.cpp20
-rw-r--r--src/system/libroot/posix/signal/raise.c11
-rw-r--r--src/system/libroot/posix/signal/send_signal.c11
-rw-r--r--src/system/libroot/posix/signal/set_signal_disposition.cpp49
-rw-r--r--src/system/libroot/posix/signal/set_signal_mask.cpp75
-rw-r--r--src/system/libroot/posix/signal/set_signal_stack.c3
-rw-r--r--src/system/libroot/posix/signal/sigaction.c27
-rw-r--r--src/system/libroot/posix/signal/sigaction.cpp70
-rw-r--r--src/system/libroot/posix/signal/sighold.cpp32
-rw-r--r--src/system/libroot/posix/signal/sigignore.cpp32
-rw-r--r--src/system/libroot/posix/signal/siginterrupt.cpp33
-rw-r--r--src/system/libroot/posix/signal/signal.c30
-rw-r--r--src/system/libroot/posix/signal/signal.cpp72
-rw-r--r--src/system/libroot/posix/signal/signal_limits.cpp23
-rw-r--r--src/system/libroot/posix/signal/sigpause.cpp26
-rw-r--r--src/system/libroot/posix/signal/sigpending.c26
-rw-r--r--src/system/libroot/posix/signal/sigpending.cpp46
-rw-r--r--src/system/libroot/posix/signal/sigprocmask.c33
-rw-r--r--src/system/libroot/posix/signal/sigqueue.cpp35
-rw-r--r--src/system/libroot/posix/signal/sigrelse.cpp32
-rw-r--r--src/system/libroot/posix/signal/sigset.c75
-rw-r--r--src/system/libroot/posix/signal/sigset.cpp97
-rw-r--r--src/system/libroot/posix/signal/sigset_accessors.cpp156
-rw-r--r--src/system/libroot/posix/signal/sigsuspend.c22
-rw-r--r--src/system/libroot/posix/signal/sigsuspend.cpp47
-rw-r--r--src/system/libroot/posix/signal/sigtimedwait.cpp47
-rw-r--r--src/system/libroot/posix/signal/sigwait.c18
-rw-r--r--src/system/libroot/posix/signal/sigwait.cpp54
-rw-r--r--src/system/libroot/posix/signal/sigwaitinfo.cpp14
-rw-r--r--src/system/libroot/posix/signal/strsignal.c69
-rw-r--r--src/system/libroot/posix/signal/strsignal.cpp101
-rw-r--r--src/system/libroot/posix/sys/Jamfile6
-rw-r--r--src/system/libroot/posix/sys/flock.c9
-rw-r--r--src/system/libroot/posix/sys/itimer.c60
-rw-r--r--src/system/libroot/posix/sys/itimer.cpp106
-rw-r--r--src/system/libroot/posix/sys/mman.cpp2
-rw-r--r--src/system/libroot/posix/sys/select.c72
-rw-r--r--src/system/libroot/posix/sys/times.c39
-rw-r--r--src/system/libroot/posix/sys/times.cpp63
-rw-r--r--src/system/libroot/posix/sys/wait.c82
-rw-r--r--src/system/libroot/posix/sys/wait.cpp113
-rw-r--r--src/system/libroot/posix/sys/xsi_msg_queue.cpp8
-rw-r--r--src/system/libroot/posix/time/Jamfile6
-rw-r--r--src/system/libroot/posix/time/clock.c22
-rw-r--r--src/system/libroot/posix/time/clock.cpp39
-rw-r--r--src/system/libroot/posix/time/clock_support.cpp171
-rw-r--r--src/system/libroot/posix/time/nanosleep.c43
-rw-r--r--src/system/libroot/posix/time/stime.c10
-rw-r--r--src/system/libroot/posix/time/timer_support.cpp186
-rw-r--r--src/system/libroot/posix/unistd/Jamfile2
-rw-r--r--src/system/libroot/posix/unistd/close.c17
-rw-r--r--src/system/libroot/posix/unistd/conf.cpp (renamed from src/system/libroot/posix/unistd/conf.c)47
-rw-r--r--src/system/libroot/posix/unistd/pause.c4
-rw-r--r--src/system/libroot/posix/unistd/read.c41
-rw-r--r--src/system/libroot/posix/unistd/sleep.c8
-rw-r--r--src/system/libroot/posix/unistd/sync.c17
-rw-r--r--src/system/libroot/posix/unistd/system.cpp12
-rw-r--r--src/system/libroot/posix/unistd/write.c41
-rw-r--r--src/tests/system/kernel/unit/kernel_unit_tests.cpp4
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_getres/5-1.c56
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_getres/6-1.c60
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_getres/6-2.c63
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_gettime/8-2.c62
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_gettime/coverage.txt2
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_settime/17-1.c44
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_settime/17-2.c95
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/clock_settime/coverage.txt2
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/pthread_once/6-1.c3
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigqueue/12-1.c2
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigqueue/3-1.c2
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigset/6-1.c2
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigset/7-1.c2
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigset/8-1.c23
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigwait/3-1.c108
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigwait/assertions.xml6
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/sigwait/coverage.txt1
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_create/10-1.c78
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_create/11-1.c72
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_delete/speculative/5-1.c38
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_delete/speculative/5-2.c94
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_getoverrun/speculative/6-1.c39
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_getoverrun/speculative/6-2.c57
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_getoverrun/speculative/6-3.c49
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_gettime/speculative/6-1.c38
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_gettime/speculative/6-2.c48
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_gettime/speculative/6-3.c52
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_settime/speculative/12-1.c40
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_settime/speculative/12-2.c54
-rw-r--r--src/tests/system/libroot/posix/posixtestsuite/conformance/interfaces/timer_settime/speculative/12-3.c57
235 files changed, 14889 insertions, 6508 deletions
diff --git a/build/jam/MainBuildRules b/build/jam/MainBuildRules
index e96b74c..69d6597 100644
--- a/build/jam/MainBuildRules
+++ b/build/jam/MainBuildRules
@@ -314,6 +314,11 @@ rule CreateAsmStructOffsetsHeader header : source
}
}
+ # Turn off "invalid use of offsetof()" macro warning. We use offsetof() also
+ # for non-PODs. Since we're using the same compiler for the whole kernel and
+ # don't do virtual inheritence, that works well enough.
+ flags += -Wno-invalid-offsetof ;
+
# locate object, search for source, and set on target variables
Depends $(header) : $(source) ;
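
The comment added above explains why the build rule now passes -Wno-invalid-offsetof. The following small C++ snippet is illustrative only (not from the commit) and shows the situation that warning flags: offsetof() applied to a non-POD type, as done by the asm struct offsets headers.

// Illustrative only (not from the commit): offsetof() on a non-POD type makes
// GCC emit "invalid use of offsetof()", which -Wno-invalid-offsetof disables.
#include <cstddef>

struct NonPod {
	NonPod() : value(0) {}	// user-defined constructor -> not a POD
	int		value;
	long	counter;
};

// Compiles silently with -Wno-invalid-offsetof; for single, non-virtual
// inheritance the result is still what the kernel build expects.
static const size_t kCounterOffset = offsetof(NonPod, counter);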
diff --git a/headers/os/kernel/OS.h b/headers/os/kernel/OS.h
index eb34da5..a12deed 100644
--- a/headers/os/kernel/OS.h
+++ b/headers/os/kernel/OS.h
@@ -311,6 +311,8 @@ typedef struct {
#define B_REAL_TIME_PRIORITY 120
#define B_SYSTEM_TIMEBASE 0
+ /* time base for snooze_*(), compatible with the clockid_t constants defined
+ in <time.h> */
#define B_FIRST_REAL_TIME_PRIORITY B_REAL_TIME_DISPLAY_PRIORITY
@@ -739,6 +741,11 @@ extern int32 is_computer_on(void);
extern double is_computer_on_fire(void);
+/* signal related functions */
+int send_signal(thread_id threadID, unsigned int signal);
+void set_signal_stack(void* base, size_t size);
+
+
/* WARNING: Experimental API! */
enum {
diff --git a/headers/os/kernel/debugger.h b/headers/os/kernel/debugger.h
index 12add40..5b86c70 100644
--- a/headers/os/kernel/debugger.h
+++ b/headers/os/kernel/debugger.h
@@ -151,7 +151,7 @@ typedef enum {
B_DEBUG_MESSAGE_CLEAR_WATCHPOINT, // clear a watchpoint
B_DEBUG_MESSAGE_SET_SIGNAL_MASKS, // set/get a thread's masks of signals
B_DEBUG_MESSAGE_GET_SIGNAL_MASKS, // the debugger is interested in
- B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER, // set/get a thread's signal handler for
+ B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER, // set/get the team's signal handler for
B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER, // a signal
B_DEBUG_MESSAGE_PREPARE_HANDOVER, // prepares the debugged team for being
@@ -356,7 +356,6 @@ typedef struct {
// B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER
typedef struct {
- thread_id thread; // the thread
int signal; // the signal
struct sigaction handler; // the new signal handler
} debug_nub_set_signal_handler;
@@ -365,7 +364,6 @@ typedef struct {
typedef struct {
port_id reply_port; // port to send the reply to
- thread_id thread; // the thread
int signal; // the signal
} debug_nub_get_signal_handler;
diff --git a/headers/posix/arch/x86/signal.h b/headers/posix/arch/x86/signal.h
index 0d69b52..6d04281 100644
--- a/headers/posix/arch/x86/signal.h
+++ b/headers/posix/arch/x86/signal.h
@@ -90,7 +90,7 @@ typedef struct mmx_regs {
unsigned char mm7[10];
unsigned char _reserved_154_159[6];
} mmx_regs;
-
+
typedef struct xmmx_regs {
unsigned char xmm0[16];
unsigned char xmm1[16];
@@ -105,7 +105,7 @@ typedef struct xmmx_regs {
typedef struct new_extended_regs {
unsigned short fp_control;
unsigned short fp_status;
- unsigned short fp_tag;
+ unsigned short fp_tag;
unsigned short fp_opcode;
unsigned long fp_eip;
unsigned short fp_cs;
@@ -128,7 +128,7 @@ typedef struct extended_regs {
old_extended_regs old_format;
new_extended_regs new_format;
} state;
- unsigned long format;
+ unsigned long format;
} extended_regs;
struct vregs {
@@ -141,9 +141,11 @@ struct vregs {
unsigned long ebp;
unsigned long _reserved_1;
extended_regs xregs;
- unsigned long _reserved_2[3];
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebx;
};
-
+
#endif /* __INTEL__ */
#endif /* _ARCH_SIGNAL_H_ */
diff --git a/headers/posix/limits.h b/headers/posix/limits.h
index 5c69bc9..8fd8ea1 100644
--- a/headers/posix/limits.h
+++ b/headers/posix/limits.h
@@ -71,6 +71,12 @@
#define _POSIX_STREAM_MAX (8)
#define _POSIX_TTY_NAME_MAX (256)
#define _POSIX_TZNAME_MAX (3)
+#define _POSIX_SEM_VALUE_MAX INT_MAX
+#define _POSIX_SIGQUEUE_MAX 32
+#define _POSIX_RTSIG_MAX 8
+#define _POSIX_CLOCKRES_MIN 20000000
+#define _POSIX_TIMER_MAX 32
+#define _POSIX_DELAYTIMER_MAX 32
#define _POSIX2_LINE_MAX (2048)
diff --git a/headers/posix/pthread.h b/headers/posix/pthread.h
index 3004de2..cc5b900 100644
--- a/headers/posix/pthread.h
+++ b/headers/posix/pthread.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2001-2010 Haiku Inc. All Rights Reserved.
+ * Copyright 2001-2011 Haiku, Inc. All Rights Reserved.
* Distributed under the terms of the Haiku License.
*/
#ifndef _PTHREAD_H_
@@ -8,66 +8,10 @@
#include <sched.h>
#include <stdint.h>
+#include <sys/types.h>
#include <time.h>
-typedef struct _pthread_thread *pthread_t;
-typedef struct _pthread_attr *pthread_attr_t;
-typedef struct _pthread_mutex pthread_mutex_t;
-typedef struct _pthread_mutexattr *pthread_mutexattr_t;
-typedef struct _pthread_cond pthread_cond_t;
-typedef struct _pthread_condattr *pthread_condattr_t;
-typedef int pthread_key_t;
-typedef struct _pthread_once pthread_once_t;
-typedef struct _pthread_rwlock pthread_rwlock_t;
-typedef struct _pthread_rwlockattr *pthread_rwlockattr_t;
-typedef struct _pthread_spinlock pthread_spinlock_t;
-/*
-typedef struct _pthread_barrier *pthread_barrier_t;
-typedef struct _pthread_barrierattr *pthread_barrierattr_t;
-*/
-
-struct _pthread_mutex {
- uint32_t flags;
- int32_t lock;
- int32_t unused;
- int32_t owner;
- int32_t owner_count;
-};
-
-struct _pthread_cond {
- uint32_t flags;
- int32_t unused;
- pthread_mutex_t *mutex;
- int32_t waiter_count;
- int32_t lock;
-};
-
-struct _pthread_once {
- int32_t state;
-};
-
-struct _pthread_rwlock {
- uint32_t flags;
- int32_t owner;
- union {
- struct {
- int32_t sem;
- } shared;
- struct {
- int32_t lock_sem;
- int32_t lock_count;
- int32_t reader_count;
- int32_t writer_count;
- void* waiters[2];
- } local;
- };
-};
-
-struct _pthread_spinlock {
- int32_t lock;
-};
-
#define PTHREAD_MUTEX_DEFAULT 0
#define PTHREAD_MUTEX_NORMAL 1
#define PTHREAD_MUTEX_ERRORCHECK 2
@@ -278,7 +222,6 @@ extern int pthread_equal(pthread_t t1, pthread_t t2);
extern void pthread_exit(void *value_ptr);
extern int pthread_join(pthread_t thread, void **_value);
extern pthread_t pthread_self(void);
-extern int pthread_kill(pthread_t thread, int sig);
extern int pthread_getconcurrency(void);
extern int pthread_setconcurrency(int newLevel);
diff --git a/headers/posix/setjmp.h b/headers/posix/setjmp.h
index cbae76b..30b24ea 100644
--- a/headers/posix/setjmp.h
+++ b/headers/posix/setjmp.h
@@ -15,8 +15,7 @@
typedef struct __jmp_buf_tag {
__jmp_buf regs; /* saved registers, stack & program pointer */
- int mask_was_saved;
- sigset_t saved_mask;
+ sigset_t inverted_signal_mask;
} jmp_buf[1];
typedef jmp_buf sigjmp_buf;
diff --git a/headers/posix/signal.h b/headers/posix/signal.h
index ab3cdb0..c43aef7 100644
--- a/headers/posix/signal.h
+++ b/headers/posix/signal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2002-2010 Haiku Inc. All Rights Reserved.
+ * Copyright 2002-2011, Haiku, Inc. All Rights Reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _SIGNAL_H_
@@ -10,48 +10,73 @@
typedef int sig_atomic_t;
-typedef __haiku_int32 sigset_t;
+typedef __haiku_uint64 sigset_t;
-typedef void (*sighandler_t)(int);
- /* GNU-like signal handler typedef */
-typedef void (*__signal_func_ptr)(int);
- /* deprecated, for compatibility with BeOS only */
+/* macros defining the standard signal handling behavior */
+#define SIG_DFL ((__sighandler_t)0) /* "default" signal behaviour */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* an error occurred during signal
+ processing */
+#define SIG_HOLD ((__sighandler_t)3) /* the signal was hold */
+
+/* macros specifying the event notification type (sigevent::sigev_notify) */
+#define SIGEV_NONE 0 /* no notification */
+#define SIGEV_SIGNAL 1 /* notify via queued signal */
+#define SIGEV_THREAD 2 /* notify via function called in new thread */
-/* macros defining the standard signal handling behavior */
-#define SIG_DFL ((sighandler_t)0) /* "default" signal behaviour */
-#define SIG_IGN ((sighandler_t)1) /* ignore signal */
-#define SIG_ERR ((sighandler_t)-1) /* an error occurred during signal processing */
-#define SIG_HOLD ((sighandler_t)3) /* the signal was hold */
-
-/* TODO: Support this structure, or more precisely the SA_SIGINFO flag. To do
- * this properly we need real-time signal support. Both are commented out for
- * the time being to not make "configure" scripts think we do support them. */
-#if 0
-typedef struct {
- int si_signo; /* signal number */
- int si_code; /* signal code */
- int si_errno; /* if non zero, an error number associated with this signal */
- pid_t si_pid; /* sending process ID */
- uid_t si_uid; /* real user ID of sending process */
- void *si_addr; /* address of faulting instruction */
- int si_status; /* exit value or signal */
- long si_band; /* band event for SIGPOLL */
+union sigval {
+ int sival_int;
+ void* sival_ptr;
+};
+
+struct sigevent {
+ int sigev_notify; /* notification type */
+ int sigev_signo; /* signal number */
+ union sigval sigev_value; /* user-defined signal value */
+ void (*sigev_notify_function)(union sigval);
+ /* notification function in case of
+ SIGEV_THREAD */
+ pthread_attr_t* sigev_notify_attributes;
+ /* pthread creation attributes in case of
+ SIGEV_THREAD */
+};
+
+typedef struct __siginfo_t {
+ int si_signo; /* signal number */
+ int si_code; /* signal code */
+ int si_errno; /* if non zero, an error number associated with
+ this signal */
+ pid_t si_pid; /* sending process ID */
+ uid_t si_uid; /* real user ID of sending process */
+ void* si_addr; /* address of faulting instruction */
+ int si_status; /* exit value or signal */
+ long si_band; /* band event for SIGPOLL */
+ union sigval si_value; /* signal value */
} siginfo_t;
-#endif /* 0 */
-/*
- * structure used by sigaction()
- *
- * Note: the 'sa_userdata' field is a non-POSIX extension.
- * See the documentation for more info on this.
- */
+
+/* signal handler function types */
+typedef void (*__sighandler_t)(int);
+typedef void (*__siginfo_handler_t)(int, siginfo_t*, void*);
+
+#ifdef __USE_GNU
+typedef __sighandler_t sighandler_t;
+ /* GNU-like signal handler typedef */
+#endif
+
+
+/* structure used by sigaction() */
struct sigaction {
- sighandler_t sa_handler;
- sigset_t sa_mask;
- int sa_flags;
- void *sa_userdata; /* will be passed to the signal handler */
+ union {
+ __sighandler_t sa_handler;
+ __siginfo_handler_t sa_sigaction;
+ };
+ sigset_t sa_mask;
+ int sa_flags;
+ void* sa_userdata; /* will be passed to the signal
+ handler, BeOS extension */
};
/* values for sa_flags */
@@ -61,7 +86,7 @@ struct sigaction {
#define SA_NODEFER 0x08
#define SA_RESTART 0x10
#define SA_ONSTACK 0x20
-/* #define SA_SIGINFO 0x40 */
+#define SA_SIGINFO 0x40
#define SA_NOMASK SA_NODEFER
#define SA_STACK SA_ONSTACK
#define SA_ONESHOT SA_RESETHAND
@@ -73,20 +98,13 @@ struct sigaction {
#define MINSIGSTKSZ 4096
#define SIGSTKSZ 16384
-/*
- * for signals using an alternate stack
- */
+/* for signals using an alternate stack */
typedef struct stack_t {
- void *ss_sp;
+ void* ss_sp;
size_t ss_size;
int ss_flags;
} stack_t;
-typedef struct sigstack {
- int ss_onstack;
- void *ss_sp;
-} sigstack;
-
/* for the 'how' arg of sigprocmask() */
#define SIG_BLOCK 1
#define SIG_UNBLOCK 2
@@ -99,91 +117,166 @@ typedef struct sigstack {
* some consistency with UN*X conventions so that things
* like "kill -9" do what you expect.
*/
-#define SIGHUP 1 /* hangup -- tty is gone! */
-#define SIGINT 2 /* interrupt */
-#define SIGQUIT 3 /* `quit' special character typed in tty */
-#define SIGILL 4 /* illegal instruction */
-#define SIGCHLD 5 /* child process exited */
-#define SIGABRT 6 /* abort() called, dont' catch */
-#define SIGPIPE 7 /* write to a pipe w/no readers */
-#define SIGFPE 8 /* floating point exception */
-#define SIGKILL 9 /* kill a team (not catchable) */
-#define SIGSTOP 10 /* suspend a thread (not catchable) */
-#define SIGSEGV 11 /* segmentation violation (read: invalid pointer) */
-#define SIGCONT 12 /* continue execution if suspended */
-#define SIGTSTP 13 /* `stop' special character typed in tty */
-#define SIGALRM 14 /* an alarm has gone off (see alarm()) */
-#define SIGTERM 15 /* termination requested */
-#define SIGTTIN 16 /* read of tty from bg process */
-#define SIGTTOU 17 /* write to tty from bg process */
-#define SIGUSR1 18 /* app defined signal 1 */
-#define SIGUSR2 19 /* app defined signal 2 */
-#define SIGWINCH 20 /* tty window size changed */
-#define SIGKILLTHR 21 /* be specific: kill just the thread, not team */
-#define SIGTRAP 22 /* Trace/breakpoint trap */
-#define SIGPOLL 23 /* Pollable event */
-#define SIGPROF 24 /* Profiling timer expired */
-#define SIGSYS 25 /* Bad system call */
-#define SIGURG 26 /* High bandwidth data is available at socket */
-#define SIGVTALRM 27 /* Virtual timer expired */
-#define SIGXCPU 28 /* CPU time limit exceeded */
-#define SIGXFSZ 29 /* File size limit exceeded */
-
-#define SIGBUS SIGSEGV /* for old style code */
-
-/*
- * Signal numbers 30-32 are currently free but may be used in future
- * releases. Use them at your own peril (if you do use them, at least
- * be smart and use them backwards from signal 32).
- */
-#define MAX_SIGNO 32 /* the most signals that a single thread can reference */
-#define __signal_max 29 /* the largest signal number that is actually defined */
-#define NSIG (__signal_max+1)
- /* the number of defined signals */
+#define SIGHUP 1 /* hangup -- tty is gone! */
+#define SIGINT 2 /* interrupt */
+#define SIGQUIT 3 /* `quit' special character typed in tty */
+#define SIGILL 4 /* illegal instruction */
+#define SIGCHLD 5 /* child process exited */
+#define SIGABRT 6 /* abort() called, dont' catch */
+#define SIGPIPE 7 /* write to a pipe w/no readers */
+#define SIGFPE 8 /* floating point exception */
+#define SIGKILL 9 /* kill a team (not catchable) */
+#define SIGSTOP 10 /* suspend a thread (not catchable) */
+#define SIGSEGV 11 /* segmentation violation (read: invalid pointer) */
+#define SIGCONT 12 /* continue execution if suspended */
+#define SIGTSTP 13 /* `stop' special character typed in tty */
+#define SIGALRM 14 /* an alarm has gone off (see alarm()) */
+#define SIGTERM 15 /* termination requested */
+#define SIGTTIN 16 /* read of tty from bg process */
+#define SIGTTOU 17 /* write to tty from bg process */
+#define SIGUSR1 18 /* app defined signal 1 */
+#define SIGUSR2 19 /* app defined signal 2 */
+#define SIGWINCH 20 /* tty window size changed */
+#define SIGKILLTHR 21 /* be specific: kill just the thread, not team */
+#define SIGTRAP 22 /* Trace/breakpoint trap */
+#define SIGPOLL 23 /* Pollable event */
+#define SIGPROF 24 /* Profiling timer expired */
+#define SIGSYS 25 /* Bad system call */
+#define SIGURG 26 /* High bandwidth data is available at socket */
+#define SIGVTALRM 27 /* Virtual timer expired */
+#define SIGXCPU 28 /* CPU time limit exceeded */
+#define SIGXFSZ 29 /* File size limit exceeded */
+#define SIGBUS 30 /* access to undefined portion of a memory object */
+#define SIGRESERVED1 31 /* reserved for future use */
+#define SIGRESERVED2 32 /* reserved for future use */
+
+#define SIGRTMIN (__signal_get_sigrtmin())
+ /* lowest realtime signal number */
+#define SIGRTMAX (__signal_get_sigrtmax())
+ /* greatest realtime signal number */
+
+#define __MAX_SIGNO 64 /* greatest possible signal number, can be used (+1)
+ as size of static arrays */
+#define NSIG (__MAX_SIGNO + 1)
+ /* BSD extension, size of the sys_siglist table,
+ obsolete */
+
+
+/* Signal code values appropriate for siginfo_t::si_code: */
+/* any signal */
+#define SI_USER 0 /* signal sent by user */
+#define SI_QUEUE 1 /* signal sent by sigqueue() */
+#define SI_TIMER 2 /* signal sent on timer_settime() timeout */
+#define SI_ASYNCIO 3 /* signal sent on asynchronous I/O completion */
+#define SI_MESGQ 4 /* signal sent on arrival of message on empty
+ message queue */
+/* SIGILL */
+#define ILL_ILLOPC 10 /* illegal opcode */
+#define ILL_ILLOPN 11 /* illegal operand */
+#define ILL_ILLADR 12 /* illegal addressing mode */
+#define ILL_ILLTRP 13 /* illegal trap */
+#define ILL_PRVOPC 14 /* privileged opcode */
+#define ILL_PRVREG 15 /* privileged register */
+#define ILL_COPROC 16 /* coprocessor error */
+#define ILL_BADSTK 17 /* internal stack error */
+/* SIGFPE */
+#define FPE_INTDIV 20 /* integer division by zero */
+#define FPE_INTOVF 21 /* integer overflow */
+#define FPE_FLTDIV 22 /* floating-point division by zero */
+#define FPE_FLTOVF 23 /* floating-point overflow */
+#define FPE_FLTUND 24 /* floating-point underflow */
+#define FPE_FLTRES 25 /* floating-point inexact result */
+#define FPE_FLTINV 26 /* invalid floating-point operation */
+#define FPE_FLTSUB 27 /* subscript out of range */
+/* SIGSEGV */
+#define SEGV_MAPERR 30 /* address not mapped to object */
+#define SEGV_ACCERR 31 /* invalid permissions for mapped object */
+/* SIGBUS */
+#define BUS_ADRALN 40 /* invalid address alignment */
+#define BUS_ADRERR 41 /* nonexistent physical address */
+#define BUS_OBJERR 42 /* object-specific hardware error */
+/* SIGTRAP */
+#define TRAP_BRKPT 50 /* process breakpoint */
+#define TRAP_TRACE 51 /* process trace trap. */
+/* SIGCHLD */
+#define CLD_EXITED 60 /* child exited */
+#define CLD_KILLED 61 /* child terminated abnormally without core dump */
+#define CLD_DUMPED 62 /* child terminated abnormally with core dump */
+#define CLD_TRAPPED 63 /* traced child trapped */
+#define CLD_STOPPED 64 /* child stopped */
+#define CLD_CONTINUED 65 /* stopped child continued */
+/* SIGPOLL */
+#define POLL_IN 70 /* input available */
+#define POLL_OUT 71 /* output available */
+#define POLL_MSG 72 /* input message available */
+#define POLL_ERR 73 /* I/O error */
+#define POLL_PRI 74 /* high priority input available */
+#define POLL_HUP 75 /* device disconnected */
/* the global table of text strings containing descriptions for each signal */
-extern const char * const sys_siglist[NSIG];
+extern const char* const sys_siglist[NSIG];
+ /* BSD extension, obsolete, use strsignal() instead */
#ifdef __cplusplus
extern "C" {
#endif
-sighandler_t signal(int sig, sighandler_t signalHandler);
-sighandler_t sigset(int sig, sighandler_t signalHandler);
-int raise(int sig);
-int kill(pid_t pid, int sig);
-int send_signal(pid_t tid, unsigned int sig);
-int killpg(pid_t processGroupID, int sig);
-
-int sigaction(int sig, const struct sigaction *act, struct sigaction *oact);
-int siginterrupt(int sig, int flag);
-int sigprocmask(int how, const sigset_t *set, sigset_t *oset);
-int sigpending(sigset_t *set);
-int sigsuspend(const sigset_t *mask);
-int sigwait(const sigset_t *set, int *sig);
-
-int sigemptyset(sigset_t *set);
-int sigfillset(sigset_t *set);
-int sigaddset(sigset_t *set, int signo);
-int sigdelset(sigset_t *set, int signo);
-int sigismember(const sigset_t *set, int signo);
-int sigignore(int signo);
-int sighold(int signo);
-int sigrelse(int signo);
-int sigpause(int signo);
-
-void set_signal_stack(void *ptr, size_t size);
-int sigaltstack(const stack_t *ss, stack_t *oss);
-
-/* pthread extension : equivalent of sigprocmask() */
-int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
+
+/* signal management (actions and block masks) */
+__sighandler_t signal(int signal, __sighandler_t signalHandler);
+int sigaction(int signal, const struct sigaction* action,
+ struct sigaction* oldAction);
+__sighandler_t sigset(int signal, __sighandler_t signalHandler);
+int sigignore(int signal);
+int siginterrupt(int signal, int flag);
+
+int sigprocmask(int how, const sigset_t* set, sigset_t* oldSet);
+int pthread_sigmask(int how, const sigset_t* set, sigset_t* oldSet);
+int sighold(int signal);
+int sigrelse(int signal);
+
+/* sending signals */
+int raise(int signal);
+int kill(pid_t pid, int signal);
+int killpg(pid_t processGroupID, int signal);
+int sigqueue(pid_t pid, int signal, const union sigval userValue);
+int pthread_kill(pthread_t thread, int signal);
+
+/* querying and waiting for signals */
+int sigpending(sigset_t* set);
+int sigsuspend(const sigset_t* mask);
+int sigpause(int signal);
+int sigwait(const sigset_t* set, int* _signal);
+int sigwaitinfo(const sigset_t* set, siginfo_t* info);
+int sigtimedwait(const sigset_t* set, siginfo_t* info,
+ const struct timespec* timeout);
+
+/* setting the per-thread signal stack */
+int sigaltstack(const stack_t* stack, stack_t* oldStack);
+
+/* signal set (sigset_t) manipulation */
+int sigemptyset(sigset_t* set);
+int sigfillset(sigset_t* set);
+int sigaddset(sigset_t* set, int signal);
+int sigdelset(sigset_t* set, int signal);
+int sigismember(const sigset_t* set, int signal);
+
+/* printing signal names */
+void psiginfo(const siginfo_t* info, const char* message);
+void psignal(int signal, const char* message);
+
+/* implementation private */
+int __signal_get_sigrtmin();
+int __signal_get_sigrtmax();
+
#ifdef __cplusplus
}
#endif
+
/* TODO: move this into the documentation!
* ==================================================
* !!! SPECIAL NOTES CONCERNING NON-POSIX EXTENSIONS:
@@ -205,7 +298,7 @@ int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
* handling. It also allows an opportunity, via the 'sigaction' struct, to
* enable additional data to be passed to the handler. For example:
* void
- * my_signal_handler(int sig, char *userData, vregs regs)
+ * my_signal_handler(int sig, char* userData, vregs* regs)
* {
* . . .
* }
@@ -213,7 +306,7 @@ int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
* struct sigaction sa;
* char data_buffer[32];
*
- * sa.sa_handler = (sighandler_t)my_signal_handler;
+ * sa.sa_handler = (__sighandler_t)my_signal_handler;
* sigemptyset(&sa.sa_mask);
* sa.sa_userdata = userData;
*
@@ -223,8 +316,9 @@ int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
* The two additional arguments available to the signal handler are extensions
* to the Posix standard. This feature was introduced by the BeOS and retained
* by Haiku. However, to remain compatible with Posix and ANSI C, the type
- * of the sa_handler field is defined as 'sighandler_t'. This requires the handler
- * to be cast when assigned to the sa_handler field, as in the example above.
+ * of the sa_handler field is defined as '__sighandler_t'. This requires the
+ * handler to be cast when assigned to the sa_handler field, as in the example
+ * above.
*
* The 3 arguments that Haiku provides to signal handlers are as follows:
* 1) The first argument is the (usual) signal number.
@@ -234,10 +328,14 @@ int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
*
* 3) The third argument is a pointer to a vregs struct (defined below).
* The vregs struct contains the contents of the volatile registers at
- * the time the signal was delivered to your thread. You can change the fields
- * of the structure. After your signal handler completes, the OS uses this struct
- * to reload the registers for your thread (privileged registers are not loaded
- * of course). The vregs struct is of course terribly machine dependent.
+ * the time the signal was delivered to your thread. You can change the
+ * fields of the structure. After your signal handler completes, the OS uses
+ * this struct to reload the registers for your thread (privileged registers
+ * are not loaded of course). The vregs struct is of course terribly machine
+ * dependent.
+ * Note that in BeOS the vregs argument was passed by value, not by pointer.
+ * While Haiku retains binary compability with code compiled for BeOS, code
+ * built under Haiku must use the pointer argument.
*/
/*
@@ -245,11 +343,22 @@ int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
*
* signal handlers get this as the last argument
*/
-
typedef struct vregs vregs;
+ /* BeOS extension */
+
/* include architecture specific definitions */
#include __HAIKU_ARCH_HEADER(signal.h)
+typedef struct vregs mcontext_t;
+
+typedef struct __ucontext_t {
+ struct __ucontext_t* uc_link;
+ sigset_t uc_sigmask;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+} ucontext_t;
+
+
#endif /* _SIGNAL_H_ */
diff --git a/headers/posix/sys/types.h b/headers/posix/sys/types.h
index d5ad169..13c7830 100644
--- a/headers/posix/sys/types.h
+++ b/headers/posix/sys/types.h
@@ -52,6 +52,70 @@ typedef char* caddr_t;
typedef __haiku_addr_t addr_t;
typedef __haiku_int32 key_t;
+typedef __haiku_std_int32 clockid_t;
+typedef struct __timer_t* timer_t;
+
+
+/* pthread types */
+
+typedef struct _pthread_thread *pthread_t;
+typedef struct _pthread_attr *pthread_attr_t;
+typedef struct _pthread_mutex pthread_mutex_t;
+typedef struct _pthread_mutexattr *pthread_mutexattr_t;
+typedef struct _pthread_cond pthread_cond_t;
+typedef struct _pthread_condattr *pthread_condattr_t;
+typedef int pthread_key_t;
+typedef struct _pthread_once pthread_once_t;
+typedef struct _pthread_rwlock pthread_rwlock_t;
+typedef struct _pthread_rwlockattr *pthread_rwlockattr_t;
+typedef struct _pthread_spinlock pthread_spinlock_t;
+/*
+typedef struct _pthread_barrier *pthread_barrier_t;
+typedef struct _pthread_barrierattr *pthread_barrierattr_t;
+*/
+
+struct _pthread_mutex {
+ __haiku_std_uint32 flags;
+ __haiku_std_int32 lock;
+ __haiku_std_int32 unused;
+ __haiku_std_int32 owner;
+ __haiku_std_int32 owner_count;
+};
+
+struct _pthread_cond {
+ __haiku_std_uint32 flags;
+ __haiku_std_int32 unused;
+ pthread_mutex_t* mutex;
+ __haiku_std_int32 waiter_count;
+ __haiku_std_int32 lock;
+};
+
+struct _pthread_once {
+ __haiku_std_int32 state;
+};
+
+struct _pthread_rwlock {
+ __haiku_std_uint32 flags;
+ __haiku_std_int32 owner;
+ union {
+ struct {
+ __haiku_std_int32 sem;
+ } shared;
+ struct {
+ __haiku_std_int32 lock_sem;
+ __haiku_std_int32 lock_count;
+ __haiku_std_int32 reader_count;
+ __haiku_std_int32 writer_count;
+ void* waiters[2];
+ } local;
+ };
+};
+
+struct _pthread_spinlock {
+ __haiku_std_int32 lock;
+};
+
+
#include <null.h>
#include <size_t.h>
#include <time.h>
diff --git a/headers/posix/sys/wait.h b/headers/posix/sys/wait.h
index 7c9c095..6b32173 100644
--- a/headers/posix/sys/wait.h
+++ b/headers/posix/sys/wait.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2010 Haiku Inc. All Rights Reserved.
+ * Copyright 2004-2011, Haiku, Inc. All Rights Reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _SYS_WAIT_H
@@ -28,16 +28,12 @@
#define WIFCORED(value) ((value) & 0x10000)
#define WIFCONTINUED(value) ((value) & 0x20000)
-/* TODO: waitid() is part of the real-time signal extension. Uncomment when
- * implemented! */
-#if 0
/* ID types for waitid() */
typedef enum {
P_ALL, /* wait for any children, ignore ID */
P_PID, /* wait for the child whose process ID matches */
P_PGID /* wait for any child whose process group ID matches */
} idtype_t;
-#endif /* 0 */
#ifdef __cplusplus
@@ -46,7 +42,7 @@ extern "C" {
extern pid_t wait(int *_status);
extern pid_t waitpid(pid_t pid, int *_status, int options);
-/* extern int waitid(idtype_t idType, id_t id, siginfo_t *info, int options); */
+extern int waitid(idtype_t idType, id_t id, siginfo_t *info, int options);
#ifdef __cplusplus
}
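
With waitid() now declared, a typical call could look like the hedged sketch below. It is not from the commit and assumes the standard POSIX WEXITED option flag is also provided by <sys/wait.h>.

// Hypothetical usage of the newly declared waitid() (not part of the commit).
// Assumes the standard POSIX WEXITED flag is available in <sys/wait.h>.
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>

void
reap_child(pid_t child)
{
	siginfo_t info;
	if (waitid(P_PID, (id_t)child, &info, WEXITED) == 0) {
		// si_code tells CLD_EXITED apart from CLD_KILLED/CLD_DUMPED;
		// si_status carries the exit value or terminating signal.
		printf("child %d: si_code %d, si_status %d\n", (int)info.si_pid,
			info.si_code, info.si_status);
	}
}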
diff --git a/headers/posix/time.h b/headers/posix/time.h
index af1ca57..856c6d5 100644
--- a/headers/posix/time.h
+++ b/headers/posix/time.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2010 Haiku Inc. All Rights Reserved.
+ * Copyright 2005-2011, Haiku, Inc. All Rights Reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _TIME_H_
@@ -9,17 +9,33 @@
#include <sys/types.h>
+struct sigevent; /* defined in <signal.h> */
+
+
typedef __haiku_int32 clock_t;
typedef __haiku_int32 time_t;
typedef __haiku_int32 suseconds_t;
typedef __haiku_uint32 useconds_t;
-#define CLOCKS_PER_SEC 1000
+
+#define CLOCKS_PER_SEC 1000000
#define CLK_TCK CLOCKS_PER_SEC
#define MAX_TIMESTR 70
/* maximum length of a string returned by asctime(), and ctime() */
+#define CLOCK_MONOTONIC ((clockid_t)0)
+ /* system-wide monotonic clock (aka system time) */
+#define CLOCK_REALTIME ((clockid_t)-1)
+ /* system-wide real time clock */
+#define CLOCK_PROCESS_CPUTIME_ID ((clockid_t)-2)
+ /* clock measuring the used CPU time of the current process */
+#define CLOCK_THREAD_CPUTIME_ID ((clockid_t)-3)
+ /* clock measuring the used CPU time of the current thread */
+
+#define TIMER_ABSTIME 1 /* absolute timer flag */
+
+
struct timespec {
time_t tv_sec; /* seconds */
long tv_nsec; /* and nanoseconds */
@@ -72,6 +88,23 @@ extern size_t strftime(char *buffer, size_t maxSize, const char *format,
const struct tm *tm);
extern char *strptime(const char *buf, const char *format, struct tm *tm);
+/* clock functions */
+int clock_getres(clockid_t clockID, struct timespec* resolution);
+int clock_gettime(clockid_t clockID, struct timespec* time);
+int clock_settime(clockid_t clockID, const struct timespec* time);
+int clock_nanosleep(clockid_t clockID, int flags,
+ const struct timespec* time, struct timespec* remainingTime);
+int clock_getcpuclockid(pid_t pid, clockid_t* _clockID);
+
+/* timer functions */
+int timer_create(clockid_t clockID, struct sigevent* event,
+ timer_t* timerID);
+int timer_delete(timer_t timerID);
+int timer_gettime(timer_t timerID, struct itimerspec* value);
+int timer_settime(timer_t timerID, int flags,
+ const struct itimerspec* value, struct itimerspec* oldValue);
+int timer_getoverrun(timer_t timerID);
+
/* special timezone support */
extern void tzset(void);
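
To make the new clock and timer declarations above more concrete, here is a small sketch (not from the commit) that reads the monotonic clock and arms a one-shot POSIX timer delivering SIGALRM; it assumes the standard struct itimerspec layout with it_value/it_interval members.

// Illustrative sketch (not from the commit) of the new clock/timer calls:
// read CLOCK_MONOTONIC and create a one-shot timer that raises SIGALRM.
#include <signal.h>
#include <stdio.h>
#include <time.h>

void
arm_one_shot_timer()
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("monotonic time: %ld.%09ld\n", (long)now.tv_sec, now.tv_nsec);

	// deliver SIGALRM when the timer expires
	struct sigevent event = {};
	event.sigev_notify = SIGEV_SIGNAL;
	event.sigev_signo = SIGALRM;

	timer_t timerID;
	if (timer_create(CLOCK_MONOTONIC, &event, &timerID) != 0)
		return;

	struct itimerspec spec = {};
	spec.it_value.tv_sec = 2;	// fire once, two seconds from now
	timer_settime(timerID, 0, &spec, NULL);
}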
diff --git a/headers/posix/unistd.h b/headers/posix/unistd.h
index e9de223..8380ae6 100644
--- a/headers/posix/unistd.h
+++ b/headers/posix/unistd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2010 Haiku Inc. All Rights Reserved.
+ * Copyright 2004-2011 Haiku, Inc. All Rights Reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _UNISTD_H_
@@ -45,9 +45,13 @@
#define _POSIX_THREAD_ATTR_STACKADDR (-1) /* currently unsupported */
#define _POSIX_THREAD_ATTR_STACKSIZE (200809L)
#define _POSIX_THREAD_PRIORITY_SCHEDULING (-1) /* currently unsupported */
-#define _POSIX_REALTIME_SIGNALS (-1) /* currently unsupported */
+#define _POSIX_REALTIME_SIGNALS (200809L)
#define _POSIX_MEMORY_PROTECTION (200809L)
-#define _POSIX_SEM_VALUE_MAX INT_MAX
+#define _POSIX_MONOTONIC_CLOCK (200809L)
+#define _POSIX_TIMERS (200809L)
+#define _POSIX_CPUTIME (200809L)
+#define _POSIX_THREAD_CPUTIME (200809L)
+
/* pathconf() constants */
/* BeOS supported values, do not touch */
@@ -119,6 +123,14 @@
#define _SC_THREAD_PRIORITY_SCHEDULING 50
#define _SC_REALTIME_SIGNALS 51
#define _SC_MEMORY_PROTECTION 52
+#define _SC_SIGQUEUE_MAX 53
+#define _SC_RTSIG_MAX 54
+#define _SC_MONOTONIC_CLOCK 55
+#define _SC_DELAYTIMER_MAX 56
+#define _SC_TIMER_MAX 57
+#define _SC_TIMERS 58
+#define _SC_CPUTIME 59
+#define _SC_THREAD_CPUTIME 60
/* confstr() constants */
diff --git a/headers/private/kernel/DPC.h b/headers/private/kernel/DPC.h
new file mode 100644
index 0000000..9267910
--- /dev/null
+++ b/headers/private/kernel/DPC.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_DPC_H
+#define _KERNEL_DPC_H
+
+
+#include <sys/cdefs.h>
+
+#include <KernelExport.h>
+
+#include <util/DoublyLinkedList.h>
+
+#include <condition_variable.h>
+
+
+namespace BKernel {
+
+
+class DPCQueue;
+
+
+class DPCCallback : public DoublyLinkedListLinkImpl<DPCCallback> {
+public:
+ DPCCallback();
+ virtual ~DPCCallback();
+
+ virtual void DoDPC(DPCQueue* queue) = 0;
+
+private:
+ friend class DPCQueue;
+
+private:
+ DPCQueue* fInQueue;
+};
+
+
+class FunctionDPCCallback : public DPCCallback {
+public:
+ FunctionDPCCallback(DPCQueue* owner);
+
+ void SetTo(void (*function)(void*), void* argument);
+
+ virtual void DoDPC(DPCQueue* queue);
+
+private:
+ DPCQueue* fOwner;
+ void (*fFunction)(void*);
+ void* fArgument;
+};
+
+
+class DPCQueue {
+public:
+ DPCQueue();
+ ~DPCQueue();
+
+ static DPCQueue* DefaultQueue(int priority);
+
+ status_t Init(const char* name, int32 priority,
+ uint32 reservedSlots);
+ void Close(bool cancelPending);
+
+ status_t Add(DPCCallback* callback,
+ bool schedulerLocked);
+ status_t Add(void (*function)(void*), void* argument,
+ bool schedulerLocked);
+ bool Cancel(DPCCallback* callback);
+
+ thread_id Thread() const
+ { return fThreadID; }
+
+public:
+ // conceptually package private
+ void Recycle(FunctionDPCCallback* callback);
+
+private:
+ typedef DoublyLinkedList<DPCCallback> CallbackList;
+
+private:
+ static status_t _ThreadEntry(void* data);
+ status_t _Thread();
+
+ bool _IsClosed() const
+ { return fThreadID < 0; }
+
+private:
+ spinlock fLock;
+ thread_id fThreadID;
+ CallbackList fCallbacks;
+ CallbackList fUnusedFunctionCallbacks;
+ ConditionVariable fPendingCallbacksCondition;
+ DPCCallback* fCallbackInProgress;
+ ConditionVariable* fCallbackDoneCondition;
+};
+
+
+} // namespace BKernel
+
+
+using BKernel::DPCCallback;
+using BKernel::DPCQueue;
+using BKernel::FunctionDPCCallback;
+
+
+__BEGIN_DECLS
+
+void dpc_init();
+
+__END_DECLS
+
+
+#endif // _KERNEL_DPC_H
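
The new header above defines a small deferred-procedure-call framework. Below is a hedged kernel-side sketch of how it could be used; it is not from the commit, and whether DefaultQueue() accepts B_NORMAL_PRIORITY directly is an assumption.

// Hypothetical kernel-side sketch (not part of the commit): defer light
// follow-up work from interrupt context to one of the default DPC queues.
#include <DPC.h>
#include <KernelExport.h>

static void
log_completion(void* argument)
{
	// runs later in the DPC queue's worker thread, outside interrupt context
	dprintf("I/O completed for cookie %p\n", argument);
}

static void
schedule_follow_up(void* cookie)
{
	DPCQueue* queue = DPCQueue::DefaultQueue(B_NORMAL_PRIORITY);
	if (queue != NULL)
		queue->Add(log_completion, cookie, false);
			// "false": the scheduler lock is not held by the caller
}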
diff --git a/headers/private/kernel/UserEvent.h b/headers/private/kernel/UserEvent.h
new file mode 100644
index 0000000..1b8ceb7
--- /dev/null
+++ b/headers/private/kernel/UserEvent.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_USER_EVENT_H
+#define _KERNEL_USER_EVENT_H
+
+
+#include <signal.h>
+
+#include <SupportDefs.h>
+
+#include <DPC.h>
+#include <thread.h>
+
+
+namespace BKernel {
+
+
+struct Team;
+struct Thread;
+
+
+struct UserEvent {
+ virtual ~UserEvent();
+
+ virtual status_t Fire() = 0;
+};
+
+
+struct SignalEvent : UserEvent {
+ virtual ~SignalEvent();
+
+ void SetUserValue(union sigval userValue);
+
+protected:
+ struct EventSignal;
+
+protected:
+ SignalEvent(EventSignal* signal);
+
+protected:
+ EventSignal* fSignal;
+};
+
+
+struct TeamSignalEvent : SignalEvent {
+ static TeamSignalEvent* Create(Team* team, uint32 signalNumber,
+ int32 signalCode, int32 errorCode);
+
+ virtual status_t Fire();
+
+private:
+ TeamSignalEvent(Team* team,
+ EventSignal* signal);
+
+private:
+ Team* fTeam;
+};
+
+
+struct ThreadSignalEvent : SignalEvent {
+ static ThreadSignalEvent* Create(Thread* thread, uint32 signalNumber,
+ int32 signalCode, int32 errorCode,
+ pid_t sendingTeam);
+
+ virtual status_t Fire();
+
+private:
+ ThreadSignalEvent(Thread* thread,
+ EventSignal* signal);
+
+private:
+ Thread* fThread;
+};
+
+
+struct CreateThreadEvent : UserEvent, private DPCCallback {
+ ~CreateThreadEvent();
+
+ static CreateThreadEvent* Create(
+ const ThreadCreationAttributes& attributes);
+
+ virtual status_t Fire();
+
+private:
+ CreateThreadEvent(
+ const ThreadCreationAttributes& attributes);
+
+ virtual void DoDPC(DPCQueue* queue);
+
+private:
+ ThreadCreationAttributes fCreationAttributes;
+ char fThreadName[B_OS_NAME_LENGTH];
+ bool fPendingDPC;
+};
+
+
+} // namespace BKernel
+
+
+using BKernel::CreateThreadEvent;
+using BKernel::SignalEvent;
+using BKernel::TeamSignalEvent;
+using BKernel::ThreadSignalEvent;
+using BKernel::UserEvent;
+
+
+#endif // _KERNEL_USER_EVENT_H
diff --git a/headers/private/kernel/UserTimer.h b/headers/private/kernel/UserTimer.h
new file mode 100644
index 0000000..2f439c1b
--- /dev/null
+++ b/headers/private/kernel/UserTimer.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_USER_TIMER_H
+#define _KERNEL_USER_TIMER_H
+
+
+#include <sys/cdefs.h>
+#include <time.h>
+
+#include <util/DoublyLinkedList.h>
+
+#include <ksignal.h>
+#include <timer.h>
+#include <user_timer_defs.h>
+
+
+struct thread_creation_attributes;
+
+
+namespace BKernel {
+
+
+struct UserEvent;
+struct Team;
+
+
+struct UserTimer : DoublyLinkedListLinkImpl<UserTimer> {
+ UserTimer();
+ virtual ~UserTimer();
+
+ int32 ID() const
+ { return fID; }
+ void SetID(int32 id)
+ { fID = id; }
+
+ void SetEvent(UserEvent* event)
+ { fEvent = event; }
+
+ virtual void Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime,
+ bigtime_t& _oldInterval) = 0;
+ void Cancel();
+
+ virtual void GetInfo(bigtime_t& _remainingTime,
+ bigtime_t& _interval,
+ uint32& _overrunCount) = 0;
+
+protected:
+ static int32 HandleTimerHook(struct timer* timer);
+ virtual void HandleTimer();
+
+ inline void UpdatePeriodicStartTime();
+ inline void CheckPeriodicOverrun(bigtime_t now);
+
+protected:
+ int32 fID;
+ timer fTimer;
+ UserEvent* fEvent;
+ bigtime_t fNextTime;
+ bigtime_t fInterval;
+ uint32 fOverrunCount;
+ bool fScheduled; // fTimer scheduled
+};
+
+
+struct SystemTimeUserTimer : public UserTimer {
+ virtual void Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime,
+ bigtime_t& _oldInterval);
+ virtual void GetInfo(bigtime_t& _remainingTime,
+ bigtime_t& _interval,
+ uint32& _overrunCount);
+
+protected:
+ virtual void HandleTimer();
+
+ void ScheduleKernelTimer(bigtime_t now,
+ bool checkPeriodicOverrun);
+};
+
+
+struct RealTimeUserTimer : public SystemTimeUserTimer {
+ virtual void Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime,
+ bigtime_t& _oldInterval);
+
+ void TimeWarped();
+
+private:
+ bigtime_t fRealTimeOffset;
+ bool fAbsolute;
+
+protected:
+ virtual void HandleTimer();
+
+public:
+ // conceptually package private
+ DoublyLinkedListLink<RealTimeUserTimer> fGlobalListLink;
+};
+
+
+struct TeamTimeUserTimer : public UserTimer {
+ TeamTimeUserTimer(team_id teamID);
+ ~TeamTimeUserTimer();
+
+ virtual void Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime,
+ bigtime_t& _oldInterval);
+ virtual void GetInfo(bigtime_t& _remainingTime,
+ bigtime_t& _interval,
+ uint32& _overrunCount);
+
+ void Deactivate();
+
+ void Update(Thread* unscheduledThread);
+ void TimeWarped(bigtime_t changedBy);
+
+protected:
+ virtual void HandleTimer();
+
+private:
+ void _Update(bool unscheduling);
+
+private:
+ team_id fTeamID;
+ Team* fTeam;
+ int32 fRunningThreads;
+ bool fAbsolute;
+
+public:
+ // conceptually package private
+ DoublyLinkedListLink<TeamTimeUserTimer> fCPUTimeListLink;
+};
+
+
+struct TeamUserTimeUserTimer : public UserTimer {
+ TeamUserTimeUserTimer(team_id teamID);
+ ~TeamUserTimeUserTimer();
+
+ virtual void Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime,
+ bigtime_t& _oldInterval);
+ virtual void GetInfo(bigtime_t& _remainingTime,
+ bigtime_t& _interval,
+ uint32& _overrunCount);
+
+ void Deactivate();
+ void Check();
+
+private:
+ team_id fTeamID;
+ Team* fTeam;
+
+public:
+ // conceptually package private
+ DoublyLinkedListLink<TeamUserTimeUserTimer> fCPUTimeListLink;
+};
+
+
+struct ThreadTimeUserTimer : public UserTimer {
+ ThreadTimeUserTimer(thread_id threadID);
+ ~ThreadTimeUserTimer();
+
+ virtual void Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime,
+ bigtime_t& _oldInterval);
+ virtual void GetInfo(bigtime_t& _remainingTime,
+ bigtime_t& _interval,
+ uint32& _overrunCount);
+
+ void Deactivate();
+
+ void Start();
+ void Stop();
+ void TimeWarped(bigtime_t changedBy);
+
+protected:
+ virtual void HandleTimer();
+
+private:
+ thread_id fThreadID;
+ Thread* fThread; // != NULL only when active
+ bool fAbsolute;
+
+public:
+ // conceptually package private
+ DoublyLinkedListLink<ThreadTimeUserTimer> fCPUTimeListLink;
+};
+
+
+struct UserTimerList {
+ UserTimerList();
+ ~UserTimerList();
+
+ UserTimer* TimerFor(int32 id) const;
+ void AddTimer(UserTimer* timer);
+ void RemoveTimer(UserTimer* timer)
+ { fTimers.Remove(timer); }
+ int32 DeleteTimers(bool userDefinedOnly);
+
+private:
+ typedef DoublyLinkedList<UserTimer> TimerList;
+
+private:
+ TimerList fTimers;
+};
+
+
+typedef DoublyLinkedList<RealTimeUserTimer,
+ DoublyLinkedListMemberGetLink<RealTimeUserTimer,
+ &RealTimeUserTimer::fGlobalListLink> > RealTimeUserTimerList;
+
+typedef DoublyLinkedList<TeamTimeUserTimer,
+ DoublyLinkedListMemberGetLink<TeamTimeUserTimer,
+ &TeamTimeUserTimer::fCPUTimeListLink> > TeamTimeUserTimerList;
+
+typedef DoublyLinkedList<TeamUserTimeUserTimer,
+ DoublyLinkedListMemberGetLink<TeamUserTimeUserTimer,
+ &TeamUserTimeUserTimer::fCPUTimeListLink> > TeamUserTimeUserTimerList;
+
+typedef DoublyLinkedList<ThreadTimeUserTimer,
+ DoublyLinkedListMemberGetLink<ThreadTimeUserTimer,
+ &ThreadTimeUserTimer::fCPUTimeListLink> > ThreadTimeUserTimerList;
+
+
+} // namespace BKernel
+
+
+using BKernel::RealTimeUserTimer;
+using BKernel::RealTimeUserTimerList;
+using BKernel::SystemTimeUserTimer;
+using BKernel::TeamUserTimeUserTimer;
+using BKernel::TeamUserTimeUserTimerList;
+using BKernel::TeamTimeUserTimer;
+using BKernel::TeamTimeUserTimerList;
+using BKernel::ThreadTimeUserTimer;
+using BKernel::ThreadTimeUserTimerList;
+using BKernel::UserTimer;
+using BKernel::UserTimerList;
+
+
+__BEGIN_DECLS
+
+status_t user_timer_create_thread_timers(Team* team, Thread* thread);
+status_t user_timer_create_team_timers(Team* team);
+
+status_t user_timer_get_clock(clockid_t clockID, bigtime_t& _time);
+void user_timer_real_time_clock_changed();
+
+void user_timer_stop_cpu_timers(Thread* thread, Thread* nextThread);
+void user_timer_continue_cpu_timers(Thread* thread,
+ Thread* previousThread);
+void user_timer_check_team_user_timers(Team* team);
+
+status_t _user_get_clock(clockid_t clockID, bigtime_t* _time);
+status_t _user_set_clock(clockid_t clockID, bigtime_t time);
+
+int32 _user_create_timer(clockid_t clockID, thread_id threadID,
+ uint32 flags, const struct sigevent* event,
+ const thread_creation_attributes* threadAttributes);
+status_t _user_delete_timer(int32 timerID, thread_id threadID);
+status_t _user_get_timer(int32 timerID, thread_id threadID,
+ struct user_timer_info* info);
+status_t _user_set_timer(int32 timerID, thread_id threadID,
+ bigtime_t startTime, bigtime_t interval, uint32 flags,
+ struct user_timer_info* oldInfo);
+
+__END_DECLS
+
+
+#endif // _KERNEL_USER_TIMER_H
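For orientation, a minimal sketch of how the timer list and the Schedule()/GetInfo() interface declared above fit together. This is illustrative only and not part of the change; the list, timer ID, and error handling are placeholders.

    static status_t
    example_set_timer(UserTimerList& timers, int32 timerID, bigtime_t startTime,
        bigtime_t interval, uint32 flags)
    {
        // look up the timer by its ID in the owning team's/thread's list
        UserTimer* timer = timers.TimerFor(timerID);
        if (timer == NULL)
            return B_BAD_VALUE;

        // (re)arm the timer; the previous settings are returned by reference
        bigtime_t oldRemainingTime;
        bigtime_t oldInterval;
        timer->Schedule(startTime, interval, flags, oldRemainingTime,
            oldInterval);
        return B_OK;
    }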
diff --git a/headers/private/kernel/arch/thread.h b/headers/private/kernel/arch/thread.h
index 6efad95..b19066a 100644
--- a/headers/private/kernel/arch/thread.h
+++ b/headers/private/kernel/arch/thread.h
@@ -21,16 +21,16 @@ status_t arch_team_init_team_struct(Team *t, bool kernel);
status_t arch_thread_init_thread_struct(Thread *t);
status_t arch_thread_init_tls(Thread *thread);
void arch_thread_context_switch(Thread *t_from, Thread *t_to);
-status_t arch_thread_init_kthread_stack(Thread *t,
- int (*start_func)(void), void (*entry_func)(void), void (*exit_func)(void));
+void arch_thread_init_kthread_stack(Thread *thread, void *stack,
+ void *stackTop, void (*function)(void*), const void *data);
void arch_thread_dump_info(void *info);
status_t arch_thread_enter_userspace(Thread *t, addr_t entry,
void *args1, void *args2);
bool arch_on_signal_stack(Thread *thread);
-status_t arch_setup_signal_frame(Thread *t, struct sigaction *sa,
- int signal, int signalMask);
-int64 arch_restore_signal_frame(void);
+status_t arch_setup_signal_frame(Thread *thread, struct sigaction *action,
+ struct signal_frame_data *signalFrameData);
+int64 arch_restore_signal_frame(struct signal_frame_data* signalFrameData);
void arch_store_fork_frame(struct arch_fork_arg *arg);
void arch_restore_fork_frame(struct arch_fork_arg *arg);
diff --git a/headers/private/kernel/arch/x86/arch_cpu.h b/headers/private/kernel/arch/x86/arch_cpu.h
index 4f84cc7..53a86ab 100644
--- a/headers/private/kernel/arch/x86/arch_cpu.h
+++ b/headers/private/kernel/arch/x86/arch_cpu.h
@@ -95,6 +95,32 @@
#define IA32_MTR_WRITE_BACK 6
+// EFLAGS register
+#define X86_EFLAGS_CARRY 0x00000001
+#define X86_EFLAGS_RESERVED1 0x00000002
+#define X86_EFLAGS_PARITY 0x00000004
+#define X86_EFLAGS_AUXILIARY_CARRY 0x00000010
+#define X86_EFLAGS_ZERO 0x00000040
+#define X86_EFLAGS_SIGN 0x00000080
+#define X86_EFLAGS_TRAP 0x00000100
+#define X86_EFLAGS_INTERRUPT 0x00000200
+#define X86_EFLAGS_DIRECTION 0x00000400
+#define X86_EFLAGS_OVERFLOW 0x00000800
+#define X86_EFLAGS_IO_PRIVILEG_LEVEL 0x00003000
+#define X86_EFLAGS_IO_PRIVILEG_LEVEL_SHIFT 12
+#define X86_EFLAGS_NESTED_TASK 0x00004000
+#define X86_EFLAGS_RESUME 0x00010000
+#define X86_EFLAGS_V86_MODE 0x00020000
+#define X86_EFLAGS_ALIGNMENT_CHECK 0x00040000
+#define X86_EFLAGS_VIRTUAL_INTERRUPT 0x00080000
+#define X86_EFLAGS_VIRTUAL_INTERRUPT_PENDING 0x00100000
+#define X86_EFLAGS_ID 0x00200000
+
+#define X86_EFLAGS_USER_FLAGS (X86_EFLAGS_CARRY | X86_EFLAGS_PARITY \
+ | X86_EFLAGS_AUXILIARY_CARRY | X86_EFLAGS_ZERO | X86_EFLAGS_SIGN \
+ | X86_EFLAGS_DIRECTION | X86_EFLAGS_OVERFLOW)
+
+
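The X86_EFLAGS_USER_FLAGS mask is presumably meant for sanitizing register state that comes from userland, e.g. when restoring a signal frame: only the arithmetic and direction flags are taken over, privileged bits are preserved. A hedged sketch of that pattern (frame and userFlags are placeholders, not code from this change):

    frame->flags = (frame->flags & ~X86_EFLAGS_USER_FLAGS)
        | (userFlags & X86_EFLAGS_USER_FLAGS);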
// iframe types
#define IFRAME_TYPE_SYSCALL 0x1
#define IFRAME_TYPE_OTHER 0x2
@@ -276,7 +302,6 @@ void x86_context_switch(struct arch_thread* oldState,
struct arch_thread* newState);
void x86_userspace_thread_exit(void);
void x86_end_userspace_thread_exit(void);
-void x86_enter_userspace(addr_t entry, addr_t stackTop);
void x86_swap_pgdir(uint32 newPageDir);
void i386_set_tss_and_kstack(addr_t kstack);
void i386_fnsave(void* fpuState);
diff --git a/headers/private/kernel/arch/x86/arch_thread.h b/headers/private/kernel/arch/x86/arch_thread.h
index 648006b..ff1f42a 100644
--- a/headers/private/kernel/arch/x86/arch_thread.h
+++ b/headers/private/kernel/arch/x86/arch_thread.h
@@ -24,9 +24,6 @@ uint32 x86_next_page_directory(Thread *from, Thread *to);
void x86_restart_syscall(struct iframe* frame);
-void i386_return_from_signal();
-void i386_end_return_from_signal();
-
// override empty macro
#undef arch_syscall_64_bit_return_value
void arch_syscall_64_bit_return_value(void);
diff --git a/headers/private/kernel/condition_variable.h b/headers/private/kernel/condition_variable.h
index f6dd469..9f4c036 100644
--- a/headers/private/kernel/condition_variable.h
+++ b/headers/private/kernel/condition_variable.h
@@ -56,18 +56,18 @@ public:
void Publish(const void* object,
const char* objectType);
- void Unpublish(bool threadsLocked = false);
+ void Unpublish(bool schedulerLocked = false);
- inline void NotifyOne(bool threadsLocked = false,
+ inline void NotifyOne(bool schedulerLocked = false,
status_t result = B_OK);
- inline void NotifyAll(bool threadsLocked = false,
+ inline void NotifyAll(bool schedulerLocked = false,
status_t result = B_OK);
static void NotifyOne(const void* object,
- bool threadsLocked = false,
+ bool schedulerLocked = false,
status_t result = B_OK);
static void NotifyAll(const void* object,
- bool threadsLocked = false,
+ bool schedulerLocked = false,
status_t result = B_OK);
// (both methods) caller must ensure that
// the variable is not unpublished
@@ -86,7 +86,7 @@ public:
void Dump() const;
private:
- void _Notify(bool all, bool threadsLocked,
+ void _Notify(bool all, bool schedulerLocked,
status_t result);
void _NotifyLocked(bool all, status_t result);
@@ -124,16 +124,16 @@ ConditionVariableEntry::~ConditionVariableEntry()
inline void
-ConditionVariable::NotifyOne(bool threadsLocked, status_t result)
+ConditionVariable::NotifyOne(bool schedulerLocked, status_t result)
{
- _Notify(false, threadsLocked, result);
+ _Notify(false, schedulerLocked, result);
}
inline void
-ConditionVariable::NotifyAll(bool threadsLocked, status_t result)
+ConditionVariable::NotifyAll(bool schedulerLocked, status_t result)
{
- _Notify(true, threadsLocked, result);
+ _Notify(true, schedulerLocked, result);
}
diff --git a/headers/private/kernel/cpu.h b/headers/private/kernel/cpu.h
index 89cd960..850e666 100644
--- a/headers/private/kernel/cpu.h
+++ b/headers/private/kernel/cpu.h
@@ -51,6 +51,7 @@ typedef struct cpu_ent {
jmp_buf fault_jump_buffer;
Thread* running_thread;
+ Thread* previous_thread;
bool invoke_scheduler;
bool invoke_scheduler_if_idle;
bool disabled;
diff --git a/headers/private/kernel/elf.h b/headers/private/kernel/elf.h
index bb8e9e7..292cb9f 100644
--- a/headers/private/kernel/elf.h
+++ b/headers/private/kernel/elf.h
@@ -16,6 +16,12 @@
struct kernel_args;
+struct elf_symbol_info {
+ addr_t address;
+ size_t size;
+};
+
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -34,6 +40,7 @@ status_t elf_debug_lookup_user_symbol_address(Team* team, addr_t address,
addr_t *_baseAddress, const char **_symbolName,
const char **_imageName, bool *_exactMatch);
addr_t elf_debug_lookup_symbol(const char* searchName);
+status_t elf_lookup_kernel_symbol(const char* name, elf_symbol_info* info);
struct elf_image_info* elf_get_kernel_image();
status_t elf_get_image_info_for_address(addr_t address, image_info* info);
image_id elf_create_memory_image(const char* imageName, addr_t text,
diff --git a/headers/private/kernel/kscheduler.h b/headers/private/kernel/kscheduler.h
index 9e19b28..f106ac8 100644
--- a/headers/private/kernel/kscheduler.h
+++ b/headers/private/kernel/kscheduler.h
@@ -18,27 +18,59 @@ struct SchedulerListener;
struct scheduler_ops {
+ /*! Enqueues the thread in the ready-to-run queue.
+ The caller must hold the scheduler lock (with disabled interrupts).
+ */
void (*enqueue_in_run_queue)(Thread* thread);
+
+ /*! Selects a thread from the ready-to-run queue and, if that's not the
+ calling thread, switches the current CPU's context to run the selected
+ thread.
+ If it's the same thread, the thread will just continue to run.
+ In either case, unless the thread is dead or is sleeping/waiting
+ indefinitely, the function will eventually return.
+ The caller must hold the scheduler lock (with disabled interrupts).
+ */
void (*reschedule)(void);
+
+ /*! Sets the given thread's priority.
+ The thread may be running or may be in the ready-to-run queue.
+ The caller must hold the scheduler lock (with disabled interrupts).
+ */
void (*set_thread_priority)(Thread* thread, int32 priority);
bigtime_t (*estimate_max_scheduling_latency)(Thread* thread);
- void (*on_thread_create)(Thread* thread);
- // called when the thread structure is first created -
- // initialization of per-thread housekeeping data structures should
- // be done here
+ /*! Called when the Thread structure is first created.
+ Per-thread housekeeping resources can be allocated.
+ Interrupts must be enabled.
+ */
+ status_t (*on_thread_create)(Thread* thread, bool idleThread);
+
+ /*! Called when a Thread structure is initialized and made ready for
+ use.
+ The per-thread housekeeping data structures are reset, if needed.
+ The caller must hold the scheduler lock (with disabled interrupts).
+ */
void (*on_thread_init)(Thread* thread);
- // called when a thread structure is initialized and made ready for
- // use - should be used to reset the housekeeping data structures
- // if needed
+
+ /*! Called when a Thread structure is freed.
+ Frees up any per-thread resources allocated on the scheduler's part. The
+ function may be called even if on_thread_create() failed.
+ Interrupts must be enabled.
+ */
void (*on_thread_destroy)(Thread* thread);
- // called when a thread structure is freed - freeing up any allocated
- // mem on the scheduler's part should be done here
+ /*! Called in the early boot process to start thread scheduling on the
+ current CPU.
+ The function is called once for each CPU.
+ Interrupts must be disabled, but the caller must not hold the scheduler
+ lock.
+ */
void (*start)(void);
};
extern struct scheduler_ops* gScheduler;
+extern spinlock gSchedulerLock;
#define scheduler_enqueue_in_run_queue(thread) \
gScheduler->enqueue_in_run_queue(thread)
@@ -46,8 +78,8 @@ extern struct scheduler_ops* gScheduler;
gScheduler->set_thread_priority(thread, priority)
#define scheduler_reschedule() gScheduler->reschedule()
#define scheduler_start() gScheduler->start()
-#define scheduler_on_thread_create(thread) \
- gScheduler->on_thread_create(thread)
+#define scheduler_on_thread_create(thread, idleThread) \
+ gScheduler->on_thread_create(thread, idleThread)
#define scheduler_on_thread_init(thread) \
gScheduler->on_thread_init(thread)
#define scheduler_on_thread_destroy(thread) \
@@ -73,7 +105,7 @@ status_t _user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
/*! Reschedules, if necessary.
- The thread spinlock must be held.
+ The caller must hold the scheduler lock (with disabled interrupts).
*/
static inline void
scheduler_reschedule_if_necessary_locked()
@@ -91,9 +123,11 @@ scheduler_reschedule_if_necessary()
{
if (are_interrupts_enabled()) {
cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ acquire_spinlock(&gSchedulerLock);
+
scheduler_reschedule_if_necessary_locked();
- RELEASE_THREAD_LOCK();
+
+ release_spinlock(&gSchedulerLock);
restore_interrupts(state);
}
}
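Mirroring scheduler_reschedule_if_necessary() above, a caller of the other scheduler hooks would typically take gSchedulerLock with interrupts disabled, roughly like this (sketch only; thread is a placeholder):

    cpu_status state = disable_interrupts();
    acquire_spinlock(&gSchedulerLock);

    scheduler_enqueue_in_run_queue(thread);

    release_spinlock(&gSchedulerLock);
    restore_interrupts(state);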
diff --git a/headers/private/kernel/ksignal.h b/headers/private/kernel/ksignal.h
index 088f3bb..b7ec3c3 100644
--- a/headers/private/kernel/ksignal.h
+++ b/headers/private/kernel/ksignal.h
@@ -1,57 +1,240 @@
/*
- * Copyright 2003-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2003-2008, Axel Dörfler, axeld@pinc-software.de.
+ * All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_SIGNAL_H
#define _KERNEL_SIGNAL_H
-#include <KernelExport.h>
#include <signal.h>
+#include <KernelExport.h>
+
+#include <signal_defs.h>
+
+#include <heap.h>
+#include <util/DoublyLinkedList.h>
+#include <util/KernelReferenceable.h>
+
namespace BKernel {
+ struct ProcessGroup;
+ struct Team;
struct Thread;
}
+using BKernel::ProcessGroup;
+using BKernel::Team;
using BKernel::Thread;
-#define KILL_SIGNALS ((1L << (SIGKILL - 1)) | (1L << (SIGKILLTHR - 1)))
+#define KILL_SIGNALS \
+ (((sigset_t)1 << (SIGKILL - 1)) | ((sigset_t)1 << (SIGKILLTHR - 1)))
-#define SIGNAL_TO_MASK(signal) (1LL << (signal - 1))
+#define SYSCALL_RESTART_PARAMETER_SIZE 32
-// additional send_signal_etc() flag
-#define SIGNAL_FLAG_TEAMS_LOCKED (0x10000)
- // interrupts are disabled and team lock is held
-#define SIGNAL_FLAG_DONT_RESTART_SYSCALL (0x20000)
+// kernel-internal signals
+#define SIGNAL_CANCEL_THREAD 63
+ // Cancel a thread. Non-blockable.
+#define SIGNAL_CONTINUE_THREAD 64
+ // Continue a thread. Used by resume_thread(). Non-blockable, prevents
+ // syscall restart.
-#ifdef __cplusplus
-extern "C" {
-#endif
+struct signal_frame_data {
+ siginfo_t info;
+ ucontext_t context;
+ void* user_data;
+ void* handler;
+ bool siginfo_handler;
+ int32 thread_flags;
+ uint64 syscall_restart_return_value;
+ uint8 syscall_restart_parameters[SYSCALL_RESTART_PARAMETER_SIZE];
+};
+
+
+namespace BKernel {
+
+
+struct QueuedSignalsCounter : BReferenceable {
+ QueuedSignalsCounter(int32 limit);
+
+ bool Increment();
+ void Decrement() { ReleaseReference(); }
+
+private:
+ int32 fLimit;
+};
+
+
+struct Signal : KernelReferenceable, DoublyLinkedListLinkImpl<Signal> {
+public:
+ Signal();
+ // cheap no-init constructor
+ Signal(const Signal& other);
+ Signal(uint32 number, int32 signalCode,
+ int32 errorCode, pid_t sendingProcess);
+ virtual ~Signal();
+
+ static status_t CreateQueuable(const Signal& signal,
+ bool queuingRequired,
+ Signal*& _signalToQueue);
+
+ void SetTo(uint32 number);
+
+ uint32 Number() const { return fNumber; }
+ void SetNumber(uint32 number)
+ { fNumber = number; }
+
+ int32 Priority() const;
+
+ int32 SignalCode() const
+ { return fSignalCode; }
+ int32 ErrorCode() const
+ { return fErrorCode; }
+ pid_t SendingProcess() const
+ { return fSendingProcess; }
+
+ uid_t SendingUser() const
+ { return fSendingUser; }
+ void SetSendingUser(uid_t user)
+ { fSendingUser = user; }
-extern bool handle_signals(Thread *thread);
-extern bool is_kill_signal_pending(void);
-extern int has_signals_pending(void *_thread);
-extern bool is_signal_blocked(int signal);
+ int32 Status() const
+ { return fStatus; }
+ void SetStatus(int32 status)
+ { fStatus = status; }
-extern void update_current_thread_signals_flag();
+ int32 PollBand() const
+ { return fPollBand; }
+ void SetPollBand(int32 pollBand)
+ { fPollBand = pollBand; }
-extern int sigaction_etc(thread_id threadID, int signal,
- const struct sigaction *newAction, struct sigaction *oldAction);
+ void* Address() const
+ { return fAddress; }
+ void SetAddress(void* address)
+ { fAddress = address; }
+
+ union sigval UserValue() const
+ { return fUserValue; }
+ void SetUserValue(union sigval userValue)
+ { fUserValue = userValue; }
+
+ bool IsPending() const
+ { return fPending; }
+ void SetPending(bool pending)
+ { fPending = pending; }
+
+ virtual void Handled();
+
+protected:
+ virtual void LastReferenceReleased();
+
+private:
+ QueuedSignalsCounter* fCounter;
+ uint32 fNumber;
+ int32 fSignalCode;
+ int32 fErrorCode; // error code associated with the
+ // signal
+ pid_t fSendingProcess;
+ uid_t fSendingUser;
+ int32 fStatus; // exit value
+ int32 fPollBand; // for SIGPOLL
+ void* fAddress;
+ union sigval fUserValue;
+ bool fPending;
+};
+
+
+struct PendingSignals {
+ PendingSignals();
+ ~PendingSignals();
+
+ sigset_t AllSignals() const
+ { return fQueuedSignalsMask
+ | fUnqueuedSignalsMask; }
+
+ int32 HighestSignalPriority(sigset_t nonBlocked)
+ const;
+
+ void Clear();
+ void AddSignal(int32 signal)
+ { fUnqueuedSignalsMask
+ |= SIGNAL_TO_MASK(signal); }
+ void AddSignal(Signal* signal);
+ void RemoveSignal(int32 signal)
+ { RemoveSignals(SIGNAL_TO_MASK(signal)); }
+ void RemoveSignal(Signal* signal);
+ void RemoveSignals(sigset_t mask);
+
+ Signal* DequeueSignal(sigset_t nonBlocked,
+ Signal& buffer);
+
+private:
+ typedef DoublyLinkedList<Signal> SignalList;
+
+private:
+ int32 _GetHighestPrioritySignal(sigset_t nonBlocked,
+ Signal*& _queuedSignal,
+ int32& _unqueuedSignal) const;
+ void _UpdateQueuedSignalMask();
+
+private:
+ sigset_t fQueuedSignalsMask;
+ sigset_t fUnqueuedSignalsMask;
+ SignalList fQueuedSignals;
+};
+
+
+} // namespace BKernel
+
+
+using BKernel::PendingSignals;
+using BKernel::QueuedSignalsCounter;
+using BKernel::Signal;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
-extern status_t _user_send_signal(pid_t tid, uint sig);
-extern status_t _user_sigprocmask(int how, const sigset_t *set,
- sigset_t *oldSet);
-extern status_t _user_sigaction(int sig, const struct sigaction *newAction,
+void handle_signals(Thread* thread);
+bool is_team_signal_blocked(Team* team, int signal);
+void signal_get_user_stack(addr_t address, stack_t* stack);
+
+status_t send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
+ Signal* signal, uint32 flags);
+status_t send_signal_to_thread(Thread* thread, const Signal& signal,
+ uint32 flags);
+status_t send_signal_to_thread_id(thread_id threadID, const Signal& signal,
+ uint32 flags);
+
+status_t send_signal_to_team_locked(Team* team, uint32 signalNumber,
+ Signal* signal, uint32 flags);
+status_t send_signal_to_team(Team* team, const Signal& signal, uint32 flags);
+status_t send_signal_to_team_id(team_id teamID, const Signal& signal,
+ uint32 flags);
+
+status_t send_signal_to_process_group_locked(ProcessGroup* group,
+ const Signal& signal, uint32 flags);
+status_t send_signal_to_process_group(pid_t groupID, const Signal& signal,
+ uint32 flags);
+
+status_t _user_send_signal(int32 id, uint32 signal,
+ const union sigval* userValue, uint32 flags);
+status_t _user_set_signal_mask(int how, const sigset_t *set, sigset_t *oldSet);
+status_t _user_sigaction(int sig, const struct sigaction *newAction,
struct sigaction *oldAction);
-extern bigtime_t _user_set_alarm(bigtime_t time, uint32 mode);
-extern status_t _user_sigwait(const sigset_t *set, int *_signal);
-extern status_t _user_sigsuspend(const sigset_t *mask);
-extern status_t _user_sigpending(sigset_t *set);
-extern status_t _user_set_signal_stack(const stack_t *newUserStack,
+bigtime_t _user_set_alarm(bigtime_t time, uint32 mode);
+status_t _user_sigwait(const sigset_t *set, siginfo_t *info, uint32 flags,
+ bigtime_t timeout);
+status_t _user_sigsuspend(const sigset_t *mask);
+status_t _user_sigpending(sigset_t *set);
+status_t _user_set_signal_stack(const stack_t *newUserStack,
stack_t *oldUserStack);
+int64 _user_restore_signal_frame(struct signal_frame_data* signalFrameData);
#ifdef __cplusplus
}
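To illustrate the new C++ signal path: a kernel caller now describes a signal with a Signal object and passes it to one of the send_signal_to_*() functions above. A minimal sketch, assuming SI_USER is available from <signal.h>; the IDs are placeholders:

    static status_t
    example_send_sigint(thread_id threadID, pid_t sendingProcess)
    {
        Signal signal(SIGINT, SI_USER, B_OK, sendingProcess);
        return send_signal_to_thread_id(threadID, signal, 0);
    }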
diff --git a/headers/private/kernel/lock.h b/headers/private/kernel/lock.h
index e48cd8e..4381d21 100644
--- a/headers/private/kernel/lock.h
+++ b/headers/private/kernel/lock.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@@ -144,11 +144,11 @@ extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
uint32 timeoutFlags, bigtime_t timeout);
-extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
-extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);
+extern void _rw_lock_read_unlock(rw_lock* lock, bool schedulerLocked);
+extern void _rw_lock_write_unlock(rw_lock* lock, bool schedulerLocked);
-extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
-extern void _mutex_unlock(mutex* lock, bool threadsLocked);
+extern status_t _mutex_lock(mutex* lock, bool schedulerLocked);
+extern void _mutex_unlock(mutex* lock, bool schedulerLocked);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
bigtime_t timeout);
diff --git a/headers/private/kernel/real_time_clock.h b/headers/private/kernel/real_time_clock.h
index a9ba004..ccfd117 100644
--- a/headers/private/kernel/real_time_clock.h
+++ b/headers/private/kernel/real_time_clock.h
@@ -22,6 +22,8 @@ struct kernel_args;
extern "C" {
#endif
+void set_real_time_clock_usecs(bigtime_t currentTime);
+
status_t rtc_init(struct kernel_args *args);
bigtime_t rtc_boot_time(void);
// Returns the time at which the system was booted in microseconds since Jan 1, 1970 UTC.
@@ -34,7 +36,7 @@ void rtc_secs_to_tm(uint32 seconds, struct tm *t);
uint32 get_timezone_offset(void);
bigtime_t _user_system_time(void);
-status_t _user_set_real_time_clock(uint32 time);
+status_t _user_set_real_time_clock(bigtime_t time);
status_t _user_set_timezone(int32 timezoneOffset, const char *name,
size_t nameLength);
status_t _user_get_timezone(int32 *_timezoneOffset, char* name,
diff --git a/headers/private/kernel/team.h b/headers/private/kernel/team.h
index b15e99e..50afdee 100644
--- a/headers/private/kernel/team.h
+++ b/headers/private/kernel/team.h
@@ -22,12 +22,11 @@ extern "C" {
status_t team_init(struct kernel_args *args);
status_t wait_for_team(team_id id, status_t *returnCode);
-void team_remove_team(Team *team);
-port_id team_shutdown_team(Team *team, cpu_status& state);
+
+void team_remove_team(Team *team, pid_t& _signalGroup);
+port_id team_shutdown_team(Team *team);
void team_delete_team(Team *team, port_id debuggerPort);
-struct process_group *team_get_process_group_locked(
- struct process_session *session, pid_t id);
-void team_delete_process_group(struct process_group *group);
+
Team *team_get_kernel_team(void);
team_id team_get_kernel_team_id(void);
team_id team_get_current_team_id(void);
@@ -42,15 +41,11 @@ Team *team_get_team_struct_locked(team_id id);
int32 team_max_teams(void);
int32 team_used_teams(void);
-typedef bool (*team_iterator_callback)(Team* team, void* cookie);
-Team* team_iterate_through_teams(team_iterator_callback callback,
- void* cookie);
-
thread_id load_image_etc(int32 argCount, const char* const* args,
const char* const* env, int32 priority, team_id parentID, uint32 flags);
void team_set_job_control_state(Team* team, job_control_state newState,
- int signal, bool threadsLocked);
+ Signal* signal, bool threadsLocked);
void team_set_controlling_tty(int32 index);
int32 team_get_controlling_tty();
status_t team_set_foreground_process_group(int32 ttyIndex, pid_t processGroup);
@@ -61,7 +56,7 @@ status_t stop_watching_team(team_id team, void (*hook)(team_id, void *),
void *data);
struct user_thread* team_allocate_user_thread(Team* team);
-void team_free_user_thread(Thread* thread);
+void team_free_user_thread(Team* team, struct user_thread* userThread);
bool team_associate_data(AssociatedData* data);
bool team_dissociate_data(AssociatedData* data);
@@ -73,8 +68,7 @@ thread_id _user_load_image(const char* const* flatArgs, size_t flatArgsSize,
status_t _user_wait_for_team(team_id id, status_t *_returnCode);
void _user_exit_team(status_t returnValue);
status_t _user_kill_team(thread_id thread);
-thread_id _user_wait_for_child(thread_id child, uint32 flags, int32 *_reason,
- status_t *_returnCode);
+pid_t _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* info);
status_t _user_exec(const char *path, const char* const* flatArgs,
size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask);
thread_id _user_fork(void);
diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h
index 7991f43..c38657f 100644
--- a/headers/private/kernel/thread.h
+++ b/headers/private/kernel/thread.h
@@ -19,6 +19,7 @@
#include <ksignal.h>
+struct arch_fork_arg;
struct kernel_args;
struct select_info;
struct thread_creation_attributes;
@@ -31,6 +32,43 @@ struct thread_creation_attributes;
#define THREAD_NAME_CHANGED 0x04
+namespace BKernel {
+
+
+struct ThreadCreationAttributes : thread_creation_attributes {
+ // when calling from kernel only
+ team_id team;
+ Thread* thread;
+ sigset_t signal_mask;
+ size_t additional_stack_size; // additional space in the stack
+ // area after the TLS region, not
+ // used as thread stack
+ thread_func kernelEntry;
+ void* kernelArgument;
+ arch_fork_arg* forkArgs; // If non-NULL, the userland thread
+ // will be started with this
+ // register context.
+
+public:
+ ThreadCreationAttributes() {}
+ // no-init constructor
+ ThreadCreationAttributes(
+ thread_func function, const char* name,
+ int32 priority, void* arg,
+ team_id team = -1, Thread* thread = NULL);
+
+ status_t InitFromUserAttributes(
+ const thread_creation_attributes*
+ userAttributes,
+ char* nameBuffer);
+};
+
+
+} // namespace BKernel
+
+using BKernel::ThreadCreationAttributes;
+
+
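A hedged sketch of creating a kernel thread with the new attributes type and thread_create_thread() declared below; my_worker_entry and the chosen priority are placeholders:

    ThreadCreationAttributes attributes(&my_worker_entry, "example worker",
        B_NORMAL_PRIORITY, NULL);
    thread_id thread = thread_create_thread(attributes, true);
    if (thread >= 0)
        resume_thread(thread);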
#ifdef __cplusplus
extern "C" {
#endif
@@ -61,9 +99,6 @@ void thread_set_io_priority(int32 priority);
#define thread_get_current_thread arch_thread_get_current_thread
-Thread *thread_get_thread_struct(thread_id id);
-Thread *thread_get_thread_struct_locked(thread_id id);
-
static thread_id thread_get_current_thread_id(void);
static inline thread_id
thread_get_current_thread_id(void)
@@ -75,18 +110,21 @@ thread_get_current_thread_id(void)
static inline bool
thread_is_idle_thread(Thread *thread)
{
- return thread->entry == NULL;
+ return thread->priority == B_IDLE_PRIORITY;
}
-typedef bool (*thread_iterator_callback)(Thread* thread, void* cookie);
-Thread* thread_iterate_through_threads(thread_iterator_callback callback,
- void* cookie);
+thread_id allocate_thread_id();
+thread_id peek_next_thread_id();
-thread_id allocate_thread_id(void);
-thread_id peek_next_thread_id(void);
+status_t thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
+ void* argument1, void* argument2);
+status_t thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
+ size_t stackSize, size_t additionalSize);
+thread_id thread_create_thread(const ThreadCreationAttributes& attributes,
+ bool kernel);
thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
- void *args, team_id team, thread_id threadID);
+ void *args, team_id team);
status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
status_t *_returnCode);
@@ -99,7 +137,6 @@ status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
status_t thread_block_with_timeout_locked(uint32 timeoutFlags,
bigtime_t timeout);
-void thread_unblock(status_t threadID, status_t status);
// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
@@ -109,8 +146,10 @@ status_t _user_resume_thread(thread_id thread);
status_t _user_rename_thread(thread_id thread, const char *name);
thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
-status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags);
+status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
+ bigtime_t* _remainingTime);
status_t _user_kill_thread(thread_id thread);
+status_t _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int));
void _user_thread_yield(void);
void _user_exit_thread(status_t return_value);
bool _user_has_data(thread_id thread);
@@ -135,20 +174,41 @@ int _user_setrlimit(int resource, const struct rlimit * rlp);
#endif
-/*!
- \a thread must be the current thread.
- Thread lock can be, but doesn't need to be held.
+/*! Checks whether the current thread would immediately be interrupted when
+ blocking it with the given wait/interrupt flags.
+
+ The caller must hold the scheduler lock.
+
+ \param thread The current thread.
+ \param flags Wait/interrupt flags to be considered. Relevant are:
+ - \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
+ signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
+ - \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
+ signal.
+ \return \c true, if the thread would be interrupted, \c false otherwise.
*/
static inline bool
thread_is_interrupted(Thread* thread, uint32 flags)
{
- return ((flags & B_CAN_INTERRUPT)
- && (thread->sig_pending & ~thread->sig_block_mask) != 0)
- || ((flags & B_KILL_CAN_INTERRUPT)
- && (thread->sig_pending & KILL_SIGNALS));
+ sigset_t pendingSignals = thread->AllPendingSignals();
+ return ((flags & B_CAN_INTERRUPT) != 0
+ && (pendingSignals & ~thread->sig_block_mask) != 0)
+ || ((flags & B_KILL_CAN_INTERRUPT) != 0
+ && (pendingSignals & KILL_SIGNALS) != 0);
}
+/*! Checks whether the given thread is currently blocked (i.e. still waiting for
+ something).
+
+ If a stable answer is required, the caller must hold the scheduler lock.
+ Alternatively, if waiting is not interruptible and cannot time out, holding
+ the client lock that is held when calling thread_prepare_to_block() and the
+ unblocking functions works as well.
+
+ \param thread The thread in question.
+ \return \c true, if the thread is blocked, \c false otherwise.
+*/
static inline bool
thread_is_blocked(Thread* thread)
{
@@ -156,9 +216,109 @@ thread_is_blocked(Thread* thread)
}
-/*!
- \a thread must be the current thread.
- Thread lock can be, but doesn't need to be locked.
+/*! Prepares the current thread for waiting.
+
+ This is the first of two steps necessary to block the current thread
+ (IOW, to let it wait for someone else to unblock it or optionally time out
+ after a specified delay). The process consists of two steps to avoid race
+ conditions in case a lock other than the scheduler lock is involved.
+
+ Usually the thread waits for some condition to change and this condition is
+ something reflected in the caller's data structures which should be
+ protected by a client lock the caller knows about. E.g. in the semaphore
+ code that lock is a per-semaphore spinlock that protects the semaphore data,
+ including the semaphore count and the queue of waiting threads. For certain
+ low-level locking primitives (e.g. mutexes) that client lock is the
+ scheduler lock itself, which simplifies things a bit.
+
+ If a client lock other than the scheduler lock is used, this function must
+ be called with that lock being held. Afterwards that lock should be dropped
+ and the function that actually blocks the thread shall be invoked
+ (thread_block[_locked]() or thread_block_with_timeout[_locked]()). In
+ between these two steps no functionality that uses the thread blocking API
+ for this thread shall be used.
+
+ When the caller determines that the condition for unblocking the thread
+ occurred, it calls thread_unblock_locked() to unblock the thread. At that
+ time one of the locks that are held when calling thread_prepare_to_block() must
+ be held. Usually that would be the client lock. In two cases it generally
+ isn't, however, since the unblocking code doesn't know about the client
+ lock: 1. When thread_block_with_timeout[_locked]() had been used and the
+ timeout occurs. 2. When thread_prepare_to_block() had been called with one
+ or both of the \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT flags specified
+ and someone calls thread_interrupt() that is supposed to wake up the thread.
+ In either of these two cases only the scheduler lock is held by the
+ unblocking code. A timeout can only happen after
+ thread_block_with_timeout_locked() has been called, but an interruption is
+ possible at any time. The client code must deal with those situations.
+
+ Generally blocking and unblocking threads proceed in the following manner:
+
+ Blocking thread:
+ - Acquire client lock.
+ - Check client condition and decide whether blocking is necessary.
+ - Modify some client data structure to indicate that this thread is now
+ waiting.
+ - Release client lock (unless client lock is the scheduler lock).
+ - Block.
+ - Acquire client lock (unless client lock is the scheduler lock).
+ - Check client condition and compare with block result. E.g. if the wait was
+ interrupted or timed out, but the client condition indicates success, it
+ may be considered a success after all, since usually that happens when
+ another thread concurrently changed the client condition and also tried
+ to unblock the waiting thread. It is even necessary when that other
+ thread changed the client data structures in a way that associates some
+ resource with the unblocked thread, since otherwise the unblocked thread
+ would have to reverse that here.
+ - If still necessary -- i.e. not already taken care of by an unblocking
+ thread -- modify some client structure to indicate that the thread is no
+ longer waiting, so it isn't erroneously unblocked later.
+
+ Unblocking thread:
+ - Acquire client lock.
+ - Check client condition and decide whether a blocked thread can be woken
+ up.
+ - Check the client data structure that indicates whether one or more threads
+ are waiting and which thread(s) need(s) to be woken up.
+ - Unblock respective thread(s).
+ - Possibly change some client structure, so that an unblocked thread can
+ decide whether a concurrent timeout/interruption can be ignored, or
+ simply so that it doesn't have to do any more cleanup.
+
+ Note that in the blocking thread the steps after blocking are strictly
+ required only if timeouts or interruptions are possible. If they are not,
+ the blocking thread can only be woken up explicitly by an unblocking thread,
+ which could already take care of all the necessary client data structure
+ modifications, so that the blocking thread wouldn't have to do that.
+
+ Note that the client lock can but does not have to be a spinlock.
+ A mutex, a semaphore, or anything that doesn't try to use the thread
+ blocking API for the calling thread when releasing the lock is fine.
+ In particular that means in principle thread_prepare_to_block() can be
+ called with interrupts enabled.
+
+ Care must be taken when the wait can be interrupted or can time out,
+ especially with a client lock that uses the thread blocking API. After a
+ blocked thread has been interrupted or the timeout occurred, it cannot
+ acquire the client lock (or any other lock using the thread blocking API)
+ without first making sure that the thread no longer appears to be waiting
+ to other client code. Otherwise another thread could try to unblock it,
+ which could erroneously unblock the thread while it is already waiting on the
+ client lock. So usually when interruptions or timeouts are possible a
+ spinlock needs to be involved.
+
+ \param thread The current thread.
+ \param flags The blocking flags. Relevant are:
+ - \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
+ signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
+ - \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
+ signal.
+ \param type The type of object the thread will be blocked at. Informative/
+ for debugging purposes. Must be one of the \c THREAD_BLOCK_TYPE_*
+ constants. \c THREAD_BLOCK_TYPE_OTHER implies that \a object is a
+ string.
+ \param object The object the thread will be blocked at. Informative/for
+ debugging purposes.
*/
static inline void
thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
@@ -173,11 +333,27 @@ thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
}
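To make the two-step protocol described above concrete, a simple non-interruptible wait might look roughly like this (sketch only; sLock and sConditionMet stand for the client spinlock and condition, error handling is omitted):

    Thread* thread = thread_get_current_thread();

    cpu_status state = disable_interrupts();
    acquire_spinlock(&sLock);

    while (!sConditionMet) {
        // step 1: announce the wait while the client lock is held
        thread_prepare_to_block(thread, 0, THREAD_BLOCK_TYPE_OTHER,
            "example wait");

        release_spinlock(&sLock);
        restore_interrupts(state);

        // step 2: actually block; returns the status passed by the unblocker
        thread_block();

        state = disable_interrupts();
        acquire_spinlock(&sLock);
    }

    release_spinlock(&sLock);
    restore_interrupts(state);

The unblocking side sets sConditionMet and calls thread_unblock_locked() with both the client lock and the scheduler lock held.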
+/*! Blocks the current thread.
+
+ The thread is blocked until someone else unblocks it. Must be called after a
+ call to thread_prepare_to_block(). If the thread has already been unblocked
+ after the previous call to thread_prepare_to_block(), this function will
+ return immediately. Cf. the documentation of thread_prepare_to_block() for
+ more details.
+
+ The caller must hold the scheduler lock.
+
+ \param thread The current thread.
+ \return The error code passed to the unblocking function. thread_interrupt()
+ uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was
+ successful while another error code indicates a failure (what that means
+ depends on the client code).
+*/
static inline status_t
thread_block_locked(Thread* thread)
{
if (thread->wait.status == 1) {
- // check for signals, if interruptable
+ // check for signals, if interruptible
if (thread_is_interrupted(thread, thread->wait.flags)) {
thread->wait.status = B_INTERRUPTED;
} else {
@@ -190,6 +366,19 @@ thread_block_locked(Thread* thread)
}
+/*! Unblocks the specified blocked thread.
+
+ If the thread is no longer waiting (e.g. because thread_unblock_locked() has
+ already been called in the meantime), this function does not have any
+ effect.
+
+ The caller must hold the scheduler lock and the client lock (might be the
+ same).
+
+ \param thread The thread to be unblocked.
+ \param status The unblocking status. That's what the unblocked thread's
+ call to thread_block_locked() will return.
+*/
static inline void
thread_unblock_locked(Thread* thread, status_t status)
{
@@ -202,6 +391,29 @@ thread_unblock_locked(Thread* thread, status_t status)
}
+/*! Interrupts the specified blocked thread, if possible.
+
+ The function checks whether the thread can be interrupted and, if so, calls
+ \code thread_unblock_locked(thread, B_INTERRUPTED) \endcode. Otherwise the
+ function is a no-op.
+
+ The caller must hold the scheduler lock. Normally thread_unblock_locked()
+ also requires the client lock to be held, but in this case the caller
+ usually doesn't know it. This implies that the client code needs to take
+ special care, if waits are interruptible. See thread_prepare_to_block() for
+ more information.
+
+ \param thread The thread to be interrupted.
+ \param kill If \c false, the blocked thread is only interrupted when the
+ flag \c B_CAN_INTERRUPT was specified for the blocked thread. If
+ \c true, it is only interrupted when at least one of the flags
+ \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT was specified for the
+ blocked thread.
+ \return \c B_OK, if the thread is interruptible and thread_unblock_locked()
+ was called, \c B_NOT_ALLOWED otherwise. \c B_OK doesn't imply that the
+ thread actually has been interrupted -- it could already have been
+ unblocked before.
+*/
static inline status_t
thread_interrupt(Thread* thread, bool kill)
{
diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h
index 2097c1f..017da64 100644
--- a/headers/private/kernel/thread_types.h
+++ b/headers/private/kernel/thread_types.h
@@ -10,30 +10,23 @@
#ifndef _ASSEMBLER
-#include <Referenceable.h>
+#include <pthread.h>
#include <arch/thread_types.h>
#include <condition_variable.h>
+#include <heap.h>
+#include <ksignal.h>
#include <lock.h>
-#include <signal.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
+#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
+#include <util/KernelReferenceable.h>
#include <util/list.h>
-extern spinlock gThreadSpinlock;
-#define GRAB_THREAD_LOCK() acquire_spinlock(&gThreadSpinlock)
-#define RELEASE_THREAD_LOCK() release_spinlock(&gThreadSpinlock)
-
-extern spinlock gTeamSpinlock;
- // NOTE: TEAM lock can be held over a THREAD lock acquisition,
- // but not the other way (to avoid deadlock)
-#define GRAB_TEAM_LOCK() acquire_spinlock(&gTeamSpinlock)
-#define RELEASE_TEAM_LOCK() release_spinlock(&gTeamSpinlock)
-
enum additional_thread_state {
THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
// THREAD_STATE_BIRTH // thread is being created
@@ -43,9 +36,11 @@ enum additional_thread_state {
#define THREAD_MAX_SET_PRIORITY B_REAL_TIME_PRIORITY
enum team_state {
- TEAM_STATE_NORMAL, // normal state
- TEAM_STATE_BIRTH, // being contructed
- TEAM_STATE_DEATH // being killed
+ TEAM_STATE_NORMAL, // normal state
+ TEAM_STATE_BIRTH, // being constructed
+ TEAM_STATE_SHUTDOWN, // still lives, but is going down
+ TEAM_STATE_DEATH // only the Team object still exists, threads are
+ // gone
};
#define TEAM_FLAG_EXEC_DONE 0x01
@@ -71,33 +66,14 @@ struct xsi_sem_context; // defined in xsi_semaphore.cpp
namespace BKernel {
struct Team;
struct Thread;
+ struct ProcessGroup;
}
-struct death_entry {
+struct thread_death_entry {
struct list_link link;
- pid_t group_id;
thread_id thread;
status_t status;
- uint16 reason;
- uint16 signal;
-};
-
-struct process_session {
- pid_t id;
- int32 group_count;
- int32 controlling_tty; // index of the controlling tty,
- // -1 if none
- pid_t foreground_group;
-};
-
-struct process_group {
- struct process_group *next; // next in hash
- struct process_session *session;
- pid_t id;
- int32 refs;
- BKernel::Team *teams;
- bool orphaned;
};
struct team_loading_info {
@@ -122,7 +98,9 @@ struct team_watcher {
struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
job_control_state state; // current team job control state
thread_id thread; // main thread ID == team ID
+ uint16 signal; // signal causing the current state
bool has_group_ref;
+ uid_t signaling_user;
// valid while state != JOB_CONTROL_STATE_DEAD
BKernel::Team* team;
@@ -130,8 +108,8 @@ struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
// valid when state == JOB_CONTROL_STATE_DEAD
pid_t group_id;
status_t status;
- uint16 reason;
- uint16 signal;
+ uint16 reason; // reason for the team's demise, one of the
+ // CLD_* values defined in <signal.h>
job_control_entry();
~job_control_entry();
@@ -215,41 +193,67 @@ typedef bool (*page_fault_callback)(addr_t address, addr_t faultAddress,
namespace BKernel {
-struct Team : AssociatedDataOwner {
- Team *next; // next in hash
- Team *siblings_next;
- Team *parent;
- Team *children;
- Team *group_next;
- team_id id;
+
+template<typename IDType>
+struct TeamThreadIteratorEntry
+ : DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
+ typedef IDType id_type;
+ typedef TeamThreadIteratorEntry<id_type> iterator_type;
+
+ id_type id; // -1 for iterator entries, >= 0 for actual elements
+ bool visible; // the entry is publicly visible
+};
+
+
+struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
+ AssociatedDataOwner {
+ DoublyLinkedListLink<Team> global_list_link;
+ Team *hash_next; // next in hash
+ Team *siblings_next; // next in parent's list; protected by
+ // parent's fLock
+ Team *parent; // write-protected by both parent (if any)
+ // and this team's fLock
+ Team *children; // protected by this team's fLock;
+ // adding/removing a child also requires the
+ // child's fLock
+ Team *group_next; // protected by the group's lock
+
+ int64 serial_number; // immutable after adding team to hash
+
+ // process group info -- write-protected by both the group's lock, the
+ // team's lock, and the team's parent's lock
pid_t group_id;
pid_t session_id;
- struct process_group *group;
- char name[B_OS_NAME_LENGTH];
- char args[64]; // contents for the team_info::args field
+ ProcessGroup *group;
+
int num_threads; // number of threads in this team
int state; // current team state, see above
int32 flags;
struct io_context *io_context;
struct realtime_sem_context *realtime_sem_context;
struct xsi_sem_context *xsi_sem_context;
- struct team_death_entry *death_entry;
+ struct team_death_entry *death_entry; // protected by fLock
struct list dead_threads;
int dead_threads_count;
+ // protected by the team's fLock
team_dead_children dead_children;
team_job_control_children stopped_children;
team_job_control_children continued_children;
+
+ // protected by the parent team's fLock
struct job_control_entry* job_control_entry;
VMAddressSpace *address_space;
- Thread *main_thread;
- Thread *thread_list;
- struct team_loading_info *loading_info;
- struct list image_list;
+ Thread *main_thread; // protected by fLock and the scheduler
+ // lock (and the thread's lock), immutable
+ // after first set
+ Thread *thread_list; // protected by fLock and the scheduler lock
+ struct team_loading_info *loading_info; // protected by fLock
+ struct list image_list; // protected by sImageMutex
struct list watcher_list;
- struct list sem_list;
- struct list port_list;
+ struct list sem_list; // protected by sSemsSpinlock
+ struct list port_list; // protected by sPortsLock
struct arch_team arch_info;
addr_t user_data;
@@ -260,9 +264,13 @@ struct Team : AssociatedDataOwner {
struct team_debug_info debug_info;
+ // protected by scheduler lock
bigtime_t dead_threads_kernel_time;
bigtime_t dead_threads_user_time;
+ bigtime_t cpu_clock_offset;
+ // user group information; protected by fLock, the *_uid/*_gid fields also
+ // by the scheduler lock
uid_t saved_set_uid;
uid_t real_uid;
uid_t effective_uid;
@@ -271,44 +279,181 @@ struct Team : AssociatedDataOwner {
gid_t effective_gid;
gid_t* supplementary_groups;
int supplementary_group_count;
+
+ // Exit status information. Set when the first terminal event occurs,
+ // immutable afterwards. Protected by fLock.
+ struct {
+ uint16 reason; // reason for the team's demise, one of the
+ // CLD_* values defined in <signal.h>
+ uint16 signal; // signal killing the team
+ uid_t signaling_user; // real UID of the signal sender
+ status_t status; // exit status, if normal team exit
+ bool initialized; // true when the state has been initialized
+ } exit;
+
+public:
+ ~Team();
+
+ static Team* Create(team_id id, const char* name,
+ bool kernel);
+ static Team* Get(team_id id);
+ static Team* GetAndLock(team_id id);
+
+ bool Lock()
+ { mutex_lock(&fLock); return true; }
+ bool TryLock()
+ { return mutex_trylock(&fLock) == B_OK; }
+ void Unlock()
+ { mutex_unlock(&fLock); }
+
+ void UnlockAndReleaseReference()
+ { Unlock(); ReleaseReference(); }
+
+ void LockTeamAndParent(bool dontLockParentIfKernel);
+ void UnlockTeamAndParent();
+ void LockTeamAndProcessGroup();
+ void UnlockTeamAndProcessGroup();
+ void LockTeamParentAndProcessGroup();
+ void UnlockTeamParentAndProcessGroup();
+ void LockProcessGroup()
+ { LockTeamAndProcessGroup(); Unlock(); }
+
+ const char* Name() const { return fName; }
+ void SetName(const char* name);
+
+ const char* Args() const { return fArgs; }
+ void SetArgs(const char* args);
+ void SetArgs(const char* path,
+ const char* const* otherArgs,
+ int otherArgCount);
+
+ BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
+ { return fQueuedSignalsCounter; }
+ sigset_t PendingSignals() const
+ { return fPendingSignals.AllSignals(); }
+
+ void AddPendingSignal(int signal)
+ { fPendingSignals.AddSignal(signal); }
+ void AddPendingSignal(Signal* signal)
+ { fPendingSignals.AddSignal(signal); }
+ void RemovePendingSignal(int signal)
+ { fPendingSignals.RemoveSignal(signal); }
+ void RemovePendingSignal(Signal* signal)
+ { fPendingSignals.RemoveSignal(signal); }
+ void RemovePendingSignals(sigset_t mask)
+ { fPendingSignals.RemoveSignals(mask); }
+ void ResetSignalsOnExec();
+
+ inline int32 HighestPendingSignalPriority(
+ sigset_t nonBlocked) const;
+ inline Signal* DequeuePendingSignal(sigset_t nonBlocked,
+ Signal& buffer);
+
+ struct sigaction& SignalActionFor(int32 signal)
+ { return fSignalActions[signal - 1]; }
+ void InheritSignalActions(Team* parent);
+
+ // user timers -- protected by fLock
+ UserTimer* UserTimerFor(int32 id) const
+ { return fUserTimers.TimerFor(id); }
+ status_t AddUserTimer(UserTimer* timer);
+ void RemoveUserTimer(UserTimer* timer);
+ void DeleteUserTimers(bool userDefinedOnly);
+
+ bool CheckAddUserDefinedTimer();
+ void UserDefinedTimersRemoved(int32 count);
+
+ void UserTimerActivated(TeamTimeUserTimer* timer)
+ { fCPUTimeUserTimers.Add(timer); }
+ void UserTimerActivated(TeamUserTimeUserTimer* timer)
+ { fUserTimeUserTimers.Add(timer); }
+ void UserTimerDeactivated(TeamTimeUserTimer* timer)
+ { fCPUTimeUserTimers.Remove(timer); }
+ void UserTimerDeactivated(
+ TeamUserTimeUserTimer* timer)
+ { fUserTimeUserTimers.Remove(timer); }
+ void DeactivateCPUTimeUserTimers();
+ // both total and user CPU timers
+ bool HasActiveCPUTimeUserTimers() const
+ { return !fCPUTimeUserTimers.IsEmpty(); }
+ bool HasActiveUserTimeUserTimers() const
+ { return !fUserTimeUserTimers.IsEmpty(); }
+ TeamTimeUserTimerList::ConstIterator
+ CPUTimeUserTimerIterator() const
+ { return fCPUTimeUserTimers.GetIterator(); }
+ inline TeamUserTimeUserTimerList::ConstIterator
+ UserTimeUserTimerIterator() const;
+
+ bigtime_t CPUTime(bool ignoreCurrentRun) const;
+ bigtime_t UserCPUTime() const;
+
+private:
+ Team(team_id id, bool kernel);
+
+private:
+ mutex fLock;
+ char fName[B_OS_NAME_LENGTH];
+ char fArgs[64];
+ // contents for the team_info::args field
+
+ BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
+ BKernel::PendingSignals fPendingSignals;
+ // protected by scheduler lock
+ struct sigaction fSignalActions[MAX_SIGNAL_NUMBER];
+ // indexed signal - 1, protected by fLock
+
+ UserTimerList fUserTimers; // protected by fLock
+ TeamTimeUserTimerList fCPUTimeUserTimers;
+ // protected by scheduler lock
+ TeamUserTimeUserTimerList fUserTimeUserTimers;
+ vint32 fUserDefinedTimerCount; // accessed atomically
};
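A short usage sketch for the reference-counted, individually locked Team objects (illustrative only; the error value and output are placeholders):

    Team* team = Team::GetAndLock(teamID);
    if (team == NULL)
        return B_BAD_TEAM_ID;

    dprintf("team %" B_PRId32 " (%s) has %d threads\n", team->id,
        team->Name(), team->num_threads);

    team->UnlockAndReleaseReference();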
-struct Thread {
+struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
int32 flags; // summary of events relevant in interrupt
// handlers (signals pending, user debugging
// enabled, etc.)
- Thread *all_next;
- Thread *team_next;
- Thread *queue_next; /* i.e. run queue, release queue, etc. */
- timer alarm;
- thread_id id;
- char name[B_OS_NAME_LENGTH];
- int32 priority;
- int32 next_priority;
- int32 io_priority;
- int32 state;
- int32 next_state;
- struct cpu_ent *cpu;
- struct cpu_ent *previous_cpu;
- int32 pinned_to_cpu;
-
- sigset_t sig_pending;
- sigset_t sig_block_mask;
- sigset_t sig_temp_enabled;
- struct sigaction sig_action[32];
- addr_t signal_stack_base;
- size_t signal_stack_size;
- bool signal_stack_enabled;
-
- bool in_kernel;
- bool was_yielded;
- struct scheduler_thread_data* scheduler_data;
-
- struct user_thread* user_thread;
+ int64 serial_number; // immutable after adding thread to hash
+ Thread *hash_next; // protected by thread hash lock
+ Thread *team_next; // protected by team lock and fLock
+ Thread *queue_next; // protected by scheduler lock
+ timer alarm; // protected by scheduler lock
+ char name[B_OS_NAME_LENGTH]; // protected by fLock
+ int32 priority; // protected by scheduler lock
+ int32 next_priority; // protected by scheduler lock
+ int32 io_priority; // protected by fLock
+ int32 state; // protected by scheduler lock
+ int32 next_state; // protected by scheduler lock
+ struct cpu_ent *cpu; // protected by scheduler lock
+ struct cpu_ent *previous_cpu; // protected by scheduler lock
+ int32 pinned_to_cpu; // only accessed by this thread or in the
+ // scheduler, when thread is not running
+
+ sigset_t sig_block_mask; // protected by scheduler lock,
+ // only modified by the thread itself
+ sigset_t sigsuspend_original_unblocked_mask;
+ // non-0 after a return from _user_sigsuspend(), containing the inverted
+ // original signal mask, reset in handle_signals(); only accessed by
+ // this thread
+ ucontext_t* user_signal_context; // only accessed by this thread
+ addr_t signal_stack_base; // only accessed by this thread
+ size_t signal_stack_size; // only accessed by this thread
+ bool signal_stack_enabled; // only accessed by this thread
+
+ bool in_kernel; // protected by time_lock, only written by
+ // this thread
+ bool was_yielded; // protected by scheduler lock
+ struct scheduler_thread_data* scheduler_data; // protected by scheduler lock
+
+ struct user_thread* user_thread; // write-protected by fLock, only
+ // modified by the thread itself and
+ // thus freely readable by it
+
+ void (*cancel_function)(int);
struct {
- uint8 parameters[32];
+ uint8 parameters[SYSCALL_RESTART_PARAMETER_SIZE];
} syscall_restart;
struct {
@@ -322,13 +467,15 @@ struct Thread {
struct PrivateConditionVariableEntry *condition_variable_entry;
struct {
- sem_id write_sem;
- sem_id read_sem;
+ sem_id write_sem; // acquired by writers before writing
+ sem_id read_sem; // released by writers after writing, acquired
+ // by this thread when reading
thread_id sender;
int32 code;
size_t size;
void* buffer;
- } msg;
+ } msg; // write_sem/read_sem are protected by fLock when accessed by
+ // others, the other fields are protected by write_sem/read_sem
union {
addr_t fault_handler;
@@ -340,50 +487,304 @@ struct Thread {
int32 page_faults_allowed;
/* this field may only stay in debug builds in the future */
- thread_entry_func entry;
- void *args1, *args2;
- BKernel::Team *team;
+ BKernel::Team *team; // protected by team lock, thread lock, scheduler
+ // lock
struct {
- sem_id sem;
- status_t status;
- uint16 reason;
- uint16 signal;
- struct list waiters;
+ sem_id sem; // immutable after thread creation
+ status_t status; // accessed only by this thread
+ struct list waiters; // protected by fLock
} exit;
- struct select_info *select_infos;
+ struct select_info *select_infos; // protected by fLock
struct thread_debug_info debug_info;
// stack
- area_id kernel_stack_area;
- addr_t kernel_stack_base;
- addr_t kernel_stack_top;
- area_id user_stack_area;
- addr_t user_stack_base;
- size_t user_stack_size;
+ area_id kernel_stack_area; // immutable after thread creation
+ addr_t kernel_stack_base; // immutable after thread creation
+ addr_t kernel_stack_top; // immutable after thread creation
+ area_id user_stack_area; // protected by thread lock
+ addr_t user_stack_base; // protected by thread lock
+ size_t user_stack_size; // protected by thread lock
addr_t user_local_storage;
// usually allocated at the safe side of the stack
int kernel_errno;
// kernel "errno" differs from its userspace alter ego
- bigtime_t user_time;
- bigtime_t kernel_time;
- bigtime_t last_time;
+ // user_time, kernel_time, and last_time are only written by the thread
+ // itself, so they can be read by the thread without lock. Holding the
+ // scheduler lock and checking that the thread does not run also guarantees
+ // that the times will not change.
+ spinlock time_lock;
+ bigtime_t user_time; // protected by time_lock
+ bigtime_t kernel_time; // protected by time_lock
+ bigtime_t last_time; // protected by time_lock
+ bigtime_t cpu_clock_offset; // protected by scheduler lock
void (*post_interrupt_callback)(void*);
void* post_interrupt_data;
- // architecture dependant section
+ // architecture dependent section
struct arch_thread arch_info;
+
+public:
+ Thread() {}
+ // dummy for the idle threads
+ Thread(const char *name, thread_id threadID,
+ struct cpu_ent *cpu);
+ ~Thread();
+
+ static status_t Create(const char* name, Thread*& _thread);
+
+ static Thread* Get(thread_id id);
+ static Thread* GetAndLock(thread_id id);
+ static Thread* GetDebug(thread_id id);
+ // in kernel debugger only
+
+ static bool IsAlive(thread_id id);
+
+ void* operator new(size_t size);
+ void* operator new(size_t, void* pointer);
+ void operator delete(void* pointer, size_t size);
+
+ status_t Init(bool idleThread);
+
+ bool Lock()
+ { mutex_lock(&fLock); return true; }
+ bool TryLock()
+ { return mutex_trylock(&fLock) == B_OK; }
+ void Unlock()
+ { mutex_unlock(&fLock); }
+
+ void UnlockAndReleaseReference()
+ { Unlock(); ReleaseReference(); }
+
+ bool IsAlive() const;
+
+ bool IsRunning() const
+ { return cpu != NULL; }
+ // scheduler lock must be held
+
+ sigset_t ThreadPendingSignals() const
+ { return fPendingSignals.AllSignals(); }
+ inline sigset_t AllPendingSignals() const;
+ void AddPendingSignal(int signal)
+ { fPendingSignals.AddSignal(signal); }
+ void AddPendingSignal(Signal* signal)
+ { fPendingSignals.AddSignal(signal); }
+ void RemovePendingSignal(int signal)
+ { fPendingSignals.RemoveSignal(signal); }
+ void RemovePendingSignal(Signal* signal)
+ { fPendingSignals.RemoveSignal(signal); }
+ void RemovePendingSignals(sigset_t mask)
+ { fPendingSignals.RemoveSignals(mask); }
+ void ResetSignalsOnExec();
+
+ inline int32 HighestPendingSignalPriority(
+ sigset_t nonBlocked) const;
+ inline Signal* DequeuePendingSignal(sigset_t nonBlocked,
+ Signal& buffer);
+
+ // user timers -- protected by fLock
+ UserTimer* UserTimerFor(int32 id) const
+ { return fUserTimers.TimerFor(id); }
+ status_t AddUserTimer(UserTimer* timer);
+ void RemoveUserTimer(UserTimer* timer);
+ void DeleteUserTimers(bool userDefinedOnly);
+
+ void UserTimerActivated(ThreadTimeUserTimer* timer)
+ { fCPUTimeUserTimers.Add(timer); }
+ void UserTimerDeactivated(ThreadTimeUserTimer* timer)
+ { fCPUTimeUserTimers.Remove(timer); }
+ void DeactivateCPUTimeUserTimers();
+ bool HasActiveCPUTimeUserTimers() const
+ { return !fCPUTimeUserTimers.IsEmpty(); }
+ ThreadTimeUserTimerList::ConstIterator
+ CPUTimeUserTimerIterator() const
+ { return fCPUTimeUserTimers.GetIterator(); }
+
+ inline bigtime_t CPUTime(bool ignoreCurrentRun) const;
+
+private:
+ mutex fLock;
+
+ BKernel::PendingSignals fPendingSignals;
+ // protected by scheduler lock
+
+ UserTimerList fUserTimers; // protected by fLock
+ ThreadTimeUserTimerList fCPUTimeUserTimers;
+ // protected by scheduler lock
+};
+
+
+struct ProcessSession : BReferenceable {
+ pid_t id;
+ int32 controlling_tty; // index of the controlling tty,
+ // -1 if none
+ pid_t foreground_group;
+
+public:
+ ProcessSession(pid_t id);
+ ~ProcessSession();
+
+ bool Lock()
+ { mutex_lock(&fLock); return true; }
+ bool TryLock()
+ { return mutex_trylock(&fLock) == B_OK; }
+ void Unlock()
+ { mutex_unlock(&fLock); }
+
+private:
+ mutex fLock;
+};
+
+
+struct ProcessGroup : KernelReferenceable {
+ struct ProcessGroup *next; // next in hash
+ pid_t id;
+ BKernel::Team *teams;
+
+public:
+ ProcessGroup(pid_t id);
+ ~ProcessGroup();
+
+ static ProcessGroup* Get(pid_t id);
+
+ bool Lock()
+ { mutex_lock(&fLock); return true; }
+ bool TryLock()
+ { return mutex_trylock(&fLock) == B_OK; }
+ void Unlock()
+ { mutex_unlock(&fLock); }
+
+ ProcessSession* Session() const
+ { return fSession; }
+ void Publish(ProcessSession* session);
+ void PublishLocked(ProcessSession* session);
+
+ bool IsOrphaned() const;
+
+ void ScheduleOrphanedCheck();
+ void UnsetOrphanedCheck();
+
+public:
+ SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;
+
+private:
+ mutex fLock;
+ ProcessSession* fSession;
+ bool fInOrphanedCheckList; // protected by
+ // sOrphanedCheckLock
+};
+
+typedef SinglyLinkedList<ProcessGroup,
+ SinglyLinkedListMemberGetLink<ProcessGroup,
+ &ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;
+
+
+/*! \brief Allows iterating through all teams.
+*/
+struct TeamListIterator {
+ TeamListIterator();
+ ~TeamListIterator();
+
+ Team* Next();
+
+private:
+ TeamThreadIteratorEntry<team_id> fEntry;
+};
+
+
+/*! \brief Allows iterating through all threads.
+*/
+struct ThreadListIterator {
+ ThreadListIterator();
+ ~ThreadListIterator();
+
+ Thread* Next();
+
+private:
+ TeamThreadIteratorEntry<thread_id> fEntry;
};
+
+inline int32
+Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
+{
+ return fPendingSignals.HighestSignalPriority(nonBlocked);
+}
+
+
+inline Signal*
+Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
+{
+ return fPendingSignals.DequeueSignal(nonBlocked, buffer);
+}
+
+
+inline TeamUserTimeUserTimerList::ConstIterator
+Team::UserTimeUserTimerIterator() const
+{
+ return fUserTimeUserTimers.GetIterator();
+}
+
+
+inline sigset_t
+Thread::AllPendingSignals() const
+{
+ return fPendingSignals.AllSignals() | team->PendingSignals();
+}
+
+
+inline int32
+Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
+{
+ return fPendingSignals.HighestSignalPriority(nonBlocked);
+}
+
+
+inline Signal*
+Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
+{
+ return fPendingSignals.DequeueSignal(nonBlocked, buffer);
+}
+
+
+/*! Returns the thread's current total CPU time (kernel + user + offset).
+
+ The caller must hold the scheduler lock.
+
+ \param ignoreCurrentRun If \c true and the thread is currently running,
+		don't add the time that has passed since \c last_time was last updated.
+		Should
+ be used in "thread unscheduled" scheduler callbacks, since although the
+ thread is still running at that time, its time has already been stopped.
+ \return The thread's current total CPU time.
+*/
+inline bigtime_t
+Thread::CPUTime(bool ignoreCurrentRun) const
+{
+ bigtime_t time = user_time + kernel_time + cpu_clock_offset;
+
+ // If currently running, also add the time since the last check, unless
+ // requested otherwise.
+ if (!ignoreCurrentRun && cpu != NULL)
+ time += system_time() - last_time;
+
+ return time;
+}
+
+
} // namespace BKernel
using BKernel::Team;
+using BKernel::TeamListIterator;
using BKernel::Thread;
+using BKernel::ThreadListIterator;
+using BKernel::ProcessSession;
+using BKernel::ProcessGroup;
+using BKernel::ProcessGroupList;
struct thread_queue {
diff --git a/headers/private/kernel/timer.h b/headers/private/kernel/timer.h
index 254bafc..2789b56 100644
--- a/headers/private/kernel/timer.h
+++ b/headers/private/kernel/timer.h
@@ -1,4 +1,4 @@
-/*
+/*
** Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
@@ -15,8 +15,21 @@ extern "C" {
struct kernel_args;
-#define B_TIMER_ACQUIRE_THREAD_LOCK 0x8000
-#define B_TIMER_FLAGS B_TIMER_ACQUIRE_THREAD_LOCK
+#define B_TIMER_REAL_TIME_BASE 0x2000
+	// For an absolute timer the given time is interpreted as real time, not
+	// as system time. Note that setting the real-time clock will cause the
+ // timer to be updated -- it will expire according to the new clock.
+ // Relative timers are unaffected by this flag.
+#define B_TIMER_USE_TIMER_STRUCT_TIMES 0x4000
+ // For add_timer(): Use the timer::schedule_time (absolute time) and
+ // timer::period values instead of the period parameter.
+#define B_TIMER_ACQUIRE_SCHEDULER_LOCK 0x8000
+	// The timer hook is invoked with the scheduler lock held. Invoking
+	// cancel_timer() with the scheduler lock held, too, helps to avoid
+	// race conditions.
+#define B_TIMER_FLAGS \
+ (B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_ACQUIRE_SCHEDULER_LOCK \
+ | B_TIMER_REAL_TIME_BASE)
/* Timer info structure */
struct timer_info {
@@ -32,6 +45,8 @@ typedef struct timer_info timer_info;
/* kernel functions */
status_t timer_init(struct kernel_args *);
+void timer_init_post_rtc(void);
+void timer_real_time_clock_changed();
int32 timer_interrupt(void);
#ifdef __cplusplus
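
A minimal sketch of an absolute one-shot timer using the new flags, assuming
the existing add_timer(timer*, hook, period, flags) kernel interface and the
timer::schedule_time/timer::period fields the comments above refer to
(illustrative sketch only):

	static timer sExampleTimer;

	static int32
	example_timer_hook(timer* /*timer*/)
	{
		// runs on expiry; with B_TIMER_ACQUIRE_SCHEDULER_LOCK the scheduler
		// lock would already be held here
		return B_HANDLED_INTERRUPT;
	}

	static void
	schedule_example_timer()
	{
		// expire once, 100 ms from now (absolute system time); the time is
		// taken from the timer struct rather than from the period parameter
		sExampleTimer.schedule_time = system_time() + 100000;
		sExampleTimer.period = 0;
		add_timer(&sExampleTimer, &example_timer_hook, 0,
			B_ONE_SHOT_ABSOLUTE_TIMER | B_TIMER_USE_TIMER_STRUCT_TIMES);
	}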
diff --git a/headers/private/kernel/user_debugger.h b/headers/private/kernel/user_debugger.h
index cf92522..7774833 100644
--- a/headers/private/kernel/user_debugger.h
+++ b/headers/private/kernel/user_debugger.h
@@ -36,25 +36,26 @@ using BKernel::Thread;
//
// Locking policy:
// 1) When accessing the structure it must be made sure, that the structure,
-// (i.e. the struct team it lives in) isn't deleted. Thus one either needs to
-// acquire the global team lock, or one accesses the structure from a thread
-// of that team.
+// (i.e. the struct Team it lives in) isn't deleted. Thus one either needs to
+// get a team reference, lock the team, or access the structure from a
+// thread of that team.
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
// requires no further locks (in addition to 1) that is). Writing requires
-// `lock' being held and must be done atomically, too
+// `lock' to be held and must be done atomically, too
// (atomic_{set,and,or}()). Reading with `lock' being held doesn't need to
// be done atomically.
-// 3) Access to all other fields (read or write) requires `lock' being held.
+// 3) Access to all other fields (read or write) requires `lock' to be held.
+// 4) Locking order is scheduler lock -> Team -> Thread -> team_debug_info::lock
+// -> thread_debug_info::lock.
//
struct team_debug_info {
spinlock lock;
// Guards the remaining fields. Should always be the innermost lock
- // to be acquired/released.
+ // to be acquired/released, save for thread_debug_info::lock.
int32 flags;
- // Set atomically. So reading atomically is OK, even when the team
- // lock is not held (at least if it is certain, that the team struct
- // won't go).
+		// Set atomically. So reading atomically is OK, even when the lock is
+		// not held (at least if it is certain that the team struct won't go
+		// away).
team_id debugger_team;
port_id debugger_port;
@@ -71,12 +72,13 @@ struct team_debug_info {
// counter incremented whenever an image is created/deleted
struct ConditionVariable* debugger_changed_condition;
- // Set whenever someone is going (or planning) to change the debugger.
- // If one wants to do the same, one has to wait for this condition.
- // Both threads lock (outer) and team debug info lock (inner) have to
- // be held when accessing this field. After setting to a condition
- // variable the thread won't be deleted (until unsetting it) -- it might
- // be removed from the team hash table, though.
+		// Set to a condition variable when going to change the debugger. Anyone
+		// who wants to change the debugger as well needs to wait until the
+		// condition variable is unset again (waiting for the condition and
+		// rechecking). The field and the condition variable are protected by
+		// 'lock'. After setting the condition variable the team is guaranteed
+		// not to be deleted (until it is unset); it might be removed from the
+		// team hash table, though.
struct BreakpointManager* breakpoint_manager;
// manages hard- and software breakpoints
@@ -84,11 +86,31 @@ struct team_debug_info {
struct arch_team_debug_info arch_info;
};
+// Thread related debugging data.
+//
+// Locking policy:
+// 1) When accessing the structure it must be made sure that the structure
+// (i.e. the struct Thread it lives in) isn't deleted. Thus one either needs
+// to get a thread reference, lock the thread, or access the structure
+// of the current thread.
+// 2) Access to the `flags' field is atomic. Reading via atomic_get()
+// requires no further locks (in addition to 1) that is). Writing requires
+// `lock' to be held and must be done atomically, too
+// (atomic_{set,and,or}()). Reading with `lock' being held doesn't need to
+// be done atomically.
+// 3) Access to all other fields (read or write) requires `lock' to be held.
+// 4) Locking order is scheduler lock -> Team -> Thread -> team_debug_info::lock
+// -> thread_debug_info::lock.
+//
struct thread_debug_info {
+ spinlock lock;
+ // Guards the remaining fields. Should always be the innermost lock
+ // to be acquired/released.
+
int32 flags;
- // Set atomically. So reading atomically is OK, even when the thread
- // lock is not held (at least if it is certain, that the thread struct
- // won't go).
+		// Set atomically. So reading atomically is OK, even when the lock is
+		// not held (at least if it is certain that the thread struct won't
+		// go away).
port_id debug_port;
// the port the thread is waiting on for commands from the nub thread
@@ -238,7 +260,7 @@ void user_debug_stop_thread();
void user_debug_team_created(team_id teamID);
void user_debug_team_deleted(team_id teamID, port_id debuggerPort);
void user_debug_team_exec();
-void user_debug_update_new_thread_flags(thread_id threadID);
+void user_debug_update_new_thread_flags(Thread* thread);
void user_debug_thread_created(thread_id threadID);
void user_debug_thread_deleted(team_id teamID, thread_id threadID);
void user_debug_thread_exiting(Thread* thread);
diff --git a/headers/private/kernel/usergroup.h b/headers/private/kernel/usergroup.h
index 5ddbdb8..dac1c94 100644
--- a/headers/private/kernel/usergroup.h
+++ b/headers/private/kernel/usergroup.h
@@ -25,7 +25,6 @@ extern "C" {
// kernel private functions
void inherit_parent_user_and_group(Team* team, Team* parent);
-void inherit_parent_user_and_group_locked(Team* team, Team* parent);
status_t update_set_id_user_and_group(Team* team, const char* file);
// syscalls
diff --git a/headers/private/kernel/util/AutoLock.h b/headers/private/kernel/util/AutoLock.h
index 07b0914..12f9b97 100644
--- a/headers/private/kernel/util/AutoLock.h
+++ b/headers/private/kernel/util/AutoLock.h
@@ -177,6 +177,10 @@ public:
typedef AutoLocker<Thread, ThreadCPUPinLocking> ThreadCPUPinner;
+typedef AutoLocker<Team> TeamLocker;
+typedef AutoLocker<Thread> ThreadLocker;
+
+
} // namespace BPrivate
using BPrivate::AutoLocker;
@@ -188,5 +192,8 @@ using BPrivate::InterruptsLocker;
using BPrivate::SpinLocker;
using BPrivate::InterruptsSpinLocker;
using BPrivate::ThreadCPUPinner;
+using BPrivate::TeamLocker;
+using BPrivate::ThreadLocker;
+
#endif // KERNEL_UTIL_AUTO_LOCKER_H
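
The new TeamLocker/ThreadLocker typedefs pair with the Lock()/TryLock()/
Unlock() methods that thread_types.h now gives Team and Thread. A short
sketch of the intended usage, assuming Thread::Get() returns the thread with
a reference acquired (as the UnlockAndReleaseReference() helper suggests):

	static void
	example_with_thread(thread_id id)
	{
		Thread* thread = Thread::Get(id);
		if (thread == NULL)
			return;
		BReference<Thread> threadReference(thread, true);
			// releases the reference automatically

		ThreadLocker threadLocker(thread);
			// locks via Thread::Lock(), unlocked when the locker goes out
			// of scope

		// ... access fields protected by the thread's fLock, e.g. its user
		// timers ...
	}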
diff --git a/headers/private/kernel/util/KernelReferenceable.h b/headers/private/kernel/util/KernelReferenceable.h
new file mode 100644
index 0000000..928075c
--- /dev/null
+++ b/headers/private/kernel/util/KernelReferenceable.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_UTIL_KERNEL_REFERENCEABLE_H
+#define _KERNEL_UTIL_KERNEL_REFERENCEABLE_H
+
+
+#include <Referenceable.h>
+
+#include <heap.h>
+
+
+namespace BKernel {
+
+
+struct KernelReferenceable : BReferenceable, DeferredDeletable {
+protected:
+ virtual void LastReferenceReleased();
+};
+
+
+} // namespace BKernel
+
+
+using BKernel::KernelReferenceable;
+
+
+#endif /* _KERNEL_UTIL_KERNEL_REFERENCEABLE_H */
diff --git a/headers/private/libroot/libroot_private.h b/headers/private/libroot/libroot_private.h
index eaa9bf2..7a3357b 100644
--- a/headers/private/libroot/libroot_private.h
+++ b/headers/private/libroot/libroot_private.h
@@ -37,12 +37,11 @@ void __init_heap_post_env(void);
void __init_time(void);
void __arch_init_time(struct real_time_data *data, bool setDefaults);
bigtime_t __arch_get_system_time_offset(struct real_time_data *data);
+bigtime_t __get_system_time_offset();
void __init_pwd_backend(void);
void __reinit_pwd_backend_after_fork(void);
void* __arch_get_caller(void);
-void __init_pthread(void);
-
#ifdef __cplusplus
}
diff --git a/headers/private/libroot/pthread_private.h b/headers/private/libroot/pthread_private.h
index 6477d30..f50bd28 100644
--- a/headers/private/libroot/pthread_private.h
+++ b/headers/private/libroot/pthread_private.h
@@ -11,6 +11,17 @@
#include <OS.h>
+
+// _pthread_thread::flags values
+#define THREAD_DETACHED 0x01
+#define THREAD_DEAD 0x02
+#define THREAD_CANCELED 0x04
+#define THREAD_CANCEL_ENABLED 0x08
+#define THREAD_CANCEL_ASYNCHRONOUS 0x10
+
+
+struct thread_creation_attributes;
+
// The public *_t types are only pointers to these structures
// This way, we are completely free to change them, which might be
// necessary in the future (not only due to the incomplete implementation
@@ -55,9 +66,6 @@ typedef struct _pthread_thread {
void *(*entry)(void*);
void *entry_argument;
void *exit_value;
- int cancel_state;
- int cancel_type;
- bool cancelled;
struct pthread_key_data specific[PTHREAD_KEYS_MAX];
struct __pthread_cleanup_handler *cleanup_handlers;
} pthread_thread;
@@ -69,7 +77,13 @@ extern "C" {
void __pthread_key_call_destructors(pthread_thread *thread);
void __pthread_destroy_thread(void);
-pthread_thread *__allocate_pthread(void *data);
+pthread_thread *__allocate_pthread(void* (*entry)(void*), void *data);
+void __init_pthread(pthread_thread* thread, void* (*entry)(void*), void* data);
+status_t __pthread_init_creation_attributes(
+ const pthread_attr_t* pthreadAttributes, pthread_t thread,
+ status_t (*entryFunction)(void*, void*), void* argument1,
+ void* argument2, const char* name,
+ struct thread_creation_attributes* attributes);
#ifdef __cplusplus
}
diff --git a/headers/private/libroot/signal_private.h b/headers/private/libroot/signal_private.h
new file mode 100644
index 0000000..5380aa9
--- /dev/null
+++ b/headers/private/libroot/signal_private.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _LIBROOT_SIGNAL_PRIVATE_H
+#define _LIBROOT_SIGNAL_PRIVATE_H
+
+
+#include <signal.h>
+#include <sys/cdefs.h>
+
+#include <signal_defs.h>
+
+
+#define MAX_SIGNAL_NUMBER_BEOS 29
+
+
+typedef __haiku_int32 sigset_t_beos;
+
+struct sigaction_beos {
+ __sighandler_t sa_handler;
+ sigset_t_beos sa_mask;
+ int sa_flags;
+ void* sa_userdata;
+};
+
+
+static inline sigset_t_beos
+to_beos_sigset(sigset_t set)
+{
+ // restrict to BeOS signals
+ sigset_t_beos beosSet = (sigset_t_beos)(set
+ & SIGNAL_RANGE_TO_MASK(1, MAX_SIGNAL_NUMBER_BEOS));
+
+ // if SIGBUS is set, set SIGSEGV, since they have the same number in BeOS
+ if ((set & SIGNAL_TO_MASK(SIGBUS)) != 0)
+ beosSet |= SIGNAL_TO_MASK(SIGSEGV);
+
+ return beosSet;
+}
+
+
+static inline sigset_t
+from_beos_sigset(sigset_t_beos beosSet)
+{
+ sigset_t set = beosSet;
+
+ // if SIGSEGV is set, set SIGBUS, since they have the same number in BeOS
+ if ((set & SIGNAL_TO_MASK(SIGSEGV)) != 0)
+ set |= SIGNAL_TO_MASK(SIGBUS);
+
+ return set;
+}
+
+
+__BEGIN_DECLS
+
+
+__sighandler_t __signal_beos(int signal, __sighandler_t signalHandler);
+__sighandler_t __signal(int signal, __sighandler_t signalHandler);
+
+int __sigaction_beos(int signal, const struct sigaction_beos* beosAction,
+ struct sigaction_beos* beosOldAction);
+int __sigaction(int signal, const struct sigaction* action,
+ struct sigaction* oldAction);
+
+__sighandler_t __sigset_beos(int signal, __sighandler_t signalHandler);
+__sighandler_t __sigset(int signal, __sighandler_t signalHandler);
+
+int __sigignore_beos(int signal);
+int __sigignore(int signal);
+
+int __sighold_beos(int signal);
+int __sighold(int signal);
+
+int __sigrelse_beos(int signal);
+int __sigrelse(int signal);
+
+int __sigpause_beos(int signal);
+int __sigpause(int signal);
+
+int __siginterrupt_beos(int signal, int flag);
+int __siginterrupt(int signal, int flag);
+
+int __pthread_sigmask_beos(int how, const sigset_t_beos* beosSet,
+ sigset_t_beos* beosOldSet);
+int __sigprocmask_beos(int how, const sigset_t_beos* beosSet,
+ sigset_t_beos* beosOldSet);
+
+int __pthread_sigmask(int how, const sigset_t* set, sigset_t* oldSet);
+int __sigprocmask(int how, const sigset_t* set, sigset_t* oldSet);
+
+int __sigpending_beos(sigset_t_beos* beosSet);
+int __sigpending(sigset_t* set);
+
+int __sigsuspend_beos(const sigset_t_beos* beosMask);
+int __sigsuspend(const sigset_t* mask);
+
+int __sigwait_beos(const sigset_t_beos* beosSet, int* _signal);
+int __sigwait(const sigset_t* set, int* _signal);
+
+int __sigemptyset_beos(sigset_t_beos* set);
+int __sigfillset_beos(sigset_t_beos* set);
+int __sigismember_beos(const sigset_t_beos* set, int signal);
+int __sigaddset_beos(sigset_t_beos* set, int signal);
+int __sigdelset_beos(sigset_t_beos* set, int signal);
+
+int __sigemptyset(sigset_t* set);
+int __sigfillset(sigset_t* set);
+int __sigismember(const sigset_t* set, int signal);
+int __sigaddset(sigset_t* set, int signal);
+int __sigdelset(sigset_t* set, int signal);
+
+
+__END_DECLS
+
+
+#endif // _LIBROOT_SIGNAL_PRIVATE_H
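
The conversion helpers above are intentionally lossy; a small sketch of the
resulting behavior, assuming SIGBUS now has a number above
MAX_SIGNAL_NUMBER_BEOS (as the explicit SIGBUS/SIGSEGV mapping implies):

	static void
	sketch_beos_sigset_round_trip()
	{
		sigset_t set = SIGNAL_TO_MASK(SIGUSR1) | SIGNAL_TO_MASK(SIGBUS)
			| SIGNAL_TO_MASK(SIGNAL_REALTIME_MIN);

		sigset_t_beos beosSet = to_beos_sigset(set);
			// SIGUSR1 survives, SIGBUS maps onto SIGSEGV (same number in
			// BeOS), and the real-time signal is dropped (outside 1..29)

		sigset_t roundTrip = from_beos_sigset(beosSet);
			// contains SIGUSR1, SIGSEGV and SIGBUS, but the real-time
			// signal stays lost
		(void)roundTrip;
	}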
diff --git a/headers/private/libroot/time_private.h b/headers/private/libroot/time_private.h
new file mode 100644
index 0000000..0cfd9fa
--- /dev/null
+++ b/headers/private/libroot/time_private.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _LIBROOT_TIME_PRIVATE_H
+#define _LIBROOT_TIME_PRIVATE_H
+
+
+#include <errno.h>
+#include <sys/cdefs.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include <SupportDefs.h>
+
+#include <new>
+
+#define CLOCKS_PER_SEC_BEOS 1000
+#define CLK_TCK_BEOS CLOCKS_PER_SEC_BEOS
+
+#define MICROSECONDS_PER_CLOCK_TICK (1000000 / CLOCKS_PER_SEC)
+#define MICROSECONDS_PER_CLOCK_TICK_BEOS (1000000 / CLOCKS_PER_SEC_BEOS)
+
+
+struct __timer_t {
+ int32 id;
+ thread_id thread;
+
+ void SetTo(int32 id, thread_id thread)
+ {
+ this->id = id;
+ this->thread = thread;
+ }
+};
+
+
+static inline void
+bigtime_to_timespec(bigtime_t time, timespec& spec)
+{
+ spec.tv_sec = time / 1000000;
+ spec.tv_nsec = (time % 1000000) * 1000;
+}
+
+
+static inline bool
+timespec_to_bigtime(const timespec& spec, bigtime_t& _time)
+{
+ if (spec.tv_sec < 0 || spec.tv_nsec < 0 || spec.tv_nsec >= 1000000000)
+ return false;
+
+ _time = (bigtime_t)spec.tv_sec * 1000000 + (spec.tv_nsec + 999) / 1000;
+
+ return true;
+}
+
+
+static inline bool
+timeval_to_timespec(const timeval& val, timespec& spec)
+{
+ if (val.tv_sec < 0 || val.tv_usec < 0 || val.tv_usec >= 1000000)
+ return false;
+
+ spec.tv_sec = val.tv_sec;
+ spec.tv_nsec = val.tv_usec * 1000;
+
+ return true;
+}
+
+
+static inline void
+timespec_to_timeval(const timespec& spec, timeval& val)
+{
+ val.tv_sec = spec.tv_sec;
+ val.tv_usec = spec.tv_nsec / 1000;
+}
+
+
+__BEGIN_DECLS
+
+
+clock_t __clock_beos(void);
+clock_t __clock(void);
+
+
+__END_DECLS
+
+
+#endif // _LIBROOT_TIME_PRIVATE_H
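
A quick round trip through the helpers above illustrates the rounding
behavior (timespec_to_bigtime() rounds the nanosecond part up to the next
microsecond, so a round trip never yields an earlier point in time):

	static void
	sketch_time_conversions()
	{
		timespec spec = { 2, 1 };		// 2 s + 1 ns

		bigtime_t time = 0;
		if (timespec_to_bigtime(spec, time)) {
			// time == 2000001 -- the lone nanosecond was rounded up to 1 us
		}

		timespec back;
		bigtime_to_timespec(time, back);
			// back == { 2, 1000 }, i.e. not earlier than the original spec
	}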
diff --git a/headers/private/libroot/times_private.h b/headers/private/libroot/times_private.h
new file mode 100644
index 0000000..00b1218
--- /dev/null
+++ b/headers/private/libroot/times_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _LIBROOT_TIMES_PRIVATE_H
+#define _LIBROOT_TIMES_PRIVATE_H
+
+
+#include <sys/cdefs.h>
+#include <sys/times.h>
+
+
+__BEGIN_DECLS
+
+
+clock_t __times_beos(struct tms* tms);
+clock_t __times(struct tms* tms);
+
+
+__END_DECLS
+
+
+#endif // _LIBROOT_TIMES_PRIVATE_H
diff --git a/headers/private/libroot/unistd_private.h b/headers/private/libroot/unistd_private.h
new file mode 100644
index 0000000..9d41c00
--- /dev/null
+++ b/headers/private/libroot/unistd_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _LIBROOT_UNISTD_PRIVATE_H
+#define _LIBROOT_UNISTD_PRIVATE_H
+
+
+#include <sys/cdefs.h>
+#include <sys/times.h>
+
+
+__BEGIN_DECLS
+
+
+long __sysconf_beos(int name);
+long __sysconf(int name);
+
+
+__END_DECLS
+
+
+#endif // _LIBROOT_UNISTD_PRIVATE_H
diff --git a/headers/private/shared/syscall_utils.h b/headers/private/shared/syscall_utils.h
index 3498b5b..f5890e8 100644
--- a/headers/private/shared/syscall_utils.h
+++ b/headers/private/shared/syscall_utils.h
@@ -1,18 +1,38 @@
/*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef _SYSCALL_UTILS_H
#define _SYSCALL_UTILS_H
+
#define RETURN_AND_SET_ERRNO(err) \
do { \
- __typeof(err) raseResult = (err); \
- if (raseResult < 0) { \
- errno = raseResult; \
+ __typeof(err) __result = (err); \
+ if (__result < 0) { \
+ errno = __result; \
return -1; \
} \
- return raseResult; \
- } while (false)
+ return __result; \
+ } while (0)
+
+#define RETURN_AND_TEST_CANCEL(err) \
+ do { \
+ __typeof(err) __result = (err); \
+ pthread_testcancel(); \
+ return __result; \
+ } while (0)
+
+#define RETURN_AND_SET_ERRNO_TEST_CANCEL(err) \
+ do { \
+ __typeof(err) __result = (err); \
+ pthread_testcancel(); \
+ if (__result < 0) { \
+ errno = __result; \
+ return -1; \
+ } \
+ return __result; \
+ } while (0)
+
#endif // _SYSCALL_UTILS_H
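
Both new *_TEST_CANCEL macros turn a wrapper into a POSIX cancellation point.
A sketch of the intended usage, with _kern_example() as a placeholder for a
real syscall (the socket.cpp changes further down use exactly this pattern
for recv(), send(), connect() and friends):

	ssize_t
	example_wrapper(int fd, void* buffer, size_t size)
	{
		// the syscall result is stored, pthread_testcancel() runs, and a
		// negative result is then mapped to errno/-1
		RETURN_AND_SET_ERRNO_TEST_CANCEL(_kern_example(fd, buffer, size));
	}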
diff --git a/headers/private/system/arch/x86/arch_commpage_defs.h b/headers/private/system/arch/x86/arch_commpage_defs.h
index a61678d..5f27f67 100644
--- a/headers/private/system/arch/x86/arch_commpage_defs.h
+++ b/headers/private/system/arch/x86/arch_commpage_defs.h
@@ -12,6 +12,10 @@
#define COMMPAGE_ENTRY_X86_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0)
#define COMMPAGE_ENTRY_X86_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
#define COMMPAGE_ENTRY_X86_MEMSET (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 2)
+#define COMMPAGE_ENTRY_X86_SIGNAL_HANDLER \
+ (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 3)
+#define COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS \
+ (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 4)
#define ARCH_USER_COMMPAGE_ADDR (0xffff0000)
diff --git a/headers/private/system/signal_defs.h b/headers/private/system/signal_defs.h
new file mode 100644
index 0000000..b3e6a7e
--- /dev/null
+++ b/headers/private/system/signal_defs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _SYSTEM_SIGNAL_DEFS_H
+#define _SYSTEM_SIGNAL_DEFS_H
+
+
+#include <signal.h>
+#include <unistd.h>
+
+
+// The total number of signals a process may have queued at receivers at any
+// time.
+#define MAX_QUEUED_SIGNALS _POSIX_SIGQUEUE_MAX
+
+// realtime signal number range
+#define SIGNAL_REALTIME_MIN 33
+#define SIGNAL_REALTIME_MAX 40
+
+// greatest actually supported signal number
+#define MAX_SIGNAL_NUMBER SIGNAL_REALTIME_MAX
+
+// additional send_signal_etc() flags
+#define SIGNAL_FLAG_QUEUING_REQUIRED (0x10000)
+ // force signal queuing, i.e. fail instead of falling back to unqueued
+ // signals, when queuing isn't possible
+#define SIGNAL_FLAG_SEND_TO_THREAD (0x20000)
+	// interpret the given ID as a thread_id rather than a team_id (syscall
+	// use only)
+
+// additional sigaction::sa_flags flag
+#define SA_BEOS_COMPATIBLE_HANDLER 0x80000000
+	// BeOS compatible signal handler, i.e. the vregs argument is passed
+	// by value, not by pointer
+
+#define SIGNAL_TO_MASK(signal) ((sigset_t)1 << ((signal) - 1))
+#define SIGNAL_RANGE_TO_MASK(first, last) \
+ ((((SIGNAL_TO_MASK(last) - 1) << 1) | 1) & ~(SIGNAL_TO_MASK(first) - 1))
+ // Note: The last mask computation looks that way to avoid an overflow for
+ // last == 64.
+
+#endif /* _SYSTEM_SIGNAL_DEFS_H */
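
Expanding the macro for the extreme case shows why it is written this way:
computing ((sigset_t)1 << last) - 1 directly would shift a 64-bit value by 64
for last == 64, which is undefined. For SIGNAL_RANGE_TO_MASK(1, 64):

	// SIGNAL_TO_MASK(64)                 == (sigset_t)1 << 63
	// (SIGNAL_TO_MASK(64) - 1) << 1 | 1  == 0xffffffffffffffff
	// & ~(SIGNAL_TO_MASK(1) - 1)         == & ~0, i.e. nothing stripped
	//
	// so the full 64-signal mask results without ever shifting by 64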
diff --git a/headers/private/system/syscalls.h b/headers/private/system/syscalls.h
index cd510bd..09488ba 100644
--- a/headers/private/system/syscalls.h
+++ b/headers/private/system/syscalls.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2010, Haiku Inc. All rights reserved.
+ * Copyright 2004-2011, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _SYSTEM_SYSCALLS_H
@@ -35,8 +35,10 @@ struct _sem_t;
struct sembuf;
union semun;
struct sigaction;
+struct signal_frame_data;
struct stat;
struct system_profiler_parameters;
+struct user_timer_info;
struct disk_device_job_progress_info;
struct partitionable_space_data;
@@ -134,8 +136,8 @@ extern void __NO_RETURN _kern_exit_team(status_t returnValue);
extern status_t _kern_kill_team(team_id team);
extern team_id _kern_get_current_team();
extern status_t _kern_wait_for_team(team_id team, status_t *_returnCode);
-extern thread_id _kern_wait_for_child(thread_id child, uint32 flags,
- int32 *_reason, status_t *_returnCode);
+extern pid_t _kern_wait_for_child(thread_id child, uint32 flags,
+ siginfo_t* info);
extern status_t _kern_exec(const char *path, const char* const* flatArgs,
size_t flatArgsSize, int32 argCount, int32 envCount,
mode_t umask);
@@ -155,6 +157,8 @@ extern status_t _kern_set_thread_priority(thread_id thread,
int32 newPriority);
extern status_t _kern_kill_thread(thread_id thread);
extern void _kern_exit_thread(status_t returnValue);
+extern status_t _kern_cancel_thread(thread_id threadID,
+ void (*cancelFunction)(int));
extern void _kern_thread_yield(void);
extern status_t _kern_wait_for_thread(thread_id thread,
status_t *_returnCode);
@@ -163,7 +167,8 @@ extern status_t _kern_send_data(thread_id thread, int32 code,
const void *buffer, size_t bufferSize);
extern int32 _kern_receive_data(thread_id *_sender, void *buffer,
size_t bufferSize);
-extern int64 _kern_restore_signal_frame();
+extern int64 _kern_restore_signal_frame(
+ struct signal_frame_data* signalFrameData);
extern status_t _kern_get_thread_info(thread_id id, thread_info *info);
extern status_t _kern_get_next_thread_info(team_id team, int32 *cookie,
@@ -198,13 +203,14 @@ extern ssize_t _kern_getgroups(int groupCount, gid_t* groupList);
extern status_t _kern_setgroups(int groupCount, const gid_t* groupList);
// signal functions
-extern status_t _kern_send_signal(pid_t tid, uint sig);
-extern status_t _kern_sigprocmask(int how, const sigset_t *set,
+extern status_t _kern_send_signal(int32 id, uint32 signal,
+ const union sigval* userValue, uint32 flags);
+extern status_t _kern_set_signal_mask(int how, const sigset_t *set,
sigset_t *oldSet);
extern status_t _kern_sigaction(int sig, const struct sigaction *action,
struct sigaction *oldAction);
-extern bigtime_t _kern_set_alarm(bigtime_t time, uint32 mode);
-extern status_t _kern_sigwait(const sigset_t *set, int *_signal);
+extern status_t _kern_sigwait(const sigset_t *set, siginfo_t *info,
+ uint32 flags, bigtime_t timeout);
extern status_t _kern_sigsuspend(const sigset_t *mask);
extern status_t _kern_sigpending(sigset_t *set);
extern status_t _kern_set_signal_stack(const stack_t *newStack,
@@ -370,7 +376,7 @@ extern status_t _kern_stop_watching(dev_t device, ino_t node, port_id port,
uint32 token);
// time functions
-extern status_t _kern_set_real_time_clock(uint32 time);
+extern status_t _kern_set_real_time_clock(bigtime_t time);
extern status_t _kern_set_timezone(int32 timezoneOffset, const char *name,
size_t nameLength);
extern status_t _kern_get_timezone(int32 *_timezoneOffset, char *name,
@@ -378,8 +384,23 @@ extern status_t _kern_get_timezone(int32 *_timezoneOffset, char *name,
extern status_t _kern_set_real_time_clock_is_gmt(bool isGMT);
extern status_t _kern_get_real_time_clock_is_gmt(bool *_isGMT);
+extern status_t _kern_get_clock(clockid_t clockID, bigtime_t* _time);
+extern status_t _kern_set_clock(clockid_t clockID, bigtime_t time);
+
extern bigtime_t _kern_system_time();
-extern status_t _kern_snooze_etc(bigtime_t time, int timebase, int32 flags);
+extern status_t _kern_snooze_etc(bigtime_t time, int timebase, int32 flags,
+ bigtime_t* _remainingTime);
+
+extern int32 _kern_create_timer(clockid_t clockID, thread_id threadID,
+ uint32 flags, const struct sigevent* event,
+ const struct thread_creation_attributes*
+ threadAttributes);
+extern status_t _kern_delete_timer(int32 timerID, thread_id threadID);
+extern status_t _kern_get_timer(int32 timerID, thread_id threadID,
+ struct user_timer_info* info);
+extern status_t _kern_set_timer(int32 timerID, thread_id threadID,
+ bigtime_t startTime, bigtime_t interval, uint32 flags,
+ struct user_timer_info* oldInfo);
// area functions
extern area_id _kern_create_area(const char *name, void **address,
diff --git a/headers/private/system/thread_defs.h b/headers/private/system/thread_defs.h
index ab99019..9f1b527 100644
--- a/headers/private/system/thread_defs.h
+++ b/headers/private/system/thread_defs.h
@@ -1,17 +1,15 @@
/*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef _SYSTEM_THREAD_DEFS_H
#define _SYSTEM_THREAD_DEFS_H
-#include <OS.h>
+#include <pthread.h>
+
+#include <OS.h>
-#define THREAD_RETURN_EXIT 0x1
-#define THREAD_RETURN_INTERRUPTED 0x2
-#define THREAD_STOPPED 0x3
-#define THREAD_CONTINUED 0x4
/** Size of the stack given to teams in user space */
#define USER_STACK_GUARD_PAGES 4 // 16 kB
@@ -39,18 +37,21 @@ enum {
};
+#define THREAD_CREATION_FLAG_DEFER_SIGNALS 0x01
+ // create the thread with signals deferred, i.e. with
+ // user_thread::defer_signals set to 1
+
+
struct thread_creation_attributes {
- int32 (*entry)(thread_func, void *);
+ int32 (*entry)(void*, void*);
const char* name;
int32 priority;
void* args1;
void* args2;
void* stack_address;
size_t stack_size;
-
- // when calling from kernel only
- team_id team;
- thread_id thread;
+ pthread_t pthread;
+ uint32 flags;
};
#endif /* _SYSTEM_THREAD_DEFS_H */
diff --git a/headers/private/system/tls.h b/headers/private/system/tls.h
index ebb470d..f2105da 100644
--- a/headers/private/system/tls.h
+++ b/headers/private/system/tls.h
@@ -19,7 +19,6 @@ enum {
TLS_ERRNO_SLOT,
TLS_ON_EXIT_THREAD_SLOT,
TLS_USER_THREAD_SLOT,
- TLS_PTHREAD_SLOT,
// Note: these entries can safely be changed between
// releases; 3rd party code always calls tls_allocate()
diff --git a/headers/private/system/user_thread_defs.h b/headers/private/system/user_thread_defs.h
index 0bdd1f7..ccf80e8 100644
--- a/headers/private/system/user_thread_defs.h
+++ b/headers/private/system/user_thread_defs.h
@@ -1,18 +1,24 @@
/*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef _SYSTEM_USER_THREAD_DEFS_H
#define _SYSTEM_USER_THREAD_DEFS_H
+
+#include <pthread.h>
+#include <signal.h>
+
#include <SupportDefs.h>
struct user_thread {
+ pthread_t pthread; // pthread pointer
+ uint32 flags;
+ status_t wait_status; // wait status for thread blocking
int32 defer_signals; // counter; 0 == signals allowed
- uint32 pending_signals; // signals that are pending, when
+ sigset_t pending_signals; // signals that are pending, when
// signals are deferred
- status_t wait_status;
};
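
For context, a sketch of how the extended user_thread structure is meant to
be used when deferring signals, assuming get_user_thread() is the
libroot-private accessor for the calling thread's structure:

	static void
	sketch_defer_signals()
	{
		user_thread* thread = get_user_thread();
			// assumed accessor; returns the calling thread's user_thread

		thread->defer_signals++;
			// while the counter is > 0 signals are not delivered but
			// recorded in pending_signals

		// ... short critical section that must not be interrupted ...

		thread->defer_signals--;
		if (thread->defer_signals == 0 && thread->pending_signals != 0) {
			// something arrived while deferred; delivery is assumed to
			// happen on the next transition out of the kernel
		}
	}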
diff --git a/headers/private/system/user_timer_defs.h b/headers/private/system/user_timer_defs.h
new file mode 100644
index 0000000..fdb2b78
--- /dev/null
+++ b/headers/private/system/user_timer_defs.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _SYSTEM_USER_TIMER_DEFS_H
+#define _SYSTEM_USER_TIMER_DEFS_H
+
+
+#include <limits.h>
+#include <time.h>
+
+#include <SupportDefs.h>
+
+
+#define CLOCK_PROCESS_USER_CPUTIME_ID ((clockid_t)-4)
+	/* clock measuring the user CPU time consumed by the current process */
+
+// limits
+#define MAX_USER_TIMERS_PER_TEAM _POSIX_TIMER_MAX
+	// maximum number of user-defined user timers (timer_create())
+#define MAX_USER_TIMER_OVERRUN_COUNT INT_MAX
+ // cap value of a timer's overrun counter
+
+#if MAX_USER_TIMER_OVERRUN_COUNT < _POSIX_DELAYTIMER_MAX
+# error "MAX_USER_TIMER_OVERRUN_COUNT < _POSIX_DELAYTIMER_MAX"
+#endif
+
+#define USER_TIMER_REAL_TIME_ID 0
+ // predefined ID for the real time timer
+#define USER_TIMER_TEAM_TOTAL_TIME_ID 1
+ // predefined ID for the team's total (kernel + user) time timer
+#define USER_TIMER_TEAM_USER_TIME_ID 2
+ // predefined ID for the team's user time timer
+#define USER_TIMER_FIRST_USER_DEFINED_ID 3
+ // first ID assigned to a user-defined timer (timer_create())
+
+// _kern_create_timer() flag:
+#define USER_TIMER_SIGNAL_THREAD 0x01
+ // send the signal to the thread instead of the team (valid only for thread
+ // timers)
+
+
+struct user_timer_info {
+ bigtime_t remaining_time;
+ bigtime_t interval;
+ uint32 overrun_count;
+};
+
+
+#endif /* _SYSTEM_USER_TIMER_DEFS_H */
diff --git a/src/add-ons/kernel/debugger/invalidate_on_exit/invalidate_on_exit.cpp b/src/add-ons/kernel/debugger/invalidate_on_exit/invalidate_on_exit.cpp
index 68cee2d..189247c 100644
--- a/src/add-ons/kernel/debugger/invalidate_on_exit/invalidate_on_exit.cpp
+++ b/src/add-ons/kernel/debugger/invalidate_on_exit/invalidate_on_exit.cpp
@@ -50,7 +50,7 @@ std_ops(int32 op, ...)
if (thread < B_OK)
return thread;
- send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);
+ resume_thread(thread);
return B_OK;
} else if (op == B_MODULE_UNINIT) {
// deleting the sem will also cause the thread to exit
@@ -76,7 +76,7 @@ static struct debugger_module_info sModuleInfo = {
NULL
};
-module_info *modules[] = {
+module_info *modules[] = {
(module_info *)&sModuleInfo,
NULL
};
diff --git a/src/add-ons/kernel/debugger/run_on_exit/run_on_exit.cpp b/src/add-ons/kernel/debugger/run_on_exit/run_on_exit.cpp
index 7bcc488..1987fd0 100644
--- a/src/add-ons/kernel/debugger/run_on_exit/run_on_exit.cpp
+++ b/src/add-ons/kernel/debugger/run_on_exit/run_on_exit.cpp
@@ -103,7 +103,7 @@ std_ops(int32 op, ...)
if (thread < B_OK)
return thread;
- send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);
+ resume_thread(thread);
add_debugger_command_etc("on_exit", &add_run_on_exit_command,
"Adds a command to be run when leaving the kernel debugger",
@@ -136,7 +136,7 @@ static struct debugger_module_info sModuleInfo = {
NULL
};
-module_info *modules[] = {
+module_info *modules[] = {
(module_info *)&sModuleInfo,
NULL
};
diff --git a/src/apps/terminal/TermApp.cpp b/src/apps/terminal/TermApp.cpp
index 37d885c..c72fa6a 100644
--- a/src/apps/terminal/TermApp.cpp
+++ b/src/apps/terminal/TermApp.cpp
@@ -77,7 +77,7 @@ TermApp::ReadyToRun()
// a shell exits.
struct sigaction action;
#ifdef __HAIKU__
- action.sa_handler = (sighandler_t)_SigChildHandler;
+ action.sa_handler = (__sighandler_t)_SigChildHandler;
#else
action.sa_handler = (__signal_func_ptr)_SigChildHandler;
#endif
diff --git a/src/bin/debug/profile/profile.cpp b/src/bin/debug/profile/profile.cpp
index 0878237..1d2ddfc 100644
--- a/src/bin/debug/profile/profile.cpp
+++ b/src/bin/debug/profile/profile.cpp
@@ -653,7 +653,7 @@ profile_all(const char* const* programArgs, int programArgCount)
// install signal handlers so we can exit gracefully
struct sigaction action;
- action.sa_handler = (sighandler_t)signal_handler;
+ action.sa_handler = (__sighandler_t)signal_handler;
sigemptyset(&action.sa_mask);
action.sa_userdata = NULL;
if (sigaction(SIGHUP, &action, NULL) < 0
diff --git a/src/bin/debug/scheduling_recorder/scheduling_recorder.cpp b/src/bin/debug/scheduling_recorder/scheduling_recorder.cpp
index 04a64e3..0be34b5 100644
--- a/src/bin/debug/scheduling_recorder/scheduling_recorder.cpp
+++ b/src/bin/debug/scheduling_recorder/scheduling_recorder.cpp
@@ -116,7 +116,7 @@ public:
// install signal handlers so we can exit gracefully
struct sigaction action;
- action.sa_handler = (sighandler_t)_SignalHandler;
+ action.sa_handler = (__sighandler_t)_SignalHandler;
sigemptyset(&action.sa_mask);
action.sa_userdata = this;
if (sigaction(SIGHUP, &action, NULL) < 0
diff --git a/src/bin/debug/strace/MemoryReader.cpp b/src/bin/debug/strace/MemoryReader.cpp
index 8b57c18..df593b6 100644
--- a/src/bin/debug/strace/MemoryReader.cpp
+++ b/src/bin/debug/strace/MemoryReader.cpp
@@ -1,8 +1,9 @@
/*
- * Copyright 2005, Ingo Weinhold, bonefish@users.sf.net.
+ * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
+
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -11,27 +12,41 @@
#include "MemoryReader.h"
-// constructor
-MemoryReader::MemoryReader(port_id nubPort)
- : fNubPort(nubPort),
- fReplyPort(-1)
+
+MemoryReader::MemoryReader()
+ :
+ fNubPort(-1),
+ fReplyPort(-1)
{
- fReplyPort = create_port(1, "memory reader reply");
- if (fReplyPort < 0) {
- fprintf(stderr, "Failed to create memory reader reply port: %s\n",
- strerror(fReplyPort));
- exit(1);
- }
}
-// constructor
+
MemoryReader::~MemoryReader()
{
if (fReplyPort >= 0)
delete_port(fReplyPort);
}
-// Read
+
+status_t
+MemoryReader::Init(port_id nubPort)
+{
+ if (fReplyPort >= 0)
+ delete_port(fReplyPort);
+
+ fNubPort = nubPort;
+
+ fReplyPort = create_port(1, "memory reader reply");
+ if (fReplyPort < 0) {
+ fprintf(stderr, "Failed to create memory reader reply port: %s\n",
+ strerror(fReplyPort));
+ return fReplyPort;
+ }
+
+ return B_OK;
+}
+
+
status_t
MemoryReader::Read(void *_address, void *_buffer, int32 size, int32 &bytesRead)
{
@@ -63,7 +78,7 @@ MemoryReader::Read(void *_address, void *_buffer, int32 size, int32 &bytesRead)
return B_OK;
}
-// _Read
+
status_t
MemoryReader::_Read(void *address, void *buffer, int32 size, int32 &bytesRead)
{
diff --git a/src/bin/debug/strace/MemoryReader.h b/src/bin/debug/strace/MemoryReader.h
index 028be82..b625b9b 100644
--- a/src/bin/debug/strace/MemoryReader.h
+++ b/src/bin/debug/strace/MemoryReader.h
@@ -1,24 +1,31 @@
/*
- * Copyright 2005, Ingo Weinhold, bonefish@users.sf.net.
+ * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef STRACE_MEMORY_READER_H
#define STRACE_MEMORY_READER_H
+
#include <OS.h>
+
class MemoryReader {
public:
- MemoryReader(port_id nubPort);
- ~MemoryReader();
+ MemoryReader();
+ ~MemoryReader();
- status_t Read(void *address, void *buffer, int32 size, int32 &bytesRead);
+ status_t Init(port_id nubPort);
+
+ status_t Read(void *address, void *buffer, int32 size,
+ int32 &bytesRead);
private:
- status_t _Read(void *address, void *buffer, int32 size, int32 &bytesRead);
+ status_t _Read(void *address, void *buffer, int32 size,
+ int32 &bytesRead);
- port_id fNubPort;
- port_id fReplyPort;
+private:
+ port_id fNubPort;
+ port_id fReplyPort;
};
diff --git a/src/bin/debug/strace/strace.cpp b/src/bin/debug/strace/strace.cpp
index b808f33..dbb332f 100644
--- a/src/bin/debug/strace/strace.cpp
+++ b/src/bin/debug/strace/strace.cpp
@@ -1,8 +1,9 @@
/*
- * Copyright 2005-2008, Ingo Weinhold, bonefish@users.sf.net.
+ * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
+
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
@@ -25,10 +26,12 @@
#include "Syscall.h"
#include "TypeHandler.h"
+
using std::map;
using std::string;
using std::vector;
+
extern void get_syscalls0(vector<Syscall*> &syscalls);
extern void get_syscalls1(vector<Syscall*> &syscalls);
extern void get_syscalls2(vector<Syscall*> &syscalls);
@@ -50,9 +53,11 @@ extern void get_syscalls17(vector<Syscall*> &syscalls);
extern void get_syscalls18(vector<Syscall*> &syscalls);
extern void get_syscalls19(vector<Syscall*> &syscalls);
+
extern const char *__progname;
static const char *kCommandName = __progname;
+
// usage
static const char *kUsage =
"Usage: %s [ <options> ] [ <thread or team ID> | <executable with args> ]\n"
@@ -74,6 +79,8 @@ static const char *kUsage =
" -r - Don't print syscall return values.\n"
" -s - Also trace all threads spawned by the supplied thread,\n"
" respectively the loaded executable's main thread.\n"
+" -t - Also recursively trace all teams created by a traced\n"
+" thread or team.\n"
" -T - Trace all threads of the supplied or loaded executable's\n"
" team. If an ID is supplied, it is interpreted as a team\n"
" ID.\n"
@@ -82,6 +89,7 @@ static const char *kUsage =
" -g - turns off signal tracing.\n"
;
+
// terminal color escape sequences
// (http://www.dee.ufcg.edu.br/~rrbrandt/tools/ansi.html)
static const char *kTerminalTextNormal = "\33[0m";
@@ -89,38 +97,6 @@ static const char *kTerminalTextRed = "\33[31m";
static const char *kTerminalTextMagenta = "\33[35m";
static const char *kTerminalTextBlue = "\33[34m";
-static const char *kSignalName[] = {
- /* 0 */ "SIG0",
- /* 1 */ "SIGHUP",
- /* 2 */ "SIGINT",
- /* 3 */ "SIGQUIT",
- /* 4 */ "SIGILL",
- /* 5 */ "SIGCHLD",
- /* 6 */ "SIGABRT",
- /* 7 */ "SIGPIPE",
- /* 8 */ "SIGFPE",
- /* 9 */ "SIGKILL",
- /* 10 */ "SIGSTOP",
- /* 11 */ "SIGSEGV",
- /* 12 */ "SIGCONT",
- /* 13 */ "SIGTSTP",
- /* 14 */ "SIGALRM",
- /* 15 */ "SIGTERM",
- /* 16 */ "SIGTTIN",
- /* 17 */ "SIGTTOU",
- /* 18 */ "SIGUSR1",
- /* 19 */ "SIGUSR2",
- /* 20 */ "SIGWINCH",
- /* 21 */ "SIGKILLTHR",
- /* 22 */ "SIGTRAP",
- /* 23 */ "SIGPOLL",
- /* 24 */ "SIGPROF",
- /* 25 */ "SIGSYS",
- /* 26 */ "SIGURG",
- /* 27 */ "SIGVTALRM",
- /* 28 */ "SIGXCPU",
- /* 29 */ "SIGXFSZ",
-};
// command line args
static int sArgc;
@@ -130,26 +106,73 @@ static const char *const *sArgv;
static vector<Syscall*> sSyscallVector;
static map<string, Syscall*> sSyscallMap;
-// print_usage
-void
+
+struct Team {
+ Team(team_id id)
+ :
+ fID(id),
+ fNubPort(-1)
+ {
+ }
+
+ team_id ID() const
+ {
+ return fID;
+ }
+
+ port_id NubPort() const
+ {
+ return fNubPort;
+ }
+
+ MemoryReader& GetMemoryReader()
+ {
+ return fMemoryReader;
+ }
+
+ status_t InstallDebugger(port_id debuggerPort, bool traceTeam,
+ bool traceChildTeams, bool traceSignal)
+ {
+ fNubPort = install_team_debugger(fID, debuggerPort);
+ if (fNubPort < 0) {
+ fprintf(stderr, "%s: Failed to install team debugger: %s\n",
+ kCommandName, strerror(fNubPort));
+ return fNubPort;
+ }
+
+ // set team debugging flags
+ int32 teamDebugFlags = (traceTeam ? B_TEAM_DEBUG_POST_SYSCALL : 0)
+ | (traceChildTeams ? B_TEAM_DEBUG_TEAM_CREATION : 0)
+ | (traceSignal ? B_TEAM_DEBUG_SIGNALS : 0);
+ set_team_debugging_flags(fNubPort, teamDebugFlags);
+
+ return fMemoryReader.Init(fNubPort);
+ }
+
+private:
+ team_id fID;
+ port_id fNubPort;
+ MemoryReader fMemoryReader;
+};
+
+
+static void
print_usage(bool error)
{
// print usage
fprintf((error ? stderr : stdout), kUsage, kCommandName);
}
-// print_usage_and_exit
-static
-void
+
+static void
print_usage_and_exit(bool error)
{
print_usage(error);
exit(error ? 1 : 0);
}
-// get_id
-static
-bool
+
+static bool
get_id(const char *str, int32 &id)
{
int32 len = strlen(str);
@@ -162,7 +185,7 @@ get_id(const char *str, int32 &id)
return true;
}
-// get_syscall
+
Syscall *
get_syscall(const char *name)
{
@@ -173,7 +196,7 @@ get_syscall(const char *name)
return i->second;
}
-// patch_syscalls
+
static void
patch_syscalls()
{
@@ -185,9 +208,8 @@ patch_syscalls()
patch_ioctl();
}
-// init_syscalls
-static
-void
+
+static void
init_syscalls()
{
// init the syscall vector
@@ -222,9 +244,8 @@ init_syscalls()
patch_syscalls();
}
-// print_to_string
-static
-void
+
+static void
print_to_string(char **_buffer, int32 *_length, const char *format, ...)
{
va_list list;
@@ -236,9 +257,8 @@ print_to_string(char **_buffer, int32 *_length, const char *format, ...)
*_length -= length;
}
-// print_syscall
-static
-void
+
+static void
print_syscall(FILE *outputFile, debug_post_syscall &message,
MemoryReader &memoryReader, bool printArguments, uint32 contentsFlags,
bool printReturnValue, bool colorize, bool decimal)
@@ -324,21 +344,20 @@ print_syscall(FILE *outputFile, debug_post_syscall &message,
_kern_debug_output(buffer);
}
-static
-const char *
+
+static const char *
signal_name(int signal)
{
if (signal >= 0 && signal < NSIG)
- return kSignalName[signal];
+ return strsignal(signal);
static char buffer[32];
sprintf(buffer, "%d", signal);
return buffer;
}
-// print_signal
-static
-void
+
+static void
print_signal(FILE *outputFile, debug_signal_received &message,
bool colorize)
{
@@ -364,7 +383,7 @@ print_signal(FILE *outputFile, debug_signal_received &message,
_kern_debug_output(buffer);
}
-// main
+
int
main(int argc, const char *const *argv)
{
@@ -383,6 +402,7 @@ main(int argc, const char *const *argv)
bool printReturnValues = true;
bool traceChildThreads = false;
bool traceTeam = false;
+ bool traceChildTeams = false;
bool traceSignal = true;
bool serialOutput = false;
FILE *outputFile = stdout;
@@ -433,6 +453,8 @@ main(int argc, const char *const *argv)
printReturnValues = false;
} else if (strcmp(arg, "-s") == 0) {
traceChildThreads = true;
+ } else if (strcmp(arg, "-t") == 0) {
+ traceChildTeams = true;
} else if (strcmp(arg, "-T") == 0) {
traceTeam = true;
} else if (strcmp(arg, "-g") == 0) {
@@ -488,29 +510,29 @@ main(int argc, const char *const *argv)
colorize = false;
// get thread/team to be debugged
- thread_id thread = -1;
- team_id team = -1;
+ thread_id threadID = -1;
+ team_id teamID = -1;
if (programArgCount > 1
- || !get_id(*programArgs, (traceTeam ? team : thread))) {
+ || !get_id(*programArgs, (traceTeam ? teamID : threadID))) {
// we've been given an executable and need to load it
- thread = load_program(programArgs, programArgCount, traceLoading);
- if (thread < 0) {
+ threadID = load_program(programArgs, programArgCount, traceLoading);
+ if (threadID < 0) {
fprintf(stderr, "%s: Failed to start `%s': %s\n", kCommandName,
- programArgs[0], strerror(thread));
+ programArgs[0], strerror(threadID));
exit(1);
}
}
// get the team ID, if we have none yet
- if (team < 0) {
+ if (teamID < 0) {
thread_info threadInfo;
- status_t error = get_thread_info(thread, &threadInfo);
+ status_t error = get_thread_info(threadID, &threadInfo);
if (error != B_OK) {
fprintf(stderr, "%s: Failed to get info for thread %ld: %s\n",
- kCommandName, thread, strerror(error));
+ kCommandName, threadID, strerror(error));
exit(1);
}
- team = threadInfo.team;
+ teamID = threadInfo.team;
}
// create a debugger port
@@ -522,34 +544,36 @@ main(int argc, const char *const *argv)
}
// install ourselves as the team debugger
- port_id nubPort = install_team_debugger(team, debuggerPort);
- if (nubPort < 0) {
- fprintf(stderr, "%s: Failed to install team debugger: %s\n",
- kCommandName, strerror(nubPort));
- exit(1);
- }
+ typedef map<team_id, Team*> TeamMap;
+ TeamMap debuggedTeams;
+ port_id nubPort;
+
+ {
+ Team* team = new Team(teamID);
+ status_t error = team->InstallDebugger(debuggerPort, traceTeam,
+ traceChildTeams, traceSignal);
+ if (error != B_OK)
+ exit(1);
+
+ debuggedTeams[team->ID()] = team;
- // set team debugging flags
- int32 teamDebugFlags = (traceTeam ? B_TEAM_DEBUG_POST_SYSCALL : 0)
- | (traceSignal ? B_TEAM_DEBUG_SIGNALS : 0);
- set_team_debugging_flags(nubPort, teamDebugFlags);
+ nubPort = team->NubPort();
+ }
// set thread debugging flags
- if (thread >= 0) {
+ if (threadID >= 0) {
int32 threadDebugFlags = 0;
if (!traceTeam) {
threadDebugFlags = B_THREAD_DEBUG_POST_SYSCALL
| (traceChildThreads
? B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS : 0);
}
- set_thread_debugging_flags(nubPort, thread, threadDebugFlags);
+ set_thread_debugging_flags(nubPort, threadID, threadDebugFlags);
// resume the target thread to be sure, it's running
- resume_thread(thread);
+ resume_thread(threadID);
}
- MemoryReader memoryReader(nubPort);
-
// debug loop
while (true) {
bool quitLoop = false;
@@ -570,6 +594,13 @@ main(int argc, const char *const *argv)
switch (code) {
case B_DEBUGGER_MESSAGE_POST_SYSCALL:
{
+ TeamMap::iterator it = debuggedTeams.find(message.origin.team);
+ if (it == debuggedTeams.end())
+ break;
+
+ Team* team = it->second;
+ MemoryReader& memoryReader = team->GetMemoryReader();
+
print_syscall(outputFile, message.post_syscall, memoryReader,
printArguments, contentsFlags, printReturnValues,
colorize, decimalFormat);
@@ -578,10 +609,8 @@ main(int argc, const char *const *argv)
case B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED:
{
- if (traceSignal) {
- print_signal(outputFile, message.signal_received,
- colorize);
- }
+ if (traceSignal)
+ print_signal(outputFile, message.signal_received, colorize);
break;
}
@@ -592,17 +621,50 @@ main(int argc, const char *const *argv)
case B_DEBUGGER_MESSAGE_SINGLE_STEP:
case B_DEBUGGER_MESSAGE_PRE_SYSCALL:
case B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED:
- case B_DEBUGGER_MESSAGE_TEAM_CREATED:
case B_DEBUGGER_MESSAGE_THREAD_CREATED:
case B_DEBUGGER_MESSAGE_THREAD_DELETED:
case B_DEBUGGER_MESSAGE_IMAGE_CREATED:
case B_DEBUGGER_MESSAGE_IMAGE_DELETED:
break;
+ case B_DEBUGGER_MESSAGE_TEAM_CREATED:
+ {
+ if (!traceChildTeams)
+ break;
+
+ Team* team = new(std::nothrow) Team(
+ message.team_created.new_team);
+ if (team == NULL) {
+ fprintf(stderr, "%s: Out of memory!\n", kCommandName);
+ break;
+ }
+
+ status_t error = team->InstallDebugger(debuggerPort, true, true,
+ traceSignal);
+ if (error != B_OK) {
+ delete team;
+ break;
+ }
+
+ debuggedTeams[team->ID()] = team;
+ break;
+ }
+
case B_DEBUGGER_MESSAGE_TEAM_DELETED:
- // the debugged team is gone
- quitLoop = true;
+ {
+ // a debugged team is gone
+ TeamMap::iterator it = debuggedTeams.find(message.origin.team);
+ if (it == debuggedTeams.end())
+ break;
+
+ Team* team = it->second;
+ debuggedTeams.erase(it);
+ delete team;
+
+ // if all debugged teams are gone, we're done
+ quitLoop = debuggedTeams.empty();
break;
+ }
}
if (quitLoop)
diff --git a/src/bin/gdb/libiberty/config.h b/src/bin/gdb/libiberty/config.h
index 694d1fb..837f5fd 100644
--- a/src/bin/gdb/libiberty/config.h
+++ b/src/bin/gdb/libiberty/config.h
@@ -114,7 +114,7 @@
/* #undef HAVE_ON_EXIT */
/* Define to 1 if you have the `psignal' function. */
-/* #undef HAVE_PSIGNAL */
+#define HAVE_PSIGNAL 1
/* Define to 1 if you have the `pstat_getdynamic' function. */
/* #undef HAVE_PSTAT_GETDYNAMIC */
diff --git a/src/kits/network/socket.cpp b/src/kits/network/socket.cpp
index 5aa32eb..9daf8ec 100644
--- a/src/kits/network/socket.cpp
+++ b/src/kits/network/socket.cpp
@@ -12,6 +12,7 @@
#include <errno.h>
#include <netinet/in.h>
+#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
@@ -176,7 +177,8 @@ connect(int socket, const struct sockaddr *address, socklen_t addressLength)
addressLength = sizeof(struct sockaddr_in);
}
- RETURN_AND_SET_ERRNO(_kern_connect(socket, address, addressLength));
+ RETURN_AND_SET_ERRNO_TEST_CANCEL(
+ _kern_connect(socket, address, addressLength));
}
@@ -205,6 +207,9 @@ accept(int socket, struct sockaddr *_address, socklen_t *_addressLength)
}
int acceptSocket = _kern_accept(socket, address, &addressLength);
+
+ pthread_testcancel();
+
if (acceptSocket < 0) {
errno = acceptSocket;
return -1;
@@ -224,7 +229,7 @@ accept(int socket, struct sockaddr *_address, socklen_t *_addressLength)
extern "C" ssize_t
recv(int socket, void *data, size_t length, int flags)
{
- RETURN_AND_SET_ERRNO(_kern_recv(socket, data, length, flags));
+ RETURN_AND_SET_ERRNO_TEST_CANCEL(_kern_recv(socket, data, length, flags));
}
@@ -248,6 +253,9 @@ recvfrom(int socket, void *data, size_t length, int flags,
ssize_t bytesReceived = _kern_recvfrom(socket, data, length, flags,
address, &addressLength);
+
+ pthread_testcancel();
+
if (bytesReceived < 0) {
errno = bytesReceived;
return -1;
@@ -267,14 +275,14 @@ recvfrom(int socket, void *data, size_t length, int flags,
extern "C" ssize_t
recvmsg(int socket, struct msghdr *message, int flags)
{
- RETURN_AND_SET_ERRNO(_kern_recvmsg(socket, message, flags));
+ RETURN_AND_SET_ERRNO_TEST_CANCEL(_kern_recvmsg(socket, message, flags));
}
extern "C" ssize_t
send(int socket, const void *data, size_t length, int flags)
{
- RETURN_AND_SET_ERRNO(_kern_send(socket, data, length, flags));
+ RETURN_AND_SET_ERRNO_TEST_CANCEL(_kern_send(socket, data, length, flags));
}
@@ -290,15 +298,15 @@ sendto(int socket, const void *data, size_t length, int flags,
addressLength = sizeof(struct sockaddr_in);
}
- RETURN_AND_SET_ERRNO(_kern_sendto(socket, data, length, flags, address,
- addressLength));
+ RETURN_AND_SET_ERRNO_TEST_CANCEL(
+ _kern_sendto(socket, data, length, flags, address, addressLength));
}
extern "C" ssize_t
sendmsg(int socket, const struct msghdr *message, int flags)
{
- RETURN_AND_SET_ERRNO(_kern_sendmsg(socket, message, flags));
+ RETURN_AND_SET_ERRNO_TEST_CANCEL(_kern_sendmsg(socket, message, flags));
}
diff --git a/src/libs/posix_error_mapper/Jamfile b/src/libs/posix_error_mapper/Jamfile
index e843480..4203401 100644
--- a/src/libs/posix_error_mapper/Jamfile
+++ b/src/libs/posix_error_mapper/Jamfile
@@ -14,4 +14,5 @@ StaticLibrary libposix_error_mapper.a :
pthread_spinlock.cpp
pthread_thread.cpp
signal.cpp
+ time.cpp
;
diff --git a/src/libs/posix_error_mapper/pthread_thread.cpp b/src/libs/posix_error_mapper/pthread_thread.cpp
index 7fcca73..4d66a91 100644
--- a/src/libs/posix_error_mapper/pthread_thread.cpp
+++ b/src/libs/posix_error_mapper/pthread_thread.cpp
@@ -1,10 +1,11 @@
/*
- * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include <pthread.h>
+#include <signal.h>
#include "posix_error_mapper.h"
diff --git a/src/libs/posix_error_mapper/signal.cpp b/src/libs/posix_error_mapper/signal.cpp
index eac3a80..bfaf4e0 100644
--- a/src/libs/posix_error_mapper/signal.cpp
+++ b/src/libs/posix_error_mapper/signal.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2010-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
@@ -13,3 +13,8 @@ WRAPPER_FUNCTION(int, pthread_sigmask,
(int how, const sigset_t *set, sigset_t *oldSet),
return B_TO_POSITIVE_ERROR(sReal_pthread_sigmask(how, set, oldSet));
)
+
+
+WRAPPER_FUNCTION(int, sigwait, (const sigset_t *set, int *signal),
+ return B_TO_POSITIVE_ERROR(sReal_sigwait(set, signal));
+)
diff --git a/src/libs/posix_error_mapper/time.cpp b/src/libs/posix_error_mapper/time.cpp
new file mode 100644
index 0000000..fee45a0
--- /dev/null
+++ b/src/libs/posix_error_mapper/time.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <time.h>
+
+#include "posix_error_mapper.h"
+
+
+WRAPPER_FUNCTION(int, clock_nanosleep,
+ (clockid_t clockID, int flags, const struct timespec* time,
+ struct timespec* remainingTime),
+ return B_TO_POSITIVE_ERROR(sReal_clock_nanosleep(clockID, flags, time,
+ remainingTime));
+)
+
+
+WRAPPER_FUNCTION(int, clock_getcpuclockid,
+ (pid_t pid, clockid_t* _clockID),
+ return B_TO_POSITIVE_ERROR(sReal_clock_getcpuclockid(pid, _clockID));
+)
diff --git a/src/system/kernel/DPC.cpp b/src/system/kernel/DPC.cpp
new file mode 100644
index 0000000..7d64ca4
--- /dev/null
+++ b/src/system/kernel/DPC.cpp
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <DPC.h>
+
+#include <util/AutoLock.h>
+
+
+#define NORMAL_PRIORITY B_NORMAL_PRIORITY
+#define HIGH_PRIORITY B_URGENT_DISPLAY_PRIORITY
+#define REAL_TIME_PRIORITY B_FIRST_REAL_TIME_PRIORITY
+
+#define DEFAULT_QUEUE_SLOT_COUNT 64
+
+
+static DPCQueue sNormalPriorityQueue;
+static DPCQueue sHighPriorityQueue;
+static DPCQueue sRealTimePriorityQueue;
+
+
+// #pragma mark - FunctionDPCCallback
+
+
+FunctionDPCCallback::FunctionDPCCallback(DPCQueue* owner)
+ :
+ fOwner(owner)
+{
+}
+
+
+void
+FunctionDPCCallback::SetTo(void (*function)(void*), void* argument)
+{
+ fFunction = function;
+ fArgument = argument;
+}
+
+
+void
+FunctionDPCCallback::DoDPC(DPCQueue* queue)
+{
+ fFunction(fArgument);
+
+ if (fOwner != NULL)
+ fOwner->Recycle(this);
+}
+
+
+// #pragma mark - DPCCallback
+
+
+DPCCallback::DPCCallback()
+ :
+ fInQueue(NULL)
+{
+}
+
+
+DPCCallback::~DPCCallback()
+{
+}
+
+
+// #pragma mark - DPCQueue
+
+
+DPCQueue::DPCQueue()
+ :
+ fThreadID(-1),
+ fCallbackInProgress(NULL),
+ fCallbackDoneCondition(NULL)
+{
+ B_INITIALIZE_SPINLOCK(&fLock);
+
+ fPendingCallbacksCondition.Init(this, "dpc queue");
+}
+
+
+DPCQueue::~DPCQueue()
+{
+ // close, if not closed yet
+ {
+ InterruptsSpinLocker locker(fLock);
+ if (!_IsClosed()) {
+ locker.Unlock();
+ Close(false);
+ }
+ }
+
+ // delete function callbacks
+ while (DPCCallback* callback = fUnusedFunctionCallbacks.RemoveHead())
+ delete callback;
+}
+
+
+/*static*/ DPCQueue*
+DPCQueue::DefaultQueue(int priority)
+{
+ if (priority <= NORMAL_PRIORITY)
+ return &sNormalPriorityQueue;
+
+ if (priority <= HIGH_PRIORITY)
+ return &sHighPriorityQueue;
+
+ return &sRealTimePriorityQueue;
+}
+
+
+status_t
+DPCQueue::Init(const char* name, int32 priority, uint32 reservedSlots)
+{
+ // create function callbacks
+ for (uint32 i = 0; i < reservedSlots; i++) {
+ FunctionDPCCallback* callback
+ = new(std::nothrow) FunctionDPCCallback(this);
+ if (callback == NULL)
+ return B_NO_MEMORY;
+
+ fUnusedFunctionCallbacks.Add(callback);
+ }
+
+ // spawn the thread
+ fThreadID = spawn_kernel_thread(&_ThreadEntry, name, priority, this);
+ if (fThreadID < 0)
+ return fThreadID;
+
+ resume_thread(fThreadID);
+
+ return B_OK;
+}
+
+
+void
+DPCQueue::Close(bool cancelPending)
+{
+ InterruptsSpinLocker locker(fLock);
+
+ if (_IsClosed())
+ return;
+
+ // If requested, dequeue all pending callbacks
+ if (cancelPending)
+ fCallbacks.MakeEmpty();
+
+ // mark the queue closed
+ thread_id thread = fThreadID;
+ fThreadID = -1;
+
+ locker.Unlock();
+
+ // wake up the thread and wait for it
+ fPendingCallbacksCondition.NotifyAll();
+ wait_for_thread(thread, NULL);
+}
+
+
+status_t
+DPCQueue::Add(DPCCallback* callback, bool schedulerLocked)
+{
+ // queue the callback, if the queue isn't closed already
+ InterruptsSpinLocker locker(fLock);
+
+ if (_IsClosed())
+ return B_NOT_INITIALIZED;
+
+ bool wasEmpty = fCallbacks.IsEmpty();
+ fCallbacks.Add(callback);
+ callback->fInQueue = this;
+
+ locker.Unlock();
+
+ // notify the condition variable, if necessary
+ if (wasEmpty)
+ fPendingCallbacksCondition.NotifyAll(schedulerLocked);
+
+ return B_OK;
+}
+
+
+status_t
+DPCQueue::Add(void (*function)(void*), void* argument, bool schedulerLocked)
+{
+ if (function == NULL)
+ return B_BAD_VALUE;
+
+ // get a free callback
+ InterruptsSpinLocker locker(fLock);
+
+ DPCCallback* callback = fUnusedFunctionCallbacks.RemoveHead();
+ if (callback == NULL)
+ return B_NO_MEMORY;
+
+ locker.Unlock();
+
+ // init the callback
+ FunctionDPCCallback* functionCallback
+ = static_cast<FunctionDPCCallback*>(callback);
+ functionCallback->SetTo(function, argument);
+
+ // add it
+ status_t error = Add(functionCallback, schedulerLocked);
+ if (error != B_OK)
+ Recycle(functionCallback);
+
+ return error;
+}
+
+
+bool
+DPCQueue::Cancel(DPCCallback* callback)
+{
+ InterruptsSpinLocker locker(fLock);
+
+ // If the callback is queued, remove it.
+ if (callback->fInQueue == this) {
+ fCallbacks.Remove(callback);
+ return true;
+ }
+
+ // The callback is not queued. If it isn't in progress, we're done, too.
+ if (callback != fCallbackInProgress)
+ return false;
+
+ // The callback is currently being executed. We need to wait for it to be
+ // done.
+
+ // Set the respective condition, if not set yet. For the unlikely case that
+ // there are multiple threads trying to cancel the callback at the same
+ // time, the condition variable of the first thread will be used.
+ ConditionVariable condition;
+ if (fCallbackDoneCondition == NULL)
+ fCallbackDoneCondition = &condition;
+
+ // add our wait entry
+ ConditionVariableEntry waitEntry;
+ fCallbackDoneCondition->Add(&waitEntry);
+
+ // wait
+ locker.Unlock();
+ waitEntry.Wait();
+
+ return false;
+}
+
+
+void
+DPCQueue::Recycle(FunctionDPCCallback* callback)
+{
+ InterruptsSpinLocker locker(fLock);
+ fUnusedFunctionCallbacks.Insert(callback, false);
+}
+
+
+/*static*/ status_t
+DPCQueue::_ThreadEntry(void* data)
+{
+ return ((DPCQueue*)data)->_Thread();
+}
+
+
+status_t
+DPCQueue::_Thread()
+{
+ while (true) {
+ InterruptsSpinLocker locker(fLock);
+
+ // get the next pending callback
+ DPCCallback* callback = fCallbacks.RemoveHead();
+ if (callback == NULL) {
+ // nothing is pending -- wait unless the queue is already closed
+ if (_IsClosed())
+ break;
+
+ ConditionVariableEntry waitEntry;
+ fPendingCallbacksCondition.Add(&waitEntry);
+
+ locker.Unlock();
+ waitEntry.Wait();
+
+ continue;
+ }
+
+ callback->fInQueue = NULL;
+ fCallbackInProgress = callback;
+
+ // call the callback
+ locker.Unlock();
+ callback->DoDPC(this);
+ locker.Lock();
+
+ fCallbackInProgress = NULL;
+
+ // wake up threads waiting for the callback to be done
+ ConditionVariable* doneCondition = fCallbackDoneCondition;
+ fCallbackDoneCondition = NULL;
+ locker.Unlock();
+ if (doneCondition != NULL)
+ doneCondition->NotifyAll();
+ }
+
+ return B_OK;
+}
+
+
+// #pragma mark - kernel private
+
+
+void
+dpc_init()
+{
+ // create the default queues
+ new(&sNormalPriorityQueue) DPCQueue;
+ new(&sHighPriorityQueue) DPCQueue;
+ new(&sRealTimePriorityQueue) DPCQueue;
+
+ if (sNormalPriorityQueue.Init("dpc: normal priority", NORMAL_PRIORITY,
+ DEFAULT_QUEUE_SLOT_COUNT) != B_OK
+ || sHighPriorityQueue.Init("dpc: high priority", HIGH_PRIORITY,
+ DEFAULT_QUEUE_SLOT_COUNT) != B_OK
+ || sRealTimePriorityQueue.Init("dpc: real-time priority",
+ REAL_TIME_PRIORITY, DEFAULT_QUEUE_SLOT_COUNT) != B_OK) {
+ panic("Failed to create default DPC queues!");
+ }
+}
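The queues above give kernel code a way to defer work out of interrupt handlers or scheduler-locked paths into a dedicated worker thread. As a hedged orientation sketch only -- built against the DPCQueue interface added here, with purely illustrative driver names:

	// Sketch: acknowledge the hardware in the interrupt handler, then hand
	// the heavy lifting to the normal-priority DPC queue's worker thread.
	static void
	my_device_process_completions(void* data)
	{
		// runs in the DPC thread, outside interrupt context
		((my_device_info*)data)->ProcessCompletedRequests();
	}

	static int32
	my_device_interrupt(void* data)
	{
		DPCQueue::DefaultQueue(B_NORMAL_PRIORITY)->Add(
			&my_device_process_completions, data, false);
		return B_HANDLED_INTERRUPT;
	}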
diff --git a/src/system/kernel/Jamfile b/src/system/kernel/Jamfile
index be743c5..bfc433f 100644
--- a/src/system/kernel/Jamfile
+++ b/src/system/kernel/Jamfile
@@ -28,6 +28,7 @@ KernelMergeObject kernel_core.o :
commpage.cpp
condition_variable.cpp
cpu.cpp
+ DPC.cpp
elf.cpp
heap.cpp
image.cpp
@@ -50,7 +51,9 @@ KernelMergeObject kernel_core.o :
team.cpp
thread.cpp
timer.cpp
+ UserEvent.cpp
usergroup.cpp
+ UserTimer.cpp
wait_for_objects.cpp
# locks
diff --git a/src/system/kernel/TeamThreadTables.h b/src/system/kernel/TeamThreadTables.h
new file mode 100644
index 0000000..5816e93
--- /dev/null
+++ b/src/system/kernel/TeamThreadTables.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef KERNEL_TEAM_THREAD_TABLES_H
+#define KERNEL_TEAM_THREAD_TABLES_H
+
+
+#include <thread_types.h>
+
+
+namespace BKernel {
+
+
+template<typename Element>
+struct TeamThreadTable {
+public:
+ typedef typename Element::id_type id_type;
+ typedef typename Element::iterator_type IteratorEntry;
+
+ struct Iterator {
+ Iterator()
+ :
+ fNext(NULL)
+ {
+ }
+
+ Iterator(IteratorEntry* nextEntry)
+ {
+ _SetNext(nextEntry);
+ }
+
+ bool HasNext() const
+ {
+ return fNext != NULL;
+ }
+
+ Element* Next()
+ {
+ Element* result = fNext;
+ if (result != NULL)
+ _SetNext(result->GetDoublyLinkedListLink()->next);
+
+ return result;
+ }
+
+ private:
+ void _SetNext(IteratorEntry* entry)
+ {
+ while (entry != NULL) {
+ if (entry->id >= 0) {
+ fNext = static_cast<Element*>(entry);
+ return;
+ }
+
+ entry = entry->GetDoublyLinkedListLink()->next;
+ }
+
+ fNext = NULL;
+ }
+
+ private:
+ Element* fNext;
+ };
+
+public:
+ TeamThreadTable()
+ :
+ fNextSerialNumber(1)
+ {
+ }
+
+ status_t Init(size_t initialSize)
+ {
+ return fTable.Init(initialSize);
+ }
+
+ void Insert(Element* element)
+ {
+ element->serial_number = fNextSerialNumber++;
+ fTable.InsertUnchecked(element);
+ fList.Add(element);
+ }
+
+ void Remove(Element* element)
+ {
+ fTable.RemoveUnchecked(element);
+ fList.Remove(element);
+ }
+
+ Element* Lookup(id_type id, bool visibleOnly = true) const
+ {
+ Element* element = fTable.Lookup(id);
+ return element != NULL && (!visibleOnly || element->visible)
+ ? element : NULL;
+ }
+
+ /*! Gets an iterator.
+ The iterator iterates through all, including invisible, entries!
+ */
+ Iterator GetIterator()
+ {
+ return Iterator(fList.Head());
+ }
+
+ void InsertIteratorEntry(IteratorEntry* entry)
+ {
+ // add to front
+ entry->id = -1;
+ entry->visible = false;
+ fList.Add(entry, false);
+ }
+
+ void RemoveIteratorEntry(IteratorEntry* entry)
+ {
+ fList.Remove(entry);
+ }
+
+ Element* NextElement(IteratorEntry* entry, bool visibleOnly = true)
+ {
+ if (entry == fList.Tail())
+ return NULL;
+
+ IteratorEntry* nextEntry = entry;
+
+ while (true) {
+ nextEntry = nextEntry->GetDoublyLinkedListLink()->next;
+ if (nextEntry == NULL) {
+ // end of list -- requeue entry at the end and return NULL
+ fList.Remove(entry);
+ fList.Add(entry);
+ return NULL;
+ }
+
+ if (nextEntry->id >= 0 && (!visibleOnly || nextEntry->visible)) {
+ // found an element -- requeue entry after element
+ Element* element = static_cast<Element*>(nextEntry);
+ fList.Remove(entry);
+ fList.InsertAfter(nextEntry, entry);
+ return element;
+ }
+ }
+ }
+
+private:
+ struct HashDefinition {
+ typedef id_type KeyType;
+ typedef Element ValueType;
+
+ size_t HashKey(id_type key) const
+ {
+ return key;
+ }
+
+ size_t Hash(Element* value) const
+ {
+ return HashKey(value->id);
+ }
+
+ bool Compare(id_type key, Element* value) const
+ {
+ return value->id == key;
+ }
+
+ Element*& GetLink(Element* value) const
+ {
+ return value->hash_next;
+ }
+ };
+
+ typedef BOpenHashTable<HashDefinition> ElementTable;
+ typedef DoublyLinkedList<IteratorEntry> List;
+
+private:
+ ElementTable fTable;
+ List fList;
+ int64 fNextSerialNumber;
+};
+
+
+} // namespace BKernel
+
+
+#endif // KERNEL_TEAM_THREAD_TABLES_H
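The iterator-entry mechanism above is what allows walking the team/thread tables without holding a lock for the whole traversal: the entry stays linked into the list and NextElement() resumes behind it. A hedged sketch of the intended pattern, with gTeamTable standing in for the concrete table instance (the table name, element type, and ProcessTeam() are assumptions for illustration):

	// Sketch: visit all visible teams; the entry keeps our position even if
	// the protecting lock is dropped between iterations.
	Team::iterator_type entry;
	gTeamTable.InsertIteratorEntry(&entry);

	while (Team* team = gTeamTable.NextElement(&entry)) {
		// the lock could be released here and re-acquired before the next
		// NextElement() call
		ProcessTeam(team);
	}

	gTeamTable.RemoveIteratorEntry(&entry);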
diff --git a/src/system/kernel/UserEvent.cpp b/src/system/kernel/UserEvent.cpp
new file mode 100644
index 0000000..7d890d9
--- /dev/null
+++ b/src/system/kernel/UserEvent.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <UserEvent.h>
+
+#include <ksignal.h>
+#include <thread_types.h>
+#include <util/AutoLock.h>
+
+
+// #pragma mark - UserEvent
+
+
+UserEvent::~UserEvent()
+{
+}
+
+
+// #pragma mark - SignalEvent
+
+
+struct SignalEvent::EventSignal : Signal {
+ EventSignal(uint32 number, int32 signalCode, int32 errorCode,
+ pid_t sendingProcess)
+ :
+ Signal(number, signalCode, errorCode, sendingProcess),
+ fInUse(false)
+ {
+ }
+
+ bool IsInUse() const
+ {
+ return fInUse;
+ }
+
+ void SetInUse(bool inUse)
+ {
+ fInUse = inUse;
+ }
+
+ virtual void Handled()
+ {
+ // mark not-in-use
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ fInUse = false;
+ schedulerLocker.Unlock();
+
+ Signal::Handled();
+ }
+
+private:
+ bool fInUse;
+};
+
+
+SignalEvent::SignalEvent(EventSignal* signal)
+ :
+ fSignal(signal)
+{
+}
+
+
+SignalEvent::~SignalEvent()
+{
+ fSignal->ReleaseReference();
+}
+
+
+void
+SignalEvent::SetUserValue(union sigval userValue)
+{
+ fSignal->SetUserValue(userValue);
+}
+
+
+// #pragma mark - TeamSignalEvent
+
+
+TeamSignalEvent::TeamSignalEvent(Team* team, EventSignal* signal)
+ :
+ SignalEvent(signal),
+ fTeam(team)
+{
+}
+
+
+/*static*/ TeamSignalEvent*
+TeamSignalEvent::Create(Team* team, uint32 signalNumber, int32 signalCode,
+ int32 errorCode)
+{
+ // create the signal
+ EventSignal* signal = new(std::nothrow) EventSignal(signalNumber,
+ signalCode, errorCode, team->id);
+ if (signal == NULL)
+ return NULL;
+
+ // create the event
+	TeamSignalEvent* event = new(std::nothrow) TeamSignalEvent(team, signal);
+ if (event == NULL) {
+ delete signal;
+ return NULL;
+ }
+
+ return event;
+}
+
+
+status_t
+TeamSignalEvent::Fire()
+{
+ // called with the scheduler lock held
+ if (fSignal->IsInUse())
+ return B_BUSY;
+
+ fSignal->AcquireReference();
+ // one reference is transferred to send_signal_to_team_locked
+ status_t error = send_signal_to_team_locked(fTeam, fSignal->Number(),
+ fSignal, B_DO_NOT_RESCHEDULE);
+ if (error == B_OK) {
+ // Mark the signal in-use. There are situations (for certain signals),
+ // in which send_signal_to_team_locked() succeeds without queuing the
+ // signal.
+ fSignal->SetInUse(fSignal->IsPending());
+ }
+
+ return error;
+}
+
+
+// #pragma mark - ThreadSignalEvent
+
+
+ThreadSignalEvent::ThreadSignalEvent(Thread* thread, EventSignal* signal)
+ :
+ SignalEvent(signal),
+ fThread(thread)
+{
+}
+
+
+/*static*/ ThreadSignalEvent*
+ThreadSignalEvent::Create(Thread* thread, uint32 signalNumber, int32 signalCode,
+ int32 errorCode, pid_t sendingTeam)
+{
+ // create the signal
+ EventSignal* signal = new(std::nothrow) EventSignal(signalNumber,
+ signalCode, errorCode, sendingTeam);
+ if (signal == NULL)
+ return NULL;
+
+ // create the event
+	ThreadSignalEvent* event
+		= new(std::nothrow) ThreadSignalEvent(thread, signal);
+ if (event == NULL) {
+ delete signal;
+ return NULL;
+ }
+
+ return event;
+}
+
+
+status_t
+ThreadSignalEvent::Fire()
+{
+ // called with the scheduler lock held
+ if (fSignal->IsInUse())
+ return B_BUSY;
+
+ fSignal->AcquireReference();
+		// one reference is transferred to send_signal_to_thread_locked
+ status_t error = send_signal_to_thread_locked(fThread, fSignal->Number(),
+ fSignal, B_DO_NOT_RESCHEDULE);
+ if (error == B_OK) {
+ // Mark the signal in-use. There are situations (for certain signals),
+		// in which send_signal_to_thread_locked() succeeds without queuing the
+ // signal.
+ fSignal->SetInUse(fSignal->IsPending());
+ }
+
+ return error;
+}
+
+
+// #pragma mark - CreateThreadEvent
+
+
+CreateThreadEvent::CreateThreadEvent(const ThreadCreationAttributes& attributes)
+ :
+ fCreationAttributes(attributes),
+ fPendingDPC(false)
+{
+ // attributes.name is a pointer to a temporary buffer. Copy the name into
+ // our own buffer and replace the name pointer.
+ strlcpy(fThreadName, attributes.name, sizeof(fThreadName));
+ fCreationAttributes.name = fThreadName;
+}
+
+
+CreateThreadEvent::~CreateThreadEvent()
+{
+ // cancel the DPC to be on the safe side
+ DPCQueue::DefaultQueue(B_NORMAL_PRIORITY)->Cancel(this);
+}
+
+
+/*static*/ CreateThreadEvent*
+CreateThreadEvent::Create(const ThreadCreationAttributes& attributes)
+{
+ return new(std::nothrow) CreateThreadEvent(attributes);
+}
+
+
+status_t
+CreateThreadEvent::Fire()
+{
+ if (fPendingDPC)
+ return B_BUSY;
+
+ fPendingDPC = true;
+
+ DPCQueue::DefaultQueue(B_NORMAL_PRIORITY)->Add(this, true);
+
+ return B_OK;
+}
+
+
+void
+CreateThreadEvent::DoDPC(DPCQueue* queue)
+{
+ // We're no longer queued in the DPC queue, so we can be reused.
+ {
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ fPendingDPC = false;
+ }
+
+ // create the thread
+ thread_id threadID = thread_create_thread(fCreationAttributes, false);
+ if (threadID >= 0)
+ resume_thread(threadID);
+}
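The classes above form the delivery side of user timers: an event is built once, and Fire() can then be called repeatedly from timer hooks with the scheduler lock held, with B_BUSY indicating that the previous delivery is still pending. A minimal sketch using only the interfaces from this file ('team' is assumed to be a valid, referenced Team*):

	// Sketch: a reusable SIGALRM delivery to a team.
	TeamSignalEvent* event = TeamSignalEvent::Create(team, SIGALRM, SI_TIMER, 0);
	if (event == NULL)
		return B_NO_MEMORY;

	// later, with the scheduler lock held (e.g. from a timer hook):
	if (event->Fire() == B_BUSY) {
		// the previous signal hasn't been handled yet -- a user timer would
		// increment its overrun count at this point
	}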
diff --git a/src/system/kernel/UserTimer.cpp b/src/system/kernel/UserTimer.cpp
new file mode 100644
index 0000000..8c0cabe
--- /dev/null
+++ b/src/system/kernel/UserTimer.cpp
@@ -0,0 +1,1786 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <UserTimer.h>
+
+#include <algorithm>
+
+#include <AutoDeleter.h>
+
+#include <debug.h>
+#include <kernel.h>
+#include <real_time_clock.h>
+#include <team.h>
+#include <thread_types.h>
+#include <UserEvent.h>
+#include <util/AutoLock.h>
+
+
+// Minimum interval length in microseconds for a periodic timer. This is not a
+// restriction on the user timer interval length itself, but the minimum time
+// span by which we advance the start time for kernel timers. A shorter user
+// timer interval will result in the overrun count being increased every time
+// the kernel timer is rescheduled.
+static const bigtime_t kMinPeriodicTimerInterval = 100;
+
+static RealTimeUserTimerList sAbsoluteRealTimeTimers;
+static spinlock sAbsoluteRealTimeTimersLock = B_SPINLOCK_INITIALIZER;
+
+
+// #pragma mark - TimerLocker
+
+
+namespace {
+
+struct TimerLocker {
+ Team* team;
+ Thread* thread;
+
+ TimerLocker()
+ :
+ team(NULL),
+ thread(NULL)
+ {
+ }
+
+ ~TimerLocker()
+ {
+ Unlock();
+ }
+
+ void Lock(Team* team, Thread* thread)
+ {
+ this->team = team;
+ team->Lock();
+
+ this->thread = thread;
+
+ if (thread != NULL) {
+ thread->AcquireReference();
+ thread->Lock();
+ }
+
+ // We don't check thread->team != team here, since this method can be
+ // called for new threads not added to the team yet.
+ }
+
+ status_t LockAndGetTimer(thread_id threadID, int32 timerID,
+ UserTimer*& _timer)
+ {
+ team = thread_get_current_thread()->team;
+ team->Lock();
+
+ if (threadID >= 0) {
+ thread = Thread::GetAndLock(threadID);
+ if (thread == NULL)
+ return B_BAD_THREAD_ID;
+ if (thread->team != team)
+ return B_NOT_ALLOWED;
+ }
+
+ UserTimer* timer = thread != NULL
+ ? thread->UserTimerFor(timerID) : team->UserTimerFor(timerID);
+ if (timer == NULL)
+ return B_BAD_VALUE;
+
+ _timer = timer;
+ return B_OK;
+ }
+
+ void Unlock()
+ {
+ if (thread != NULL) {
+ thread->UnlockAndReleaseReference();
+ thread = NULL;
+ }
+ if (team != NULL) {
+ team->Unlock();
+ team = NULL;
+ }
+ }
+};
+
+} // unnamed namespace
+
+
+// #pragma mark - UserTimer
+
+
+UserTimer::UserTimer()
+ :
+ fID(-1),
+ fEvent(NULL),
+ fNextTime(0),
+ fInterval(0),
+ fOverrunCount(0),
+ fScheduled(false)
+{
+ // mark the timer unused
+ fTimer.user_data = this;
+}
+
+
+UserTimer::~UserTimer()
+{
+ delete fEvent;
+}
+
+
+/*! \fn UserTimer::Schedule(bigtime_t nextTime, bigtime_t interval,
+		uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval)
+ Cancels the timer, if it is already scheduled, and optionally schedules it
+ with new parameters.
+
+ The caller must not hold the scheduler lock.
+
+ \param nextTime The time at which the timer should go off the next time. If
+ \c B_INFINITE_TIMEOUT, the timer will not be scheduled. Whether the
+ value is interpreted as absolute or relative time, depends on \c flags.
+		value is interpreted as absolute or relative time depends on \c flags.
+ periodically every \a interval microseconds. Otherwise it will fire
+ only once at \a nextTime. If \a nextTime is \c B_INFINITE_TIMEOUT, it
+		will never fire in either case.
+ \param flags Bitwise OR of flags. Currently \c B_ABSOLUTE_TIMEOUT and
+ \c B_RELATIVE_TIMEOUT are supported, indicating whether \a nextTime is
+ an absolute or relative time.
+ \param _oldRemainingTime Return variable that will be set to the
+ microseconds remaining to the time for which the timer was scheduled
+ next before the call. If it wasn't scheduled, the variable is set to
+ \c B_INFINITE_TIMEOUT.
+ \param _oldInterval Return variable that will be set to the interval in
+ microseconds the timer was to be scheduled periodically. If the timer
+ wasn't periodic, the variable is set to \c 0.
+*/
+
+
+/*! Cancels the timer, if it is scheduled.
+
+ The caller must not hold the scheduler lock.
+*/
+void
+UserTimer::Cancel()
+{
+ bigtime_t oldNextTime;
+ bigtime_t oldInterval;
+ return Schedule(B_INFINITE_TIMEOUT, 0, 0, oldNextTime, oldInterval);
+}
+
+
+/*! \fn UserTimer::GetInfo(bigtime_t& _remainingTime, bigtime_t& _interval,
+ uint32& _overrunCount)
+ Return information on the current timer.
+
+ The caller must not hold the scheduler lock.
+
+ \param _remainingTime Return variable that will be set to the microseconds
+ remaining to the time for which the timer was scheduled next before the
+ call. If it wasn't scheduled, the variable is set to
+ \c B_INFINITE_TIMEOUT.
+ \param _interval Return variable that will be set to the interval in
+ microseconds the timer is to be scheduled periodically. If the timer
+ isn't periodic, the variable is set to \c 0.
+ \param _overrunCount Return variable that will be set to the number of times
+		the timer went off, but its event couldn't be delivered, since its
+ previous delivery hasn't been handled yet.
+*/
+
+
+/*static*/ int32
+UserTimer::HandleTimerHook(struct timer* timer)
+{
+ ((UserTimer*)timer->user_data)->HandleTimer();
+ return B_HANDLED_INTERRUPT;
+}
+
+
+void
+UserTimer::HandleTimer()
+{
+ if (fEvent != NULL) {
+ // fire the event and update the overrun count, if necessary
+ status_t error = fEvent->Fire();
+ if (error == B_BUSY) {
+ if (fOverrunCount < MAX_USER_TIMER_OVERRUN_COUNT)
+ fOverrunCount++;
+ }
+ }
+
+ // Since we don't use periodic kernel timers, it isn't scheduled anymore.
+ // If the timer is periodic, the derived class' version will schedule it
+ // again.
+ fScheduled = false;
+}
+
+
+/*! Updates the start time for a periodic timer after it expired, enforcing
+ sanity limits and updating \c fOverrunCount, if necessary.
+
+ The caller must not hold the scheduler lock.
+*/
+void
+UserTimer::UpdatePeriodicStartTime()
+{
+ if (fInterval < kMinPeriodicTimerInterval) {
+ bigtime_t skip = (kMinPeriodicTimerInterval + fInterval - 1) / fInterval;
+ fNextTime += skip * fInterval;
+
+ // One interval is the normal advance, so don't consider it skipped.
+ skip--;
+
+ if (skip + fOverrunCount > MAX_USER_TIMER_OVERRUN_COUNT)
+ fOverrunCount = MAX_USER_TIMER_OVERRUN_COUNT;
+ else
+ fOverrunCount += skip;
+ } else
+ fNextTime += fInterval;
+}
+
+
+/*! Checks whether the timer start time lies too much in the past and, if so,
+ adjusts it and updates \c fOverrunCount.
+
+ The caller must not hold the scheduler lock.
+
+ \param now The current time.
+*/
+void
+UserTimer::CheckPeriodicOverrun(bigtime_t now)
+{
+ if (fNextTime + fInterval > now)
+ return;
+
+ // The start time is a full interval or more in the past. Skip those
+ // intervals.
+ bigtime_t skip = (now - fNextTime) / fInterval;
+ fNextTime += skip * fInterval;
+
+ if (skip + fOverrunCount > MAX_USER_TIMER_OVERRUN_COUNT)
+ fOverrunCount = MAX_USER_TIMER_OVERRUN_COUNT;
+ else
+ fOverrunCount += skip;
+}
+
+
+// #pragma mark - SystemTimeUserTimer
+
+
+void
+SystemTimeUserTimer::Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ // get the current time
+ bigtime_t now = system_time();
+
+ // Cancel the old timer, if still scheduled, and get the previous values.
+ if (fScheduled) {
+ cancel_timer(&fTimer);
+
+ _oldRemainingTime = fNextTime - now;
+ _oldInterval = fInterval;
+
+ fScheduled = false;
+ } else {
+ _oldRemainingTime = B_INFINITE_TIMEOUT;
+ _oldInterval = 0;
+ }
+
+ // schedule the new timer
+ fNextTime = nextTime;
+ fInterval = interval;
+ fOverrunCount = 0;
+
+ if (nextTime == B_INFINITE_TIMEOUT)
+ return;
+
+ if ((flags & B_RELATIVE_TIMEOUT) != 0)
+ fNextTime += now;
+
+ ScheduleKernelTimer(now, fInterval > 0);
+}
+
+
+void
+SystemTimeUserTimer::GetInfo(bigtime_t& _remainingTime, bigtime_t& _interval,
+ uint32& _overrunCount)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ if (fScheduled) {
+ _remainingTime = fNextTime - system_time();
+ _interval = fInterval;
+ } else {
+ _remainingTime = B_INFINITE_TIMEOUT;
+ _interval = 0;
+ }
+
+ _overrunCount = fOverrunCount;
+}
+
+
+void
+SystemTimeUserTimer::HandleTimer()
+{
+ UserTimer::HandleTimer();
+
+ // if periodic, reschedule the kernel timer
+ if (fInterval > 0) {
+ UpdatePeriodicStartTime();
+ ScheduleKernelTimer(system_time(), true);
+ }
+}
+
+
+/*! Schedules the kernel timer.
+
+ The caller must hold the scheduler lock.
+
+ \param now The current system time to be used.
+ \param checkPeriodicOverrun If \c true, calls CheckPeriodicOverrun() first,
+ i.e. the start time will be adjusted to not lie too much in the past.
+*/
+void
+SystemTimeUserTimer::ScheduleKernelTimer(bigtime_t now,
+ bool checkPeriodicOverrun)
+{
+ // If periodic, check whether the start time is too far in the past.
+ if (checkPeriodicOverrun)
+ CheckPeriodicOverrun(now);
+
+ uint32 timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER
+ | B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_ACQUIRE_SCHEDULER_LOCK;
+ // We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions
+ // between setting/canceling the timer and the event handler.
+
+ fTimer.schedule_time = std::max(fNextTime, (bigtime_t)0);
+ fTimer.period = 0;
+
+ add_timer(&fTimer, &HandleTimerHook, fTimer.schedule_time, timerFlags);
+
+ fScheduled = true;
+}
+
+
+// #pragma mark - RealTimeUserTimer
+
+
+void
+RealTimeUserTimer::Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ // get the current time
+ bigtime_t now = system_time();
+
+ // Cancel the old timer, if still scheduled, and get the previous values.
+ if (fScheduled) {
+ cancel_timer(&fTimer);
+
+ _oldRemainingTime = fNextTime - now;
+ _oldInterval = fInterval;
+
+ if (fAbsolute) {
+ SpinLocker globalListLocker(sAbsoluteRealTimeTimersLock);
+ sAbsoluteRealTimeTimers.Remove(this);
+ }
+
+ fScheduled = false;
+ } else {
+ _oldRemainingTime = B_INFINITE_TIMEOUT;
+ _oldInterval = 0;
+ }
+
+ // schedule the new timer
+ fNextTime = nextTime;
+ fInterval = interval;
+ fOverrunCount = 0;
+
+ if (nextTime == B_INFINITE_TIMEOUT)
+ return;
+
+ fAbsolute = (flags & B_RELATIVE_TIMEOUT) == 0;
+
+ if (fAbsolute) {
+ fRealTimeOffset = rtc_boot_time();
+ fNextTime -= fRealTimeOffset;
+
+ // If periodic, check whether the start time is too far in the past.
+ if (fInterval > 0)
+ CheckPeriodicOverrun(now);
+
+ // add the absolute timer to the global list
+ SpinLocker globalListLocker(sAbsoluteRealTimeTimersLock);
+ sAbsoluteRealTimeTimers.Insert(this);
+ } else
+ fNextTime += now;
+
+ ScheduleKernelTimer(now, false);
+}
+
+
+/*! Called when the real-time clock has been changed.
+
+ The caller must hold the scheduler lock. Optionally the caller may also
+ hold \c sAbsoluteRealTimeTimersLock.
+*/
+void
+RealTimeUserTimer::TimeWarped()
+{
+ ASSERT(fScheduled && fAbsolute);
+
+ // get the new real-time offset
+ bigtime_t oldRealTimeOffset = fRealTimeOffset;
+ fRealTimeOffset = rtc_boot_time();
+ if (fRealTimeOffset == oldRealTimeOffset)
+ return;
+
+ // cancel the kernel timer and reschedule it
+ cancel_timer(&fTimer);
+
+ fNextTime += oldRealTimeOffset - fRealTimeOffset;
+
+ ScheduleKernelTimer(system_time(), fInterval > 0);
+}
+
+
+void
+RealTimeUserTimer::HandleTimer()
+{
+ SystemTimeUserTimer::HandleTimer();
+
+ // remove from global list, if no longer scheduled
+ if (!fScheduled && fAbsolute) {
+ SpinLocker globalListLocker(sAbsoluteRealTimeTimersLock);
+ sAbsoluteRealTimeTimers.Remove(this);
+ }
+}
+
+
+// #pragma mark - TeamTimeUserTimer
+
+
+TeamTimeUserTimer::TeamTimeUserTimer(team_id teamID)
+ :
+ fTeamID(teamID),
+ fTeam(NULL)
+{
+}
+
+
+TeamTimeUserTimer::~TeamTimeUserTimer()
+{
+ ASSERT(fTeam == NULL);
+}
+
+
+void
+TeamTimeUserTimer::Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ // get the current time, but only if needed
+ bool nowValid = fTeam != NULL;
+ bigtime_t now = nowValid ? fTeam->CPUTime(false) : 0;
+
+ // Cancel the old timer, if still scheduled, and get the previous values.
+ if (fTeam != NULL) {
+ if (fScheduled) {
+ cancel_timer(&fTimer);
+ fScheduled = false;
+ }
+
+ _oldRemainingTime = fNextTime - now;
+ _oldInterval = fInterval;
+
+ fTeam->UserTimerDeactivated(this);
+ fTeam->ReleaseReference();
+ fTeam = NULL;
+ } else {
+ _oldRemainingTime = B_INFINITE_TIMEOUT;
+ _oldInterval = 0;
+ }
+
+ // schedule the new timer
+ fNextTime = nextTime;
+ fInterval = interval;
+ fOverrunCount = 0;
+
+ if (fNextTime == B_INFINITE_TIMEOUT)
+ return;
+
+ // Get the team. If it doesn't exist anymore, just don't schedule the
+ // timer anymore.
+ fTeam = Team::Get(fTeamID);
+ if (fTeam == NULL)
+ return;
+
+ fAbsolute = (flags & B_RELATIVE_TIMEOUT) == 0;
+
+ // convert relative to absolute timeouts
+ if (!fAbsolute) {
+ if (!nowValid)
+ now = fTeam->CPUTime(false);
+ fNextTime += now;
+ }
+
+ fTeam->UserTimerActivated(this);
+
+	// schedule/update the kernel timer
+ Update(NULL);
+}
+
+
+void
+TeamTimeUserTimer::GetInfo(bigtime_t& _remainingTime, bigtime_t& _interval,
+ uint32& _overrunCount)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ if (fTeam != NULL) {
+ _remainingTime = fNextTime - fTeam->CPUTime(false);
+ _interval = fInterval;
+ } else {
+ _remainingTime = B_INFINITE_TIMEOUT;
+ _interval = 0;
+ }
+
+ _overrunCount = fOverrunCount;
+}
+
+
+/*! Deactivates the timer, if it is activated.
+
+ The caller must hold the scheduler lock.
+*/
+void
+TeamTimeUserTimer::Deactivate()
+{
+ if (fTeam == NULL)
+ return;
+
+ // unschedule, if scheduled
+ if (fScheduled) {
+ cancel_timer(&fTimer);
+ fScheduled = false;
+ }
+
+ // deactivate
+ fTeam->UserTimerDeactivated(this);
+ fTeam->ReleaseReference();
+ fTeam = NULL;
+}
+
+
+/*! Starts/stops the timer as necessary, if it is active.
+
+ Called whenever threads of the team whose CPU time is referred to by the
+ timer are scheduled or unscheduled (or leave the team), or when the timer
+	was just set. Schedules a kernel timer for the remaining time, or cancels
+	it, as appropriate.
+
+ The caller must hold the scheduler lock.
+
+ \param unscheduledThread If not \c NULL, this is the thread that is
+ currently running and which is in the process of being unscheduled.
+*/
+void
+TeamTimeUserTimer::Update(Thread* unscheduledThread)
+{
+ if (fTeam == NULL)
+ return;
+
+ // determine how many of the team's threads are currently running
+ fRunningThreads = 0;
+ int32 cpuCount = smp_get_num_cpus();
+ for (int32 i = 0; i < cpuCount; i++) {
+ Thread* thread = gCPU[i].running_thread;
+ if (thread != unscheduledThread && thread->team == fTeam)
+ fRunningThreads++;
+ }
+
+ _Update(unscheduledThread != NULL);
+}
+
+
+/*! Called when the team's CPU time clock which this timer refers to has been
+ set.
+
+ The caller must hold the scheduler lock.
+
+ \param changedBy The value by which the clock has changed.
+*/
+void
+TeamTimeUserTimer::TimeWarped(bigtime_t changedBy)
+{
+ if (fTeam == NULL || changedBy == 0)
+ return;
+
+ // If this is a relative timer, adjust fNextTime by the value the clock has
+ // changed.
+ if (!fAbsolute)
+ fNextTime += changedBy;
+
+ // reschedule the kernel timer
+ _Update(false);
+}
+
+
+void
+TeamTimeUserTimer::HandleTimer()
+{
+ UserTimer::HandleTimer();
+
+ // If the timer is not periodic, it is no longer active. Otherwise
+ // reschedule the kernel timer.
+ if (fTeam != NULL) {
+ if (fInterval == 0) {
+ fTeam->UserTimerDeactivated(this);
+ fTeam->ReleaseReference();
+ fTeam = NULL;
+ } else {
+ UpdatePeriodicStartTime();
+ _Update(false);
+ }
+ }
+}
+
+
+/*! Schedules/cancels the kernel timer as necessary.
+
+ \c fRunningThreads must be up-to-date.
+ The caller must hold the scheduler lock.
+
+ \param unscheduling \c true, when the current thread is in the process of
+ being unscheduled.
+*/
+void
+TeamTimeUserTimer::_Update(bool unscheduling)
+{
+ // unschedule the kernel timer, if scheduled
+ if (fScheduled)
+ cancel_timer(&fTimer);
+
+ // if no more threads are running, we're done
+ if (fRunningThreads == 0) {
+ fScheduled = false;
+ return;
+ }
+
+ // There are still threads running. Reschedule the kernel timer.
+ bigtime_t now = fTeam->CPUTime(unscheduling);
+
+ // If periodic, check whether the start time is too far in the past.
+ if (fInterval > 0)
+ CheckPeriodicOverrun(now);
+
+ if (fNextTime > now) {
+ fTimer.schedule_time = system_time()
+ + (fNextTime - now + fRunningThreads - 1) / fRunningThreads;
+ // check for overflow
+ if (fTimer.schedule_time < 0)
+ fTimer.schedule_time = B_INFINITE_TIMEOUT;
+ } else
+ fTimer.schedule_time = 0;
+ fTimer.period = 0;
+ // We reschedule periodic timers manually in HandleTimer() to avoid
+ // rounding errors.
+
+ add_timer(&fTimer, &HandleTimerHook, fTimer.schedule_time,
+ B_ONE_SHOT_ABSOLUTE_TIMER | B_TIMER_USE_TIMER_STRUCT_TIMES
+ | B_TIMER_ACQUIRE_SCHEDULER_LOCK);
+ // We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions
+ // between setting/canceling the timer and the event handler.
+ // We use B_TIMER_USE_TIMER_STRUCT_TIMES, so period remains 0, which
+ // our base class expects.
+
+ fScheduled = true;
+}
+
+
+// #pragma mark - TeamUserTimeUserTimer
+
+
+TeamUserTimeUserTimer::TeamUserTimeUserTimer(team_id teamID)
+ :
+ fTeamID(teamID),
+ fTeam(NULL)
+{
+}
+
+
+TeamUserTimeUserTimer::~TeamUserTimeUserTimer()
+{
+ ASSERT(fTeam == NULL);
+}
+
+
+void
+TeamUserTimeUserTimer::Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ // get the current time, but only if needed
+ bool nowValid = fTeam != NULL;
+ bigtime_t now = nowValid ? fTeam->UserCPUTime() : 0;
+
+ // Cancel the old timer, if still active, and get the previous values.
+ if (fTeam != NULL) {
+ _oldRemainingTime = fNextTime - now;
+ _oldInterval = fInterval;
+
+ fTeam->UserTimerDeactivated(this);
+ fTeam->ReleaseReference();
+ fTeam = NULL;
+ } else {
+ _oldRemainingTime = B_INFINITE_TIMEOUT;
+ _oldInterval = 0;
+ }
+
+ // schedule the new timer
+ fNextTime = nextTime;
+ fInterval = interval;
+ fOverrunCount = 0;
+
+ if (fNextTime == B_INFINITE_TIMEOUT)
+ return;
+
+ // Get the team. If it doesn't exist anymore, just don't schedule the
+ // timer anymore.
+ fTeam = Team::Get(fTeamID);
+ if (fTeam == NULL)
+ return;
+
+ // convert relative to absolute timeouts
+ if ((flags & B_RELATIVE_TIMEOUT) != 0) {
+ if (!nowValid)
+			now = fTeam->UserCPUTime();
+ fNextTime += now;
+ }
+
+ fTeam->UserTimerActivated(this);
+
+ // fire the event, if already timed out
+ Check();
+}
+
+
+void
+TeamUserTimeUserTimer::GetInfo(bigtime_t& _remainingTime, bigtime_t& _interval,
+ uint32& _overrunCount)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ if (fTeam != NULL) {
+ _remainingTime = fNextTime - fTeam->UserCPUTime();
+ _interval = fInterval;
+ } else {
+ _remainingTime = B_INFINITE_TIMEOUT;
+ _interval = 0;
+ }
+
+ _overrunCount = fOverrunCount;
+}
+
+
+/*! Deactivates the timer, if it is activated.
+
+ The caller must hold the scheduler lock.
+*/
+void
+TeamUserTimeUserTimer::Deactivate()
+{
+ if (fTeam == NULL)
+ return;
+
+ // deactivate
+ fTeam->UserTimerDeactivated(this);
+ fTeam->ReleaseReference();
+ fTeam = NULL;
+}
+
+
+/*! Checks whether the timer is up, firing an event, if so.
+
+ The caller must hold the scheduler lock.
+*/
+void
+TeamUserTimeUserTimer::Check()
+{
+ if (fTeam == NULL)
+ return;
+
+ // check whether we need to fire the event yet
+ bigtime_t now = fTeam->UserCPUTime();
+ if (now < fNextTime)
+ return;
+
+ HandleTimer();
+
+ // If the timer is not periodic, it is no longer active. Otherwise compute
+ // the event time.
+ if (fInterval == 0) {
+ fTeam->UserTimerDeactivated(this);
+ fTeam->ReleaseReference();
+ fTeam = NULL;
+ return;
+ }
+
+ // First validate fNextTime, then increment it, so that fNextTime is > now
+ // (CheckPeriodicOverrun() only makes it > now - fInterval).
+ CheckPeriodicOverrun(now);
+ fNextTime += fInterval;
+ fScheduled = true;
+}
+
+
+// #pragma mark - ThreadTimeUserTimer
+
+
+ThreadTimeUserTimer::ThreadTimeUserTimer(thread_id threadID)
+ :
+ fThreadID(threadID),
+ fThread(NULL)
+{
+}
+
+
+ThreadTimeUserTimer::~ThreadTimeUserTimer()
+{
+ ASSERT(fThread == NULL);
+}
+
+
+void
+ThreadTimeUserTimer::Schedule(bigtime_t nextTime, bigtime_t interval,
+ uint32 flags, bigtime_t& _oldRemainingTime, bigtime_t& _oldInterval)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ // get the current time, but only if needed
+ bool nowValid = fThread != NULL;
+ bigtime_t now = nowValid ? fThread->CPUTime(false) : 0;
+
+ // Cancel the old timer, if still scheduled, and get the previous values.
+ if (fThread != NULL) {
+ if (fScheduled) {
+ cancel_timer(&fTimer);
+ fScheduled = false;
+ }
+
+ _oldRemainingTime = fNextTime - now;
+ _oldInterval = fInterval;
+
+ fThread->UserTimerDeactivated(this);
+ fThread->ReleaseReference();
+ fThread = NULL;
+ } else {
+ _oldRemainingTime = B_INFINITE_TIMEOUT;
+ _oldInterval = 0;
+ }
+
+ // schedule the new timer
+ fNextTime = nextTime;
+ fInterval = interval;
+ fOverrunCount = 0;
+
+ if (fNextTime == B_INFINITE_TIMEOUT)
+ return;
+
+ // Get the thread. If it doesn't exist anymore, just don't schedule the
+ // timer anymore.
+ fThread = Thread::Get(fThreadID);
+ if (fThread == NULL)
+ return;
+
+ fAbsolute = (flags & B_RELATIVE_TIMEOUT) == 0;
+
+ // convert relative to absolute timeouts
+ if (!fAbsolute) {
+ if (!nowValid)
+ now = fThread->CPUTime(false);
+ fNextTime += now;
+ }
+
+ fThread->UserTimerActivated(this);
+
+ // If the thread is currently running, also schedule a kernel timer.
+ if (fThread->cpu != NULL)
+ Start();
+}
+
+
+void
+ThreadTimeUserTimer::GetInfo(bigtime_t& _remainingTime, bigtime_t& _interval,
+ uint32& _overrunCount)
+{
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ if (fThread != NULL) {
+ _remainingTime = fNextTime - fThread->CPUTime(false);
+ _interval = fInterval;
+ } else {
+ _remainingTime = B_INFINITE_TIMEOUT;
+ _interval = 0;
+ }
+
+ _overrunCount = fOverrunCount;
+}
+
+
+/*! Deactivates the timer, if it is activated.
+
+ The caller must hold the scheduler lock.
+*/
+void
+ThreadTimeUserTimer::Deactivate()
+{
+ if (fThread == NULL)
+ return;
+
+ // unschedule, if scheduled
+ if (fScheduled) {
+ cancel_timer(&fTimer);
+ fScheduled = false;
+ }
+
+ // deactivate
+ fThread->UserTimerDeactivated(this);
+ fThread->ReleaseReference();
+ fThread = NULL;
+}
+
+
+/*! Starts the timer, if it is active.
+
+ Called when the thread whose CPU time is referred to by the timer is
+	scheduled, or when the timer was just set and the thread is already
+ running. Schedules a kernel timer for the remaining time.
+
+ The caller must hold the scheduler lock.
+*/
+void
+ThreadTimeUserTimer::Start()
+{
+ if (fThread == NULL)
+ return;
+
+ ASSERT(!fScheduled);
+
+ // add the kernel timer
+ bigtime_t now = fThread->CPUTime(false);
+
+ // If periodic, check whether the start time is too far in the past.
+ if (fInterval > 0)
+ CheckPeriodicOverrun(now);
+
+ if (fNextTime > now) {
+ fTimer.schedule_time = system_time() + fNextTime - now;
+ // check for overflow
+ if (fTimer.schedule_time < 0)
+ fTimer.schedule_time = B_INFINITE_TIMEOUT;
+ } else
+ fTimer.schedule_time = 0;
+ fTimer.period = 0;
+
+ uint32 flags = B_ONE_SHOT_ABSOLUTE_TIMER
+ | B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_ACQUIRE_SCHEDULER_LOCK;
+ // We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions
+ // between setting/canceling the timer and the event handler.
+
+ add_timer(&fTimer, &HandleTimerHook, fTimer.schedule_time, flags);
+
+ fScheduled = true;
+}
+
+
+/*! Stops the timer, if it is active.
+
+ Called when the thread whose CPU time is referred to by the timer is
+	unscheduled, or when the timer is canceled.
+
+ The caller must hold the scheduler lock.
+*/
+void
+ThreadTimeUserTimer::Stop()
+{
+ if (fThread == NULL)
+ return;
+
+ ASSERT(fScheduled);
+
+ // cancel the kernel timer
+ cancel_timer(&fTimer);
+ fScheduled = false;
+
+ // TODO: To avoid odd race conditions, we should check the current time of
+ // the thread (ignoring the time since last_time) and manually fire the
+ // user event, if necessary.
+}
+
+
+/*!	Called when the thread's CPU time clock which this timer refers to has
+	been set.
+
+ The caller must hold the scheduler lock.
+
+ \param changedBy The value by which the clock has changed.
+*/
+void
+ThreadTimeUserTimer::TimeWarped(bigtime_t changedBy)
+{
+ if (fThread == NULL || changedBy == 0)
+ return;
+
+ // If this is a relative timer, adjust fNextTime by the value the clock has
+ // changed.
+ if (!fAbsolute)
+ fNextTime += changedBy;
+
+ // reschedule the kernel timer
+ if (fScheduled) {
+ Stop();
+ Start();
+ }
+}
+
+
+void
+ThreadTimeUserTimer::HandleTimer()
+{
+ UserTimer::HandleTimer();
+
+ if (fThread != NULL) {
+ // If the timer is periodic, reschedule the kernel timer. Otherwise it
+ // is no longer active.
+ if (fInterval > 0) {
+ UpdatePeriodicStartTime();
+ Start();
+ } else {
+ fThread->UserTimerDeactivated(this);
+ fThread->ReleaseReference();
+ fThread = NULL;
+ }
+ }
+}
+
+
+// #pragma mark - UserTimerList
+
+
+UserTimerList::UserTimerList()
+{
+}
+
+
+UserTimerList::~UserTimerList()
+{
+ ASSERT(fTimers.IsEmpty());
+}
+
+
+/*! Returns the user timer with the given ID.
+
+ \param id The timer's ID
+ \return The user timer with the given ID or \c NULL, if there is no such
+ timer.
+*/
+UserTimer*
+UserTimerList::TimerFor(int32 id) const
+{
+ // TODO: Use a more efficient data structure. E.g. a sorted array.
+ for (TimerList::ConstIterator it = fTimers.GetIterator();
+ UserTimer* timer = it.Next();) {
+ if (timer->ID() == id)
+ return timer;
+ }
+
+ return NULL;
+}
+
+
+/*! Adds the given user timer and assigns it an ID.
+
+ \param timer The timer to be added.
+*/
+void
+UserTimerList::AddTimer(UserTimer* timer)
+{
+ int32 id = timer->ID();
+ if (id < 0) {
+		// user-defined timer -- find an unused ID
+ id = USER_TIMER_FIRST_USER_DEFINED_ID;
+ UserTimer* insertAfter = NULL;
+ for (TimerList::Iterator it = fTimers.GetIterator();
+ UserTimer* other = it.Next();) {
+ if (other->ID() > id)
+ break;
+ if (other->ID() == id)
+ id++;
+ insertAfter = other;
+ }
+
+ // insert the timer
+ timer->SetID(id);
+ fTimers.InsertAfter(insertAfter, timer);
+ } else {
+ // default timer -- find the insertion point
+ UserTimer* insertAfter = NULL;
+ for (TimerList::Iterator it = fTimers.GetIterator();
+ UserTimer* other = it.Next();) {
+ if (other->ID() > id)
+ break;
+ if (other->ID() == id) {
+ panic("UserTimerList::AddTimer(): timer with ID %" B_PRId32
+ " already exists!", id);
+ }
+ insertAfter = other;
+ }
+
+ // insert the timer
+ fTimers.InsertAfter(insertAfter, timer);
+ }
+}
+
+
+/*! Deletes all (or all user-defined) user timers.
+
+ \param userDefinedOnly If \c true, only the user-defined timers are deleted,
+ otherwise all timers are deleted.
+ \return The number of user-defined timers that were removed and deleted.
+*/
+int32
+UserTimerList::DeleteTimers(bool userDefinedOnly)
+{
+ int32 userDefinedCount = 0;
+
+ for (TimerList::Iterator it = fTimers.GetIterator();
+ UserTimer* timer = it.Next();) {
+ if (timer->ID() < USER_TIMER_FIRST_USER_DEFINED_ID) {
+ if (userDefinedOnly)
+ continue;
+ } else
+ userDefinedCount++;
+
+ // remove, cancel, and delete the timer
+ it.Remove();
+ timer->Cancel();
+ delete timer;
+ }
+
+ return userDefinedCount;
+}
+
+
+// #pragma mark - private
+
+
+static int32
+create_timer(clockid_t clockID, int32 timerID, Team* team, Thread* thread,
+ uint32 flags, const struct sigevent& event,
+ ThreadCreationAttributes* threadAttributes, bool isDefaultEvent)
+{
+ // create the timer object
+ UserTimer* timer;
+ switch (clockID) {
+ case CLOCK_MONOTONIC:
+ timer = new(std::nothrow) SystemTimeUserTimer;
+ break;
+
+ case CLOCK_REALTIME:
+ timer = new(std::nothrow) RealTimeUserTimer;
+ break;
+
+ case CLOCK_THREAD_CPUTIME_ID:
+ timer = new(std::nothrow) ThreadTimeUserTimer(
+ thread_get_current_thread()->id);
+ break;
+
+ case CLOCK_PROCESS_CPUTIME_ID:
+ if (team == NULL)
+ return B_BAD_VALUE;
+ timer = new(std::nothrow) TeamTimeUserTimer(team->id);
+ break;
+
+ case CLOCK_PROCESS_USER_CPUTIME_ID:
+ if (team == NULL)
+ return B_BAD_VALUE;
+ timer = new(std::nothrow) TeamUserTimeUserTimer(team->id);
+ break;
+
+ default:
+ {
+			// The clock ID is the ID of the team whose CPU time the clock
+			// refers to. Check whether the team exists and we have permission
+			// to access its clock.
+ if (clockID <= 0)
+ return B_BAD_VALUE;
+ if (clockID == team_get_kernel_team_id())
+ return B_NOT_ALLOWED;
+
+ Team* timedTeam = Team::GetAndLock(clockID);
+ if (timedTeam == NULL)
+ return B_BAD_VALUE;
+
+ uid_t uid = geteuid();
+ uid_t teamUID = timedTeam->effective_uid;
+
+ timedTeam->UnlockAndReleaseReference();
+
+ if (uid != 0 && uid != teamUID)
+ return B_NOT_ALLOWED;
+
+ timer = new(std::nothrow) TeamTimeUserTimer(clockID);
+ break;
+ }
+ }
+
+ if (timer == NULL)
+ return B_NO_MEMORY;
+ ObjectDeleter<UserTimer> timerDeleter(timer);
+
+ if (timerID >= 0)
+ timer->SetID(timerID);
+
+ SignalEvent* signalEvent = NULL;
+
+ switch (event.sigev_notify) {
+ case SIGEV_NONE:
+ // the timer's event remains NULL
+ break;
+
+ case SIGEV_SIGNAL:
+ {
+ if (event.sigev_signo <= 0 || event.sigev_signo > MAX_SIGNAL_NUMBER)
+ return B_BAD_VALUE;
+
+ if (thread != NULL && (flags & USER_TIMER_SIGNAL_THREAD) != 0) {
+ // The signal shall be sent to the thread.
+ signalEvent = ThreadSignalEvent::Create(thread,
+ event.sigev_signo, SI_TIMER, 0, team->id);
+ } else {
+ // The signal shall be sent to the team.
+ signalEvent = TeamSignalEvent::Create(team, event.sigev_signo,
+ SI_TIMER, 0);
+ }
+
+ if (signalEvent == NULL)
+ return B_NO_MEMORY;
+
+ timer->SetEvent(signalEvent);
+ break;
+ }
+
+ case SIGEV_THREAD:
+ {
+ if (threadAttributes == NULL)
+ return B_BAD_VALUE;
+
+ CreateThreadEvent* event
+ = CreateThreadEvent::Create(*threadAttributes);
+ if (event == NULL)
+ return B_NO_MEMORY;
+
+ timer->SetEvent(event);
+ break;
+ }
+
+ default:
+ return B_BAD_VALUE;
+ }
+
+ // add it to the team/thread
+ TimerLocker timerLocker;
+ timerLocker.Lock(team, thread);
+
+ status_t error = thread != NULL
+ ? thread->AddUserTimer(timer) : team->AddUserTimer(timer);
+ if (error != B_OK)
+ return error;
+
+ // set a signal event's user value
+ if (signalEvent != NULL) {
+ // If no sigevent structure was given, use the timer ID.
+ union sigval signalValue = event.sigev_value;
+ if (isDefaultEvent)
+ signalValue.sival_int = timer->ID();
+
+ signalEvent->SetUserValue(signalValue);
+ }
+
+ return timerDeleter.Detach()->ID();
+}
+
+
+/*! Called when the CPU time clock of the given thread has been set.
+
+ The caller must hold the scheduler lock.
+
+ \param thread The thread whose CPU time clock has been set.
+ \param changedBy The value by which the CPU time clock has changed
+ (new = old + changedBy).
+*/
+static void
+thread_clock_changed(Thread* thread, bigtime_t changedBy)
+{
+ for (ThreadTimeUserTimerList::ConstIterator it
+ = thread->CPUTimeUserTimerIterator();
+ ThreadTimeUserTimer* timer = it.Next();) {
+ timer->TimeWarped(changedBy);
+ }
+}
+
+
+/*! Called when the CPU time clock of the given team has been set.
+
+ The caller must hold the scheduler lock.
+
+ \param team The team whose CPU time clock has been set.
+ \param changedBy The value by which the CPU time clock has changed
+ (new = old + changedBy).
+*/
+static void
+team_clock_changed(Team* team, bigtime_t changedBy)
+{
+ for (TeamTimeUserTimerList::ConstIterator it
+ = team->CPUTimeUserTimerIterator();
+ TeamTimeUserTimer* timer = it.Next();) {
+ timer->TimeWarped(changedBy);
+ }
+}
+
+
+// #pragma mark - kernel private
+
+
+/*! Creates the pre-defined user timers for the given thread.
+	The thread may not have been added to its team yet, hence the team must be
+	passed explicitly.
+
+ \param team The thread's (future) team.
+ \param thread The thread whose pre-defined timers shall be created.
+	\return \c B_OK, when everything went fine, another error code otherwise.
+*/
+status_t
+user_timer_create_thread_timers(Team* team, Thread* thread)
+{
+ // create a real time user timer
+ struct sigevent event;
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+
+ int32 timerID = create_timer(CLOCK_MONOTONIC, USER_TIMER_REAL_TIME_ID,
+ team, thread, USER_TIMER_SIGNAL_THREAD, event, NULL, true);
+ if (timerID < 0)
+ return timerID;
+
+ return B_OK;
+}
+
+
+/*! Creates the pre-defined user timers for the given team.
+
+ \param team The team whose pre-defined timers shall be created.
+	\return \c B_OK, when everything went fine, another error code otherwise.
+*/
+status_t
+user_timer_create_team_timers(Team* team)
+{
+ // create a real time user timer
+ struct sigevent event;
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+
+ int32 timerID = create_timer(CLOCK_MONOTONIC, USER_TIMER_REAL_TIME_ID,
+ team, NULL, 0, event, NULL, true);
+ if (timerID < 0)
+ return timerID;
+
+ // create a total CPU time user timer
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGPROF;
+
+ timerID = create_timer(CLOCK_PROCESS_CPUTIME_ID,
+ USER_TIMER_TEAM_TOTAL_TIME_ID, team, NULL, 0, event, NULL, true);
+ if (timerID < 0)
+ return timerID;
+
+ // create a user CPU time user timer
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGVTALRM;
+
+ timerID = create_timer(CLOCK_PROCESS_USER_CPUTIME_ID,
+ USER_TIMER_TEAM_USER_TIME_ID, team, NULL, 0, event, NULL, true);
+ if (timerID < 0)
+ return timerID;
+
+ return B_OK;
+}
+
+
+status_t
+user_timer_get_clock(clockid_t clockID, bigtime_t& _time)
+{
+ switch (clockID) {
+ case CLOCK_MONOTONIC:
+ _time = system_time();
+ return B_OK;
+
+ case CLOCK_REALTIME:
+ _time = real_time_clock_usecs();
+ return B_OK;
+
+ case CLOCK_THREAD_CPUTIME_ID:
+ {
+ Thread* thread = thread_get_current_thread();
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ _time = thread->CPUTime(false);
+ return B_OK;
+ }
+
+ case CLOCK_PROCESS_USER_CPUTIME_ID:
+ {
+ Team* team = thread_get_current_thread()->team;
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ _time = team->UserCPUTime();
+ return B_OK;
+ }
+
+ case CLOCK_PROCESS_CPUTIME_ID:
+ default:
+ {
+ // get the ID of the target team (or the respective placeholder)
+ team_id teamID;
+ if (clockID == CLOCK_PROCESS_CPUTIME_ID) {
+ teamID = B_CURRENT_TEAM;
+ } else {
+ if (clockID < 0)
+ return B_BAD_VALUE;
+ if (clockID == team_get_kernel_team_id())
+ return B_NOT_ALLOWED;
+
+ teamID = clockID;
+ }
+
+ // get the team
+ Team* team = Team::Get(teamID);
+ if (team == NULL)
+ return B_BAD_VALUE;
+ BReference<Team> teamReference(team, true);
+
+ // get the time
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ _time = team->CPUTime(false);
+
+ return B_OK;
+ }
+ }
+}
+
+
+void
+user_timer_real_time_clock_changed()
+{
+ // we need to update all absolute real-time timers
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ SpinLocker globalListLocker(sAbsoluteRealTimeTimersLock);
+
+ for (RealTimeUserTimerList::Iterator it
+ = sAbsoluteRealTimeTimers.GetIterator();
+ RealTimeUserTimer* timer = it.Next();) {
+ timer->TimeWarped();
+ }
+}
+
+
+void
+user_timer_stop_cpu_timers(Thread* thread, Thread* nextThread)
+{
+ // stop thread timers
+ for (ThreadTimeUserTimerList::ConstIterator it
+ = thread->CPUTimeUserTimerIterator();
+ ThreadTimeUserTimer* timer = it.Next();) {
+ timer->Stop();
+ }
+
+ // update team timers
+ if (nextThread == NULL || nextThread->team != thread->team) {
+ for (TeamTimeUserTimerList::ConstIterator it
+ = thread->team->CPUTimeUserTimerIterator();
+ TeamTimeUserTimer* timer = it.Next();) {
+ timer->Update(thread);
+ }
+ }
+}
+
+
+void
+user_timer_continue_cpu_timers(Thread* thread, Thread* previousThread)
+{
+ // update team timers
+ if (previousThread == NULL || previousThread->team != thread->team) {
+ for (TeamTimeUserTimerList::ConstIterator it
+ = thread->team->CPUTimeUserTimerIterator();
+ TeamTimeUserTimer* timer = it.Next();) {
+ timer->Update(NULL);
+ }
+ }
+
+ // start thread timers
+ for (ThreadTimeUserTimerList::ConstIterator it
+ = thread->CPUTimeUserTimerIterator();
+ ThreadTimeUserTimer* timer = it.Next();) {
+ timer->Start();
+ }
+}
+
+
+void
+user_timer_check_team_user_timers(Team* team)
+{
+ for (TeamUserTimeUserTimerList::ConstIterator it
+ = team->UserTimeUserTimerIterator();
+ TeamUserTimeUserTimer* timer = it.Next();) {
+ timer->Check();
+ }
+}
+
+
+// #pragma mark - syscalls
+
+
+status_t
+_user_get_clock(clockid_t clockID, bigtime_t* userTime)
+{
+ // get the time
+ bigtime_t time;
+ status_t error = user_timer_get_clock(clockID, time);
+ if (error != B_OK)
+ return error;
+
+ // copy the value back to userland
+ if (userTime == NULL || !IS_USER_ADDRESS(userTime)
+ || user_memcpy(userTime, &time, sizeof(time)) != B_OK) {
+ return B_BAD_ADDRESS;
+ }
+
+ return B_OK;
+}
+
+
+status_t
+_user_set_clock(clockid_t clockID, bigtime_t time)
+{
+ switch (clockID) {
+ case CLOCK_MONOTONIC:
+ return B_BAD_VALUE;
+
+ case CLOCK_REALTIME:
+ // only root may set the time
+ if (geteuid() != 0)
+ return B_NOT_ALLOWED;
+
+ set_real_time_clock_usecs(time);
+ return B_OK;
+
+ case CLOCK_THREAD_CPUTIME_ID:
+ {
+ Thread* thread = thread_get_current_thread();
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ bigtime_t diff = time - thread->CPUTime(false);
+ thread->cpu_clock_offset += diff;
+
+ thread_clock_changed(thread, diff);
+ return B_OK;
+ }
+
+ case CLOCK_PROCESS_USER_CPUTIME_ID:
+			// not supported -- this clock is a Haiku-internal extension
+ return B_BAD_VALUE;
+
+ case CLOCK_PROCESS_CPUTIME_ID:
+ default:
+ {
+ // get the ID of the target team (or the respective placeholder)
+ team_id teamID;
+ if (clockID == CLOCK_PROCESS_CPUTIME_ID) {
+ teamID = B_CURRENT_TEAM;
+ } else {
+ if (clockID < 0)
+ return B_BAD_VALUE;
+ if (clockID == team_get_kernel_team_id())
+ return B_NOT_ALLOWED;
+
+ teamID = clockID;
+ }
+
+ // get the team
+ Team* team = Team::Get(teamID);
+ if (team == NULL)
+ return B_BAD_VALUE;
+ BReference<Team> teamReference(team, true);
+
+ // set the time offset
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ bigtime_t diff = time - team->CPUTime(false);
+ team->cpu_clock_offset += diff;
+
+ team_clock_changed(team, diff);
+ return B_OK;
+ }
+ }
+
+ return B_OK;
+}
+
+
+int32
+_user_create_timer(clockid_t clockID, thread_id threadID, uint32 flags,
+ const struct sigevent* userEvent,
+ const thread_creation_attributes* userThreadAttributes)
+{
+ // copy the sigevent structure from userland
+ struct sigevent event;
+ if (userEvent != NULL) {
+ if (!IS_USER_ADDRESS(userEvent)
+ || user_memcpy(&event, userEvent, sizeof(event)) != B_OK) {
+ return B_BAD_ADDRESS;
+ }
+ } else {
+ // none given -- use defaults
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+ }
+
+ // copy thread creation attributes from userland, if specified
+ char nameBuffer[B_OS_NAME_LENGTH];
+ ThreadCreationAttributes threadAttributes;
+ if (event.sigev_notify == SIGEV_THREAD) {
+ status_t error = threadAttributes.InitFromUserAttributes(
+ userThreadAttributes, nameBuffer);
+ if (error != B_OK)
+ return error;
+ }
+
+ // get team and thread
+ Team* team = thread_get_current_thread()->team;
+ Thread* thread = NULL;
+ if (threadID >= 0) {
+ thread = Thread::GetAndLock(threadID);
+ if (thread == NULL)
+ return B_BAD_THREAD_ID;
+
+ thread->Unlock();
+ }
+ BReference<Thread> threadReference(thread, true);
+
+ // create the timer
+ return create_timer(clockID, -1, team, thread, flags, event,
+ userThreadAttributes != NULL ? &threadAttributes : NULL,
+ userEvent == NULL);
+}
+
+
+status_t
+_user_delete_timer(int32 timerID, thread_id threadID)
+{
+ // can only delete user-defined timers
+ if (timerID < USER_TIMER_FIRST_USER_DEFINED_ID)
+ return B_BAD_VALUE;
+
+ // get the timer
+ TimerLocker timerLocker;
+ UserTimer* timer;
+ status_t error = timerLocker.LockAndGetTimer(threadID, timerID, timer);
+ if (error != B_OK)
+ return error;
+
+ // cancel, remove, and delete it
+ timer->Cancel();
+
+ if (threadID >= 0)
+ timerLocker.thread->RemoveUserTimer(timer);
+ else
+ timerLocker.team->RemoveUserTimer(timer);
+
+ delete timer;
+
+ return B_OK;
+}
+
+
+status_t
+_user_get_timer(int32 timerID, thread_id threadID,
+ struct user_timer_info* userInfo)
+{
+ // get the timer
+ TimerLocker timerLocker;
+ UserTimer* timer;
+ status_t error = timerLocker.LockAndGetTimer(threadID, timerID, timer);
+ if (error != B_OK)
+ return error;
+
+ // get the info
+ user_timer_info info;
+ timer->GetInfo(info.remaining_time, info.interval, info.overrun_count);
+
+ // Sanitize remaining_time. If it's <= 0, we set it to 1, the least valid
+ // value.
+ if (info.remaining_time <= 0)
+ info.remaining_time = 1;
+
+ timerLocker.Unlock();
+
+ // copy it back to userland
+ if (userInfo != NULL
+ && (!IS_USER_ADDRESS(userInfo)
+ || user_memcpy(userInfo, &info, sizeof(info)) != B_OK)) {
+ return B_BAD_ADDRESS;
+ }
+
+ return B_OK;
+}
+
+
+status_t
+_user_set_timer(int32 timerID, thread_id threadID, bigtime_t startTime,
+ bigtime_t interval, uint32 flags, struct user_timer_info* userOldInfo)
+{
+ // check the values
+ if (startTime < 0 || interval < 0)
+ return B_BAD_VALUE;
+
+ // get the timer
+ TimerLocker timerLocker;
+ UserTimer* timer;
+ status_t error = timerLocker.LockAndGetTimer(threadID, timerID, timer);
+ if (error != B_OK)
+ return error;
+
+ // schedule the timer
+ user_timer_info oldInfo;
+ timer->Schedule(startTime, interval, flags, oldInfo.remaining_time,
+ oldInfo.interval);
+
+ // Sanitize remaining_time. If it's <= 0, we set it to 1, the least valid
+ // value.
+ if (oldInfo.remaining_time <= 0)
+ oldInfo.remaining_time = 1;
+
+ timerLocker.Unlock();
+
+ // copy back the old info
+ if (userOldInfo != NULL
+ && (!IS_USER_ADDRESS(userOldInfo)
+ || user_memcpy(userOldInfo, &oldInfo, sizeof(oldInfo)) != B_OK)) {
+ return B_BAD_ADDRESS;
+ }
+
+ return B_OK;
+}
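From userland, the syscalls above are what the POSIX clock and timer functions completed by this commit map to. For orientation, a hedged sketch of the corresponding userland usage (plain POSIX API, nothing Haiku-specific assumed):

	// Sketch: a periodic one-second CLOCK_MONOTONIC timer delivering SIGALRM,
	// which ends up in _user_create_timer()/_user_set_timer() above.
	#include <signal.h>
	#include <time.h>

	static void
	setup_timer()
	{
		struct sigevent event;
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = 0;

		timer_t timer;
		timer_create(CLOCK_MONOTONIC, &event, &timer);

		struct itimerspec spec;
		spec.it_value.tv_sec = 1;		// first expiration after one second
		spec.it_value.tv_nsec = 0;
		spec.it_interval.tv_sec = 1;	// then every second
		spec.it_interval.tv_nsec = 0;
		timer_settime(timer, 0, &spec, NULL);
	}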
diff --git a/src/system/kernel/arch/arm/arch_thread.cpp b/src/system/kernel/arch/arm/arch_thread.cpp
index 46e63f5..42ba058 100644
--- a/src/system/kernel/arch/arm/arch_thread.cpp
+++ b/src/system/kernel/arch/arm/arch_thread.cpp
@@ -63,47 +63,11 @@ arch_thread_init_thread_struct(Thread *thread)
}
-status_t
-arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
- void (*entry_func)(void), void (*exit_func)(void))
+void
+arch_thread_init_kthread_stack(Thread* thread, void* _stack, void* _stackTop,
+ void (*function)(void*), const void* data)
{
-/* addr_t *kstack = (addr_t *)t->kernel_stack_base;
- addr_t *kstackTop = (addr_t *)t->kernel_stack_base;
-
- // clear the kernel stack
-#ifdef DEBUG_KERNEL_STACKS
-# ifdef STACK_GROWS_DOWNWARDS
- memset((void *)((addr_t)kstack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0,
- KERNEL_STACK_SIZE);
-# else
- memset(kstack, 0, KERNEL_STACK_SIZE);
-# endif
-#else
- memset(kstack, 0, KERNEL_STACK_SIZE);
-#endif
-
- // space for frame pointer and return address, and stack frames must be
- // 16 byte aligned
- kstackTop -= 2;
- kstackTop = (addr_t*)((addr_t)kstackTop & ~0xf);
-
- // LR, CR, r2, r13-r31, f13-f31, as pushed by m68k_context_switch()
- kstackTop -= 22 + 2 * 19;
-
- // let LR point to m68k_kernel_thread_root()
- kstackTop[0] = (addr_t)&m68k_kernel_thread_root;
-
- // the arguments of m68k_kernel_thread_root() are the functions to call,
- // provided in registers r13-r15
- kstackTop[3] = (addr_t)entry_func;
- kstackTop[4] = (addr_t)start_func;
- kstackTop[5] = (addr_t)exit_func;
-
- // save this stack position
- t->arch_info.sp = (void *)kstackTop;
-*/
#warning ARM:WRITEME
- return B_OK;
}
@@ -157,14 +121,15 @@ arch_on_signal_stack(Thread *thread)
status_t
-arch_setup_signal_frame(Thread *thread, struct sigaction *sa, int sig, int sigMask)
+arch_setup_signal_frame(Thread *thread, struct sigaction *sa,
+ struct signal_frame_data *signalFrameData)
{
return B_ERROR;
}
int64
-arch_restore_signal_frame(void)
+arch_restore_signal_frame(struct signal_frame_data* signalFrameData)
{
return 0;
}
diff --git a/src/system/kernel/arch/m68k/arch_debug.cpp b/src/system/kernel/arch/m68k/arch_debug.cpp
index 5ddbcda..eaa3819 100644
--- a/src/system/kernel/arch/m68k/arch_debug.cpp
+++ b/src/system/kernel/arch/m68k/arch_debug.cpp
@@ -129,7 +129,7 @@ stack_trace(int argc, char **argv)
} else {
// TODO: Add support for stack traces of other threads.
/* thread_id id = strtoul(argv[1], NULL, 0);
- thread = thread_get_thread_struct_locked(id);
+ thread = Thread::GetDebug(id);
if (thread == NULL) {
kprintf("could not find thread %ld\n", id);
return 0;
diff --git a/src/system/kernel/arch/m68k/arch_int.cpp b/src/system/kernel/arch/m68k/arch_int.cpp
index 0569f0a..a982a75 100644
--- a/src/system/kernel/arch/m68k/arch_int.cpp
+++ b/src/system/kernel/arch/m68k/arch_int.cpp
@@ -23,6 +23,7 @@
#include <smp.h>
#include <thread.h>
#include <timer.h>
+#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm/vm.h>
@@ -296,18 +297,19 @@ dprintf("handling I/O interrupts done\n");
int state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
scheduler_reschedule();
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
restore_interrupts(state);
} else if (hardwareInterrupt && thread->post_interrupt_callback != NULL) {
- restore_interrupts(state);
void (*callback)(void*) = thread->post_interrupt_callback;
void* data = thread->post_interrupt_data;
thread->post_interrupt_callback = NULL;
thread->post_interrupt_data = NULL;
+ restore_interrupts(state);
+
callback(data);
}
diff --git a/src/system/kernel/arch/m68k/arch_thread.cpp b/src/system/kernel/arch/m68k/arch_thread.cpp
index b5e0e8f..36d196c 100644
--- a/src/system/kernel/arch/m68k/arch_thread.cpp
+++ b/src/system/kernel/arch/m68k/arch_thread.cpp
@@ -146,10 +146,11 @@ arch_thread_init_thread_struct(Thread *thread)
}
-status_t
-arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
- void (*entry_func)(void), void (*exit_func)(void))
+void
+arch_thread_init_kthread_stack(Thread* thread, void* _stack, void* _stackTop,
+ void (*function)(void*), const void* data)
{
+#if 0
addr_t *kstack = (addr_t *)t->kernel_stack_base;
addr_t *kstackTop = (addr_t *)t->kernel_stack_base;
@@ -186,6 +187,9 @@ arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
t->arch_info.sp = (void *)kstackTop;
return B_OK;
+#else
+ panic("arch_thread_init_kthread_stack(): Implement me!");
+#endif
}
@@ -237,14 +241,15 @@ arch_on_signal_stack(Thread *thread)
status_t
-arch_setup_signal_frame(Thread *thread, struct sigaction *sa, int sig, int sigMask)
+arch_setup_signal_frame(Thread *thread, struct sigaction *sa,
+ struct signal_frame_data *signalFrameData)
{
return B_ERROR;
}
int64
-arch_restore_signal_frame(void)
+arch_restore_signal_frame(struct signal_frame_data* signalFrameData)
{
return 0;
}
diff --git a/src/system/kernel/arch/mipsel/arch_thread.cpp b/src/system/kernel/arch/mipsel/arch_thread.cpp
index 0730a98..5cc0a2d 100644
--- a/src/system/kernel/arch/mipsel/arch_thread.cpp
+++ b/src/system/kernel/arch/mipsel/arch_thread.cpp
@@ -86,12 +86,11 @@ arch_thread_init_thread_struct(Thread *thread)
}
-status_t
-arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
- void (*entry_func)(void), void (*exit_func)(void))
+void
+arch_thread_init_kthread_stack(Thread* thread, void* _stack, void* _stackTop,
+ void (*function)(void*), const void* data)
{
#warning IMPLEMENT arch_thread_init_kthread_stack
- return B_ERROR;
}
@@ -136,8 +135,8 @@ arch_on_signal_stack(Thread *thread)
status_t
-arch_setup_signal_frame(Thread *thread, struct sigaction *sa, int sig,
- int sigMask)
+arch_setup_signal_frame(Thread *thread, struct sigaction *sa,
+ struct signal_frame_data *signalFrameData)
{
#warning IMPLEMENT arch_setup_signal_frame
return B_ERROR;
@@ -145,7 +144,7 @@ arch_setup_signal_frame(Thread *thread, struct sigaction *sa, int sig,
int64
-arch_restore_signal_frame(void)
+arch_restore_signal_frame(struct signal_frame_data* signalFrameData)
{
#warning IMPLEMENT arch_restore_signal_frame
return 0;
diff --git a/src/system/kernel/arch/ppc/arch_debug.cpp b/src/system/kernel/arch/ppc/arch_debug.cpp
index 99c8c30..597e37b 100644
--- a/src/system/kernel/arch/ppc/arch_debug.cpp
+++ b/src/system/kernel/arch/ppc/arch_debug.cpp
@@ -129,7 +129,7 @@ stack_trace(int argc, char **argv)
} else {
// TODO: Add support for stack traces of other threads.
/* thread_id id = strtoul(argv[1], NULL, 0);
- thread = thread_get_thread_struct_locked(id);
+ thread = Thread::GetDebug(id);
if (thread == NULL) {
kprintf("could not find thread %ld\n", id);
return 0;
diff --git a/src/system/kernel/arch/ppc/arch_int.cpp b/src/system/kernel/arch/ppc/arch_int.cpp
index 6830136..61ede96 100644
--- a/src/system/kernel/arch/ppc/arch_int.cpp
+++ b/src/system/kernel/arch/ppc/arch_int.cpp
@@ -21,6 +21,7 @@
#include <smp.h>
#include <thread.h>
#include <timer.h>
+#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm/vm.h>
@@ -243,18 +244,19 @@ dprintf("handling I/O interrupts done\n");
cpu_status state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
scheduler_reschedule();
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
restore_interrupts(state);
} else if (thread->post_interrupt_callback != NULL) {
- restore_interrupts(state);
void (*callback)(void*) = thread->post_interrupt_callback;
void* data = thread->post_interrupt_data;
thread->post_interrupt_callback = NULL;
thread->post_interrupt_data = NULL;
+ restore_interrupts(state);
+
callback(data);
}
diff --git a/src/system/kernel/arch/ppc/arch_thread.cpp b/src/system/kernel/arch/ppc/arch_thread.cpp
index 5b3d471..c9856cb 100644
--- a/src/system/kernel/arch/ppc/arch_thread.cpp
+++ b/src/system/kernel/arch/ppc/arch_thread.cpp
@@ -114,10 +114,11 @@ arch_thread_init_thread_struct(Thread *thread)
}
-status_t
-arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
- void (*entry_func)(void), void (*exit_func)(void))
+void
+arch_thread_init_kthread_stack(Thread* thread, void* _stack, void* _stackTop,
+ void (*function)(void*), const void* data)
{
+#if 0
addr_t *kstack = (addr_t *)t->kernel_stack_base;
addr_t *kstackTop = (addr_t *)t->kernel_stack_top;
@@ -154,6 +155,9 @@ arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
t->arch_info.sp = (void *)kstackTop;
return B_OK;
+#else
+ panic("arch_thread_init_kthread_stack(): Implement me!");
+#endif
}
@@ -212,14 +216,15 @@ arch_on_signal_stack(Thread *thread)
status_t
-arch_setup_signal_frame(Thread *thread, struct sigaction *sa, int sig, int sigMask)
+arch_setup_signal_frame(Thread *thread, struct sigaction *sa,
+ struct signal_frame_data *signalFrameData)
{
return B_ERROR;
}
int64
-arch_restore_signal_frame(void)
+arch_restore_signal_frame(struct signal_frame_data* signalFrameData)
{
return 0;
}
diff --git a/src/system/kernel/arch/x86/Jamfile b/src/system/kernel/arch/x86/Jamfile
index 7c3d75a..ecb9706 100644
--- a/src/system/kernel/arch/x86/Jamfile
+++ b/src/system/kernel/arch/x86/Jamfile
@@ -45,11 +45,13 @@ KernelMergeObject kernel_arch_x86.o :
pic.cpp
syscall.S
vm86.cpp
+ x86_signals.cpp
+ x86_signals_asm.S
+ x86_syscalls.cpp
# paging
x86_physical_page_mapper.cpp
x86_physical_page_mapper_large_memory.cpp
- x86_syscalls.cpp
X86PagingMethod.cpp
X86PagingStructures.cpp
X86VMTranslationMap.cpp
diff --git a/src/system/kernel/arch/x86/arch_commpage.cpp b/src/system/kernel/arch/x86/arch_commpage.cpp
index 581b078..8b9cc0b 100644
--- a/src/system/kernel/arch/x86/arch_commpage.cpp
+++ b/src/system/kernel/arch/x86/arch_commpage.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
@@ -7,6 +7,7 @@
#include <commpage.h>
+#include "x86_signals.h"
#include "x86_syscalls.h"
@@ -23,5 +24,8 @@ arch_commpage_init_post_cpus(void)
// select the optimum syscall mechanism and patch the commpage
x86_initialize_commpage_syscall();
+ // initialize the signal handler code in the commpage
+ x86_initialize_commpage_signal_handler();
+
return B_OK;
}
diff --git a/src/system/kernel/arch/x86/arch_debug.cpp b/src/system/kernel/arch/x86/arch_debug.cpp
index 1da75ef..d3c6b8b 100644
--- a/src/system/kernel/arch/x86/arch_debug.cpp
+++ b/src/system/kernel/arch/x86/arch_debug.cpp
@@ -388,7 +388,7 @@ setup_for_thread(char *arg, Thread **_thread, uint32 *_ebp,
if (arg != NULL) {
thread_id id = strtoul(arg, NULL, 0);
- thread = thread_get_thread_struct_locked(id);
+ thread = Thread::GetDebug(id);
if (thread == NULL) {
kprintf("could not find thread %ld\n", id);
return false;
@@ -855,7 +855,7 @@ dump_iframes(int argc, char **argv)
thread = thread_get_current_thread();
} else if (argc == 2) {
thread_id id = strtoul(argv[1], NULL, 0);
- thread = thread_get_thread_struct_locked(id);
+ thread = Thread::GetDebug(id);
if (thread == NULL) {
kprintf("could not find thread %ld\n", id);
return 0;
@@ -925,7 +925,7 @@ cmd_in_context(int argc, char** argv)
return 0;
// get the thread
- Thread* thread = thread_get_thread_struct_locked(threadID);
+ Thread* thread = Thread::GetDebug(threadID);
if (thread == NULL) {
kprintf("Could not find thread with ID \"%s\".\n", threadIDString);
return 0;
diff --git a/src/system/kernel/arch/x86/arch_int.cpp b/src/system/kernel/arch/x86/arch_int.cpp
index 69aa5da..fe746b3 100644
--- a/src/system/kernel/arch/x86/arch_int.cpp
+++ b/src/system/kernel/arch/x86/arch_int.cpp
@@ -17,6 +17,7 @@
#include <smp.h>
#include <team.h>
#include <thread.h>
+#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
@@ -244,7 +245,10 @@ static void
unexpected_exception(struct iframe* frame)
{
debug_exception_type type;
- int signal;
+ uint32 signalNumber;
+ int32 signalCode;
+ addr_t signalAddress = 0;
+ int32 signalError = B_ERROR;
if (IFRAME_IS_VM86(frame)) {
x86_vm86_return((struct vm86_iframe *)frame, (frame->vector == 13) ?
@@ -255,42 +259,62 @@ unexpected_exception(struct iframe* frame)
switch (frame->vector) {
case 0: // Divide Error Exception (#DE)
type = B_DIVIDE_ERROR;
- signal = SIGFPE;
+ signalNumber = SIGFPE;
+ signalCode = FPE_INTDIV;
+ signalAddress = frame->eip;
break;
case 4: // Overflow Exception (#OF)
type = B_OVERFLOW_EXCEPTION;
- signal = SIGTRAP;
+ signalNumber = SIGFPE;
+ signalCode = FPE_INTOVF;
+ signalAddress = frame->eip;
break;
case 5: // BOUND Range Exceeded Exception (#BR)
type = B_BOUNDS_CHECK_EXCEPTION;
- signal = SIGTRAP;
+ signalNumber = SIGTRAP;
+ signalCode = SI_USER;
break;
case 6: // Invalid Opcode Exception (#UD)
type = B_INVALID_OPCODE_EXCEPTION;
- signal = SIGILL;
+ signalNumber = SIGILL;
+ signalCode = ILL_ILLOPC;
+ signalAddress = frame->eip;
break;
case 13: // General Protection Exception (#GP)
type = B_GENERAL_PROTECTION_FAULT;
- signal = SIGILL;
+ signalNumber = SIGILL;
+ signalCode = ILL_PRVOPC; // or ILL_PRVREG
+ signalAddress = frame->eip;
break;
case 16: // x87 FPU Floating-Point Error (#MF)
type = B_FLOATING_POINT_EXCEPTION;
- signal = SIGFPE;
+ signalNumber = SIGFPE;
+ signalCode = FPE_FLTDIV;
+ // TODO: Determine the correct cause via the FPU status
+ // register!
+ signalAddress = frame->eip;
break;
case 17: // Alignment Check Exception (#AC)
type = B_ALIGNMENT_EXCEPTION;
- signal = SIGTRAP;
+ signalNumber = SIGBUS;
+ signalCode = BUS_ADRALN;
+ // TODO: Also get the address (from where?). Since we don't enable
+ // alignment checking this exception should never happen, though.
+ signalError = EFAULT;
break;
case 19: // SIMD Floating-Point Exception (#XF)
type = B_FLOATING_POINT_EXCEPTION;
- signal = SIGFPE;
+ signalNumber = SIGFPE;
+ signalCode = FPE_FLTDIV;
+ // TODO: Determine the correct cause via the MXCSR register!
+ signalAddress = frame->eip;
break;
default:
@@ -306,12 +330,15 @@ unexpected_exception(struct iframe* frame)
// If the thread has a signal handler for the signal, we simply send it
// the signal. Otherwise we notify the user debugger first.
- if (sigaction(signal, NULL, &action) == 0
- && action.sa_handler != SIG_DFL
- && action.sa_handler != SIG_IGN) {
- send_signal(thread->id, signal);
- } else if (user_debug_exception_occurred(type, signal))
- send_signal(team_get_current_team_id(), signal);
+ if ((sigaction(signalNumber, NULL, &action) == 0
+ && action.sa_handler != SIG_DFL
+ && action.sa_handler != SIG_IGN)
+ || user_debug_exception_occurred(type, signalNumber)) {
+ Signal signal(signalNumber, signalCode, signalError,
+ thread->team->id);
+ signal.SetAddress((void*)signalAddress);
+ send_signal_to_thread(thread, signal, 0);
+ }
} else {
char name[32];
panic("Unexpected exception \"%s\" occurred in kernel mode! "
@@ -500,18 +527,19 @@ hardware_interrupt(struct iframe* frame)
cpu_status state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
scheduler_reschedule();
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
restore_interrupts(state);
} else if (thread->post_interrupt_callback != NULL) {
- restore_interrupts(state);
void (*callback)(void*) = thread->post_interrupt_callback;
void* data = thread->post_interrupt_data;
thread->post_interrupt_callback = NULL;
thread->post_interrupt_data = NULL;
+ restore_interrupts(state);
+
callback(data);
}
}
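
The unexpected_exception() hunk above now attaches a POSIX signal code and, where it is known, the faulting instruction address to the signal it sends. Purely as an illustration (assuming the standard siginfo_t fields; the handler below is not part of this change), an SA_SIGINFO handler in userland could tell the mapped causes apart like this:

	// Hypothetical userland handler consuming the si_code values mapped in
	// unexpected_exception() above.
	static void
	fpe_handler(int signal, siginfo_t* info, void* context)
	{
		switch (info->si_code) {
			case FPE_INTDIV:
				// integer division by zero (#DE); fault address in si_addr
				break;
			case FPE_INTOVF:
				// INTO-detected overflow (#OF)
				break;
			default:
				// x87/SIMD errors are reported as FPE_FLTDIV for now (see
				// the TODOs above)
				break;
		}
	}
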
diff --git a/src/system/kernel/arch/x86/arch_interrupts.S b/src/system/kernel/arch/x86/arch_interrupts.S
index f0fdca8..dca676c 100644
--- a/src/system/kernel/arch/x86/arch_interrupts.S
+++ b/src/system/kernel/arch/x86/arch_interrupts.S
@@ -22,6 +22,18 @@
#include "syscall_table.h"
+#define LOCK_THREAD_TIME() \
+ lea THREAD_time_lock(%edi), %eax; \
+ pushl %eax; \
+ call acquire_spinlock;
+ /* leave spinlock address on stack for UNLOCK_THREAD_TIME() */
+
+#define UNLOCK_THREAD_TIME() \
+ /* spinlock address still on stack from */ \
+ /* LOCK_THREAD_TIME() */ \
+ call release_spinlock; \
+ addl $4, %esp;
+
#define UPDATE_THREAD_USER_TIME_COMMON() \
movl %eax, %ebx; /* save for later */ \
movl %edx, %ecx; \
@@ -37,19 +49,33 @@
movl %ecx, (THREAD_last_time + 4)(%edi); \
\
/* thread->in_kernel = true; */ \
- movb $1, THREAD_in_kernel(%edi)
+ movb $1, THREAD_in_kernel(%edi);
#define UPDATE_THREAD_USER_TIME() \
+ LOCK_THREAD_TIME() \
call system_time; \
- UPDATE_THREAD_USER_TIME_COMMON()
+ UPDATE_THREAD_USER_TIME_COMMON() \
+ UNLOCK_THREAD_TIME()
#define UPDATE_THREAD_USER_TIME_PUSH_TIME() \
call system_time; \
push %edx; \
push %eax; \
- UPDATE_THREAD_USER_TIME_COMMON()
+ \
+ LOCK_THREAD_TIME() \
+ \
+ /* recover the system time, note that */ \
+ /* LOCK_THREAD_TIME() leaves an address on the stack */ \
+ movl 4(%esp), %eax; \
+ movl 8(%esp), %edx; \
+ \
+ UPDATE_THREAD_USER_TIME_COMMON() \
+ \
+ UNLOCK_THREAD_TIME()
#define UPDATE_THREAD_KERNEL_TIME() \
+ LOCK_THREAD_TIME() \
+ \
call system_time; \
\
movl %eax, %ebx; /* save for later */ \
@@ -66,7 +92,9 @@
movl %ecx, (THREAD_last_time + 4)(%edi); \
\
/* thread->in_kernel = false; */ \
- movb $0, THREAD_in_kernel(%edi)
+ movb $0, THREAD_in_kernel(%edi); \
+ \
+ UNLOCK_THREAD_TIME() \
#define PUSH_IFRAME_BOTTOM(iframeType) \
pusha; \
@@ -798,32 +826,22 @@ FUNCTION(x86_sysenter):
FUNCTION_END(x86_sysenter)
-/*! Is copied to the signal stack call to restore the original frame when
- the signal handler exits.
- The copying code (in arch_thread.c::arch_setup_signal_frame()) copies
- everything between the i386_return_from_signal and i386_end_return_from_signal
- symbols.
-*/
-FUNCTION(i386_return_from_signal):
- addl $12, %esp // Flushes the 3 arguments to sa_handler
- movl $SYSCALL_RESTORE_SIGNAL_FRAME, %eax
- // This syscall will restore the cpu context to the
- // one existing before calling the signal handler
- movl $0, %ecx
- lea 4(%esp), %edx
- int $99
- ret
-FUNCTION_END(i386_return_from_signal)
-SYMBOL(i386_end_return_from_signal):
+/*! \fn void x86_return_to_userland(iframe* frame)
+ \brief Returns to the userland environment given by \a frame.
+
+ Before returning to userland all potentially necessary kernel exit work is
+ done.
+ \a frame must point to a location somewhere on the caller's stack (e.g. a
+ local variable).
+ The function must be called with interrupts disabled.
-/*! void i386_restore_frame_from_syscall(struct iframe iframe);
- Pops the regs of the iframe from the stack to make it current and then
- return to userland.
- Interrupts are disabled.
+ \param frame The iframe defining the userland environment.
*/
-FUNCTION(i386_restore_frame_from_syscall):
- lea 4(%esp), %ebp // iframe to %ebp
+FUNCTION(x86_return_to_userland):
+ // get the iframe* parameter
+ movl 4(%esp), %ebp
+ movl %ebp, %esp
// check, if any kernel exit work has to be done
movl %dr3, %edi
@@ -835,7 +853,7 @@ FUNCTION(i386_restore_frame_from_syscall):
// update the thread's kernel time and return
UPDATE_THREAD_KERNEL_TIME()
POP_IFRAME_AND_RETURN()
-FUNCTION_END(i386_restore_frame_from_syscall)
+FUNCTION_END(x86_return_to_userland)
/* status_t x86_vm86_enter(struct vm86_iframe *frame) */
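
The LOCK_THREAD_TIME()/UNLOCK_THREAD_TIME() macros added above serialize the per-thread time accounting on the thread's time_lock spinlock. Roughly, in C++ terms, UPDATE_THREAD_KERNEL_TIME() now amounts to the following (a sketch for orientation only; the interrupt paths use the assembly macros, and the accounting is abbreviated here):

	// Sketch only -- interrupts are already disabled on these paths, so a
	// plain spinlock acquisition is sufficient.
	{
		SpinLocker timeLocker(thread->time_lock);

		bigtime_t now = system_time();
		thread->kernel_time += now - thread->last_time;
		thread->last_time = now;
		thread->in_kernel = false;
	}
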
diff --git a/src/system/kernel/arch/x86/arch_thread.cpp b/src/system/kernel/arch/x86/arch_thread.cpp
index b3457d6..59ad054 100644
--- a/src/system/kernel/arch/x86/arch_thread.cpp
+++ b/src/system/kernel/arch/x86/arch_thread.cpp
@@ -22,11 +22,13 @@
#include <thread.h>
#include <tls.h>
#include <tracing.h>
+#include <util/AutoLock.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"
+#include "x86_signals.h"
#include "x86_syscalls.h"
@@ -66,7 +68,7 @@ class RestartSyscall : public AbstractTraceEntry {
// from arch_interrupts.S
extern "C" void i386_stack_init(struct farcall *interrupt_stack_offset);
-extern "C" void i386_restore_frame_from_syscall(struct iframe frame);
+extern "C" void x86_return_to_userland(iframe* frame);
// from arch_cpu.c
extern void (*gX86SwapFPUFunc)(void *oldState, const void *newState);
@@ -135,6 +137,48 @@ get_current_iframe(void)
}
+static inline void
+set_fs_register(uint32 segment)
+{
+ asm("movl %0,%%fs" :: "r" (segment));
+}
+
+
+static void
+set_tls_context(Thread *thread)
+{
+ int entry = smp_get_current_cpu() + TLS_BASE_SEGMENT;
+
+ set_segment_descriptor_base(&gGDT[entry], thread->user_local_storage);
+ set_fs_register((entry << 3) | DPL_USER);
+}
+
+
+/*! Returns to the userland environment given by \a frame for a thread not
+ having been userland before.
+
+ Before returning to userland all potentially necessary kernel exit work is
+ done.
+
+ \param thread The current thread.
+ \param frame The iframe defining the userland environment. Must point to a
+ location somewhere on the caller's stack (e.g. a local variable).
+*/
+static void
+initial_return_to_userland(Thread* thread, iframe* frame)
+{
+ // disable interrupts and set up CPU specifics for this thread
+ disable_interrupts();
+
+ i386_set_tss_and_kstack(thread->kernel_stack_top);
+ set_tls_context(thread);
+ x86_set_syscall_stack(thread->kernel_stack_top);
+
+ // return to userland
+ x86_return_to_userland(frame);
+}
+
+
/*!
\brief Returns the current thread's topmost (i.e. most recent)
userland->kernel transition iframe (usually the first one, save for
@@ -206,23 +250,6 @@ x86_next_page_directory(Thread *from, Thread *to)
}
-static inline void
-set_fs_register(uint32 segment)
-{
- asm("movl %0,%%fs" :: "r" (segment));
-}
-
-
-static void
-set_tls_context(Thread *thread)
-{
- int entry = smp_get_current_cpu() + TLS_BASE_SEGMENT;
-
- set_segment_descriptor_base(&gGDT[entry], thread->user_local_storage);
- set_fs_register((entry << 3) | DPL_USER);
-}
-
-
void
x86_restart_syscall(struct iframe* frame)
{
@@ -241,20 +268,19 @@ x86_restart_syscall(struct iframe* frame)
}
-static uint32 *
-get_signal_stack(Thread *thread, struct iframe *frame, int signal)
+static uint8*
+get_signal_stack(Thread* thread, struct iframe* frame, struct sigaction* action)
{
// use the alternate signal stack if we should and can
if (thread->signal_stack_enabled
- && (thread->sig_action[signal - 1].sa_flags & SA_ONSTACK) != 0
+ && (action->sa_flags & SA_ONSTACK) != 0
&& (frame->user_esp < thread->signal_stack_base
|| frame->user_esp >= thread->signal_stack_base
+ thread->signal_stack_size)) {
- return (uint32 *)(thread->signal_stack_base
- + thread->signal_stack_size);
+ return (uint8*)(thread->signal_stack_base + thread->signal_stack_size);
}
- return (uint32 *)frame->user_esp;
+ return (uint8*)frame->user_esp;
}
@@ -277,57 +303,40 @@ arch_thread_init_thread_struct(Thread *thread)
}
-status_t
-arch_thread_init_kthread_stack(Thread *t, int (*start_func)(void),
- void (*entry_func)(void), void (*exit_func)(void))
+/*! Prepares the given thread's kernel stack for executing its entry function.
+
+ \param thread The thread.
+ \param stack The usable bottom of the thread's kernel stack.
+ \param stackTop The usable top of the thread's kernel stack.
+ \param function The entry function the thread shall execute.
+ \param data Pointer to be passed to the entry function.
+*/
+void
+arch_thread_init_kthread_stack(Thread* thread, void* _stack, void* _stackTop,
+ void (*function)(void*), const void* data)
{
- addr_t *kstack = (addr_t *)t->kernel_stack_base;
- addr_t *kstack_top = (addr_t *)t->kernel_stack_top;
- int i;
-
- TRACE(("arch_thread_initialize_kthread_stack: kstack 0x%p, start_func 0x%p, entry_func 0x%p\n",
- kstack, start_func, entry_func));
-
- // clear the kernel stack
-#ifdef DEBUG_KERNEL_STACKS
-# ifdef STACK_GROWS_DOWNWARDS
- memset((void *)((addr_t)kstack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0,
- KERNEL_STACK_SIZE);
-# else
- memset(kstack, 0, KERNEL_STACK_SIZE);
-# endif
-#else
- memset(kstack, 0, KERNEL_STACK_SIZE);
-#endif
+ addr_t* stackTop = (addr_t*)_stackTop;
- // set the final return address to be thread_kthread_exit
- kstack_top--;
- *kstack_top = (unsigned int)exit_func;
+	TRACE(("arch_thread_init_kthread_stack: stack top %p, function %p, data: "
+ "%p\n", stackTop, function, data));
- // set the return address to be the start of the first function
- kstack_top--;
- *kstack_top = (unsigned int)start_func;
+ // push the function argument, a pointer to the data
+ *--stackTop = (addr_t)data;
- // set the return address to be the start of the entry (thread setup)
- // function
- kstack_top--;
- *kstack_top = (unsigned int)entry_func;
+ // push a dummy return address for the function
+ *--stackTop = 0;
- // simulate pushfl
-// kstack_top--;
-// *kstack_top = 0x00; // interrupts still disabled after the switch
+ // push the function address -- that's the return address used after the
+ // context switch
+ *--stackTop = (addr_t)function;
- // simulate initial popad
- for (i = 0; i < 8; i++) {
- kstack_top--;
- *kstack_top = 0;
- }
+ // simulate pushad as done by x86_context_switch()
+ for (int i = 0; i < 8; i++)
+ *--stackTop = 0;
// save the stack position
- t->arch_info.current_stack.esp = kstack_top;
- t->arch_info.current_stack.ss = (addr_t *)KERNEL_DATA_SEG;
-
- return B_OK;
+ thread->arch_info.current_stack.esp = stackTop;
+ thread->arch_info.current_stack.ss = (addr_t*)KERNEL_DATA_SEG;
}
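
For orientation, the pushes above leave the new kernel stack laid out as follows (higher addresses first); the first switch to the thread has x86_context_switch() pop the eight dummy register values and then return straight into function(data):

	data        <- argument picked up by function
	0           <- dummy return address seen inside function
	function    <- return address consumed by the context switch's ret
	0 (x 8)     <- dummy register values for the simulated pushad
	            <- arch_info.current_stack.esp points here
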
@@ -411,20 +420,19 @@ arch_thread_dump_info(void *info)
}
-/** Sets up initial thread context and enters user space
- */
-
+/*! Sets up initial thread context and enters user space
+*/
status_t
-arch_thread_enter_userspace(Thread *t, addr_t entry, void *args1,
- void *args2)
+arch_thread_enter_userspace(Thread* thread, addr_t entry, void* args1,
+ void* args2)
{
- addr_t stackTop = t->user_stack_base + t->user_stack_size;
+ addr_t stackTop = thread->user_stack_base + thread->user_stack_size;
uint32 codeSize = (addr_t)x86_end_userspace_thread_exit
- (addr_t)x86_userspace_thread_exit;
uint32 args[3];
- TRACE(("arch_thread_enter_uspace: entry 0x%lx, args %p %p, ustack_top 0x%lx\n",
- entry, args1, args2, stackTop));
+ TRACE(("arch_thread_enter_userspace: entry 0x%lx, args %p %p, "
+ "ustack_top 0x%lx\n", entry, args1, args2, stackTop));
// copy the little stub that calls exit_thread() when the thread entry
// function returns, as well as the arguments of the entry function
@@ -441,20 +449,22 @@ arch_thread_enter_userspace(Thread *t, addr_t entry, void *args1,
if (user_memcpy((void *)stackTop, args, sizeof(args)) < B_OK)
return B_BAD_ADDRESS;
- thread_at_kernel_exit();
- // also disables interrupts
-
- // install user breakpoints, if any
- if ((t->flags & THREAD_FLAGS_BREAKPOINTS_DEFINED) != 0)
- x86_init_user_debug_at_kernel_exit(NULL);
-
- i386_set_tss_and_kstack(t->kernel_stack_top);
-
- // set the CPU dependent GDT entry for TLS
- set_tls_context(t);
-
- x86_set_syscall_stack(t->kernel_stack_top);
- x86_enter_userspace(entry, stackTop);
+ // prepare the user iframe
+ iframe frame = {};
+ frame.type = IFRAME_TYPE_SYSCALL;
+ frame.gs = USER_DATA_SEG;
+ // frame.fs not used -- we call set_tls_context() below
+ frame.es = USER_DATA_SEG;
+ frame.ds = USER_DATA_SEG;
+ frame.eip = entry;
+ frame.cs = USER_CODE_SEG;
+ frame.flags = X86_EFLAGS_RESERVED1 | X86_EFLAGS_INTERRUPT
+ | (3 << X86_EFLAGS_IO_PRIVILEG_LEVEL_SHIFT);
+ frame.user_esp = stackTop;
+ frame.user_ss = USER_DATA_SEG;
+
+ // return to userland
+ initial_return_to_userland(thread, &frame);
return B_OK;
// never gets here
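
A small aside on the flags value built above: assuming the constants carry the architectural bit positions (reserved bit 1, the interrupt flag at bit 9, IOPL in bits 12-13 -- their definitions are not part of this diff), the initial EFLAGS works out to 0x2 | 0x200 | (3 << 12) = 0x3202, i.e. interrupts enabled and I/O privilege level 3 for the freshly started userland thread.
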
@@ -472,9 +482,35 @@ arch_on_signal_stack(Thread *thread)
}
+/*! Sets up the user iframe for invoking a signal handler.
+
+ The function fills in the remaining fields of the given \a signalFrameData,
+ copies it to the thread's userland stack (the one on which the signal shall
+ be handled), and sets up the user iframe so that when returning to userland
+ a wrapper function is executed that calls the user-defined signal handler.
+ When the signal handler returns, the wrapper function shall call the
+ "restore signal frame" syscall with the (possibly modified) signal frame
+ data.
+
+ The following fields of the \a signalFrameData structure still need to be
+ filled in:
+ - \c context.uc_stack: The stack currently used by the thread.
+ - \c context.uc_mcontext: The current userland state of the registers.
+	- \c syscall_restart_return_value: Architecture-specific use. On x86 the
+		values of eax and edx, overwritten by the syscall return value.
+
+ Furthermore the function needs to set \c thread->user_signal_context to the
+ userland pointer to the \c ucontext_t on the user stack.
+
+ \param thread The current thread.
+ \param action The signal action specified for the signal to be handled.
+ \param signalFrameData A partially initialized structure of all the data
+ that need to be copied to userland.
+	\return \c B_OK on success, another error code if something goes wrong.
+*/
status_t
-arch_setup_signal_frame(Thread *thread, struct sigaction *action,
- int signal, int signalMask)
+arch_setup_signal_frame(Thread* thread, struct sigaction* action,
+ struct signal_frame_data* signalFrameData)
{
struct iframe *frame = get_current_iframe();
if (!IFRAME_IS_USER(frame)) {
@@ -482,136 +518,94 @@ arch_setup_signal_frame(Thread *thread, struct sigaction *action,
return B_BAD_VALUE;
}
- uint32 *signalCode;
- uint32 *userRegs;
- struct vregs regs;
- uint32 buffer[6];
- status_t status;
-
- // start stuffing stuff on the user stack
- uint32* userStack = get_signal_stack(thread, frame, signal);
-
- // copy syscall restart info onto the user stack
- userStack -= (sizeof(thread->syscall_restart.parameters) + 12 + 3) / 4;
- uint32 threadFlags = atomic_and(&thread->flags,
- ~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
- if (user_memcpy(userStack, &threadFlags, 4) < B_OK
- || user_memcpy(userStack + 1, &frame->orig_eax, 4) < B_OK
- || user_memcpy(userStack + 2, &frame->orig_edx, 4) < B_OK)
+ // In case of a BeOS compatible handler map SIGBUS to SIGSEGV, since they
+ // had the same signal number.
+ if ((action->sa_flags & SA_BEOS_COMPATIBLE_HANDLER) != 0
+ && signalFrameData->info.si_signo == SIGBUS) {
+ signalFrameData->info.si_signo = SIGSEGV;
+ }
+
+ // store the register state in signalFrameData->context.uc_mcontext
+ signalFrameData->context.uc_mcontext.eip = frame->eip;
+ signalFrameData->context.uc_mcontext.eflags = frame->flags;
+ signalFrameData->context.uc_mcontext.eax = frame->eax;
+ signalFrameData->context.uc_mcontext.ecx = frame->ecx;
+ signalFrameData->context.uc_mcontext.edx = frame->edx;
+ signalFrameData->context.uc_mcontext.ebp = frame->ebp;
+ signalFrameData->context.uc_mcontext.esp = frame->user_esp;
+ signalFrameData->context.uc_mcontext.edi = frame->edi;
+ signalFrameData->context.uc_mcontext.esi = frame->esi;
+ signalFrameData->context.uc_mcontext.ebx = frame->ebx;
+ i386_fnsave((void *)(&signalFrameData->context.uc_mcontext.xregs));
+
+ // fill in signalFrameData->context.uc_stack
+ signal_get_user_stack(frame->user_esp, &signalFrameData->context.uc_stack);
+
+ // store orig_eax/orig_edx in syscall_restart_return_value
+ signalFrameData->syscall_restart_return_value
+ = (uint64)frame->orig_edx << 32 | frame->orig_eax;
+
+ // get the stack to use -- that's either the current one or a special signal
+ // stack
+ uint8* userStack = get_signal_stack(thread, frame, action);
+
+ // copy the signal frame data onto the stack
+ userStack -= sizeof(*signalFrameData);
+ signal_frame_data* userSignalFrameData = (signal_frame_data*)userStack;
+ if (user_memcpy(userSignalFrameData, signalFrameData,
+ sizeof(*signalFrameData)) != B_OK) {
+ return B_BAD_ADDRESS;
+ }
+
+ // prepare the user stack frame for a function call to the signal handler
+ // wrapper function
+ uint32 stackFrame[2] = {
+ frame->eip, // return address
+ (addr_t)userSignalFrameData, // parameter: pointer to signal frame data
+ };
+
+ userStack -= sizeof(stackFrame);
+ if (user_memcpy(userStack, stackFrame, sizeof(stackFrame)) != B_OK)
return B_BAD_ADDRESS;
- status = user_memcpy(userStack + 3, thread->syscall_restart.parameters,
- sizeof(thread->syscall_restart.parameters));
- if (status < B_OK)
- return status;
-
- // store the saved regs onto the user stack
- regs.eip = frame->eip;
- regs.eflags = frame->flags;
- regs.eax = frame->eax;
- regs.ecx = frame->ecx;
- regs.edx = frame->edx;
- regs.ebp = frame->ebp;
- regs.esp = frame->esp;
- regs._reserved_1 = frame->user_esp;
- regs._reserved_2[0] = frame->edi;
- regs._reserved_2[1] = frame->esi;
- regs._reserved_2[2] = frame->ebx;
- i386_fnsave((void *)(&regs.xregs));
-
- userStack -= (sizeof(struct vregs) + 3) / 4;
- userRegs = userStack;
- status = user_memcpy(userRegs, &regs, sizeof(regs));
- if (status < B_OK)
- return status;
-
- // now store a code snippet on the stack
- userStack -= ((uint32)i386_end_return_from_signal + 3
- - (uint32)i386_return_from_signal) / 4;
- signalCode = userStack;
- status = user_memcpy(signalCode, (const void *)&i386_return_from_signal,
- ((uint32)i386_end_return_from_signal
- - (uint32)i386_return_from_signal));
- if (status < B_OK)
- return status;
-
- // now set up the final part
- buffer[0] = (uint32)signalCode; // return address when sa_handler done
- buffer[1] = signal; // arguments to sa_handler
- buffer[2] = (uint32)action->sa_userdata;
- buffer[3] = (uint32)userRegs;
-
- buffer[4] = signalMask; // Old signal mask to restore
- buffer[5] = (uint32)userRegs; // Int frame + extra regs to restore
-
- userStack -= sizeof(buffer) / 4;
-
- status = user_memcpy(userStack, buffer, sizeof(buffer));
- if (status < B_OK)
- return status;
-
- frame->user_esp = (uint32)userStack;
- frame->eip = (uint32)action->sa_handler;
+
+ // Update Thread::user_signal_context, now that everything seems to have
+ // gone fine.
+ thread->user_signal_context = &userSignalFrameData->context;
+
+ // Adjust the iframe's esp and eip, so that the thread will continue with
+ // the prepared stack, executing the signal handler wrapper function.
+ frame->user_esp = (addr_t)userStack;
+ frame->eip = x86_get_user_signal_handler_wrapper(
+ (action->sa_flags & SA_BEOS_COMPATIBLE_HANDLER) != 0);
return B_OK;
}
int64
-arch_restore_signal_frame(void)
+arch_restore_signal_frame(struct signal_frame_data* signalFrameData)
{
- Thread *thread = thread_get_current_thread();
- struct iframe *frame = get_current_iframe();
- int32 signalMask;
- uint32 *userStack;
- struct vregs* regsPointer;
- struct vregs regs;
+ struct iframe* frame = get_current_iframe();
TRACE(("### arch_restore_signal_frame: entry\n"));
- userStack = (uint32 *)frame->user_esp;
- if (user_memcpy(&signalMask, &userStack[0], 4) < B_OK
- || user_memcpy(&regsPointer, &userStack[1], 4) < B_OK
- || user_memcpy(&regs, regsPointer, sizeof(vregs)) < B_OK) {
- return B_BAD_ADDRESS;
- }
+ frame->orig_eax = (uint32)signalFrameData->syscall_restart_return_value;
+ frame->orig_edx
+ = (uint32)(signalFrameData->syscall_restart_return_value >> 32);
- uint32* syscallRestartInfo
- = (uint32*)regsPointer + (sizeof(struct vregs) + 3) / 4;
- uint32 threadFlags;
- if (user_memcpy(&threadFlags, syscallRestartInfo, 4) < B_OK
- || user_memcpy(&frame->orig_eax, syscallRestartInfo + 1, 4) < B_OK
- || user_memcpy(&frame->orig_edx, syscallRestartInfo + 2, 4) < B_OK
- || user_memcpy(thread->syscall_restart.parameters,
- syscallRestartInfo + 3,
- sizeof(thread->syscall_restart.parameters)) < B_OK) {
- return B_BAD_ADDRESS;
- }
+ frame->eip = signalFrameData->context.uc_mcontext.eip;
+ frame->flags = (frame->flags & ~(uint32)X86_EFLAGS_USER_FLAGS)
+ | (signalFrameData->context.uc_mcontext.eflags & X86_EFLAGS_USER_FLAGS);
+ frame->eax = signalFrameData->context.uc_mcontext.eax;
+ frame->ecx = signalFrameData->context.uc_mcontext.ecx;
+ frame->edx = signalFrameData->context.uc_mcontext.edx;
+ frame->ebp = signalFrameData->context.uc_mcontext.ebp;
+ frame->user_esp = signalFrameData->context.uc_mcontext.esp;
+ frame->edi = signalFrameData->context.uc_mcontext.edi;
+ frame->esi = signalFrameData->context.uc_mcontext.esi;
+ frame->ebx = signalFrameData->context.uc_mcontext.ebx;
- // set restart/64bit return value flags from previous syscall
- atomic_and(&thread->flags,
- ~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
- atomic_or(&thread->flags, threadFlags
- & (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
-
- // TODO: Verify that just restoring the old signal mask is right! Bash for
- // instance changes the procmask in a signal handler. Those changes are
- // lost the way we do it.
- atomic_set(&thread->sig_block_mask, signalMask);
- update_current_thread_signals_flag();
-
- frame->eip = regs.eip;
- frame->flags = regs.eflags;
- frame->eax = regs.eax;
- frame->ecx = regs.ecx;
- frame->edx = regs.edx;
- frame->ebp = regs.ebp;
- frame->esp = regs.esp;
- frame->user_esp = regs._reserved_1;
- frame->edi = regs._reserved_2[0];
- frame->esi = regs._reserved_2[1];
- frame->ebx = regs._reserved_2[2];
-
- i386_frstor((void *)(&regs.xregs));
+ i386_frstor((void*)(&signalFrameData->context.uc_mcontext.xregs));
TRACE(("### arch_restore_signal_frame: exit\n"));
@@ -637,27 +631,21 @@ arch_store_fork_frame(struct arch_fork_arg *arg)
}
-/** Restores the frame from a forked team as specified by the provided
- * arch_fork_arg structure.
- * Needs to be called from within the child team, ie. instead of
- * arch_thread_enter_uspace() as thread "starter".
- * This function does not return to the caller, but will enter userland
- * in the child team at the same position where the parent team left of.
- */
+/*! Restores the frame from a forked team as specified by the provided
+ arch_fork_arg structure.
+ Needs to be called from within the child team, i.e. instead of
+ arch_thread_enter_userspace() as thread "starter".
+ This function does not return to the caller, but will enter userland
+	in the child team at the same position where the parent team left off.
+ \param arg The architecture specific fork arguments including the
+ environment to restore. Must point to a location somewhere on the
+ caller's stack.
+*/
void
-arch_restore_fork_frame(struct arch_fork_arg *arg)
+arch_restore_fork_frame(struct arch_fork_arg* arg)
{
- Thread *thread = thread_get_current_thread();
-
- disable_interrupts();
-
- i386_set_tss_and_kstack(thread->kernel_stack_top);
-
- // set the CPU dependent GDT entry for TLS (set the current %fs register)
- set_tls_context(thread);
-
- i386_restore_frame_from_syscall(arg->iframe);
+ initial_return_to_userland(thread_get_current_thread(), &arg->iframe);
}
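
Because arch_restore_signal_frame() now copies the (possibly modified) uc_mcontext back into the interrupt frame, a handler may legitimately patch the saved register state before returning. A hypothetical userland illustration -- the sigaction()/SA_SIGINFO usage and the instruction length are assumptions for the example, not part of this change:

	// Hypothetical: a SIGSEGV handler that skips the faulting instruction
	// by advancing the saved eip. The "2" is a made-up instruction length.
	static void
	segv_handler(int signal, siginfo_t* info, void* context)
	{
		ucontext_t* ucontext = (ucontext_t*)context;
		ucontext->uc_mcontext.eip += 2;
			// written back into the iframe by the restore syscall when the
			// handler returns
	}

	// installed roughly like this:
	//	struct sigaction action = {};
	//	action.sa_sigaction = segv_handler;
	//	action.sa_flags = SA_SIGINFO;
	//	sigaction(SIGSEGV, &action, NULL);
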
diff --git a/src/system/kernel/arch/x86/arch_user_debugger.cpp b/src/system/kernel/arch/x86/arch_user_debugger.cpp
index eb3bcf6..3a2673c 100644
--- a/src/system/kernel/arch/x86/arch_user_debugger.cpp
+++ b/src/system/kernel/arch/x86/arch_user_debugger.cpp
@@ -781,18 +781,16 @@ x86_init_user_debug_at_kernel_exit(struct iframe *frame)
// disable kernel breakpoints
disable_breakpoints();
- GRAB_THREAD_LOCK();
+ // install the user breakpoints
GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
arch_team_debug_info &teamInfo = thread->team->debug_info.arch_info;
- // install the user breakpoints
install_breakpoints(teamInfo);
atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_INSTALLED);
RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
- RELEASE_THREAD_LOCK();
}
@@ -815,20 +813,19 @@ x86_exit_user_debug_at_kernel_entry()
if (!(thread->flags & THREAD_FLAGS_BREAKPOINTS_INSTALLED))
return;
- GRAB_THREAD_LOCK();
-
// disable user breakpoints
disable_breakpoints();
// install kernel breakpoints
Team* kernelTeam = team_get_kernel_team();
+
GRAB_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
+
install_breakpoints(kernelTeam->debug_info.arch_info);
- RELEASE_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_INSTALLED);
- RELEASE_THREAD_LOCK();
+ RELEASE_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
}
@@ -927,7 +924,8 @@ x86_handle_debug_exception(struct iframe *frame)
// We need to ignore the exception now and send a single-step
// notification later, when the thread wants to return from the
// kernel.
- InterruptsSpinLocker threadLocker(gThreadSpinlock);
+ InterruptsSpinLocker threadDebugInfoLocker(
+ thread->debug_info.lock);
// Check whether the team is still being debugged and set
// the B_THREAD_DEBUG_NOTIFY_SINGLE_STEP and
diff --git a/src/system/kernel/arch/x86/arch_x86.S b/src/system/kernel/arch/x86/arch_x86.S
index 056f170..a0c60ff 100644
--- a/src/system/kernel/arch/x86/arch_x86.S
+++ b/src/system/kernel/arch/x86/arch_x86.S
@@ -157,27 +157,6 @@ FUNCTION_END(x86_userspace_thread_exit)
SYMBOL(x86_end_userspace_thread_exit):
-/* void x86_enter_userspace(addr_t entry, addr_t stackTop); */
-FUNCTION(x86_enter_userspace):
- movl 4(%esp), %eax // get entry point
- movl 8(%esp), %ebx // get user stack
- movl $USER_DATA_SEG, %ecx
- movw %cx, %ds
- movw %cx, %es
- //movw $0x33 + cpu_num, %fs -> fs points to the TLS storage (CPU dependent segment)
- movw %cx, %gs
-
- xorl %ebp, %ebp // this is the last stack frame - we don't need one on return
- // (%ebp marks the beginning of this stack frame)
- pushl %ecx // user data segment
- pushl %ebx // user stack
- pushl $(1 << 9) | 2 // user flags
- pushl $USER_CODE_SEG // user code segment
- pushl %eax // user IP
- iret
-FUNCTION_END(x86_enter_userspace)
-
-
null_idt_descr:
.word 0
.word 0,0
diff --git a/src/system/kernel/arch/x86/asm_offsets.cpp b/src/system/kernel/arch/x86/asm_offsets.cpp
index 490fab9..b38f803 100644
--- a/src/system/kernel/arch/x86/asm_offsets.cpp
+++ b/src/system/kernel/arch/x86/asm_offsets.cpp
@@ -13,6 +13,7 @@
#include <arch_cpu.h>
#include <cpu.h>
+#include <ksignal.h>
#include <ksyscalls.h>
#include <thread_types.h>
@@ -34,6 +35,7 @@ dummy()
DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler_stack_pointer);
// struct Thread
+ DEFINE_OFFSET_MACRO(THREAD, Thread, time_lock);
DEFINE_OFFSET_MACRO(THREAD, Thread, kernel_time);
DEFINE_OFFSET_MACRO(THREAD, Thread, user_time);
DEFINE_OFFSET_MACRO(THREAD, Thread, last_time);
@@ -43,6 +45,7 @@ dummy()
DEFINE_OFFSET_MACRO(THREAD, Thread, fault_handler);
// struct iframe
+ DEFINE_SIZEOF_MACRO(IFRAME, iframe);
DEFINE_OFFSET_MACRO(IFRAME, iframe, cs);
DEFINE_OFFSET_MACRO(IFRAME, iframe, eax);
DEFINE_OFFSET_MACRO(IFRAME, iframe, edx);
@@ -66,4 +69,20 @@ dummy()
memcpy);
DEFINE_OFFSET_MACRO(X86_OPTIMIZED_FUNCTIONS, x86_optimized_functions,
memset);
+
+ // struct signal_frame_data
+ DEFINE_SIZEOF_MACRO(SIGNAL_FRAME_DATA, signal_frame_data);
+ DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, info);
+ DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, context);
+ DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, user_data);
+ DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, handler);
+
+ // struct ucontext_t
+ DEFINE_OFFSET_MACRO(UCONTEXT_T, __ucontext_t, uc_mcontext);
+
+ // struct vregs
+ DEFINE_SIZEOF_MACRO(VREGS, vregs);
+
+ // struct siginfo_t
+ DEFINE_OFFSET_MACRO(SIGINFO_T, __siginfo_t, si_signo);
}
diff --git a/src/system/kernel/arch/x86/x86_signals.cpp b/src/system/kernel/arch/x86/x86_signals.cpp
new file mode 100644
index 0000000..80977a3
--- /dev/null
+++ b/src/system/kernel/arch/x86/x86_signals.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "x86_signals.h"
+
+#include <string.h>
+
+#include <KernelExport.h>
+
+#include <commpage.h>
+#include <cpu.h>
+#include <elf.h>
+#include <smp.h>
+
+#include "syscall_numbers.h"
+
+
+// implemented in assembly
+extern "C" void x86_signal_frame_function_beos(signal_frame_data* frameData);
+
+
+extern "C" void
+x86_signal_frame_function(signal_frame_data* frameData)
+{
+ // Note: This function is copied to the commpage. Hence it needs to be
+ // position independent. We don't build this source file with the respective
+ // flags, but the code the compiler generates for this function is position
+ // independent anyway. It simply doesn't contain constructs that could
+ // result in position dependent code. The potentially problematic jumps
+ // needed due to the "if" statement are all harmless relative jumps.
+
+ if (frameData->siginfo_handler) {
+ // SA_SIGINFO style handler function -- we additionally pass the user
+ // data pointer
+ void (*handler)(int, siginfo_t*, void*, void*)
+ = (void (*)(int, siginfo_t*, void*, void*))frameData->handler;
+ handler(frameData->info.si_signo, &frameData->info,
+ &frameData->context, frameData->user_data);
+ } else {
+ // Simple handler function -- we call it with additional user data
+ // pointer and vregs parameters. Note that unlike in BeOS the last
+ // parameter is a pointer to a vregs structure, while in BeOS the
+		// structure was passed by value. For setting up a BeOS binary
+		// compatible signal handler call, x86_signal_frame_function_beos() is
+ // used instead.
+ void (*handler)(int, void*, vregs*)
+ = (void (*)(int, void*, vregs*))frameData->handler;
+ handler(frameData->info.si_signo, frameData->user_data,
+ &frameData->context.uc_mcontext);
+ }
+
+ #define TO_STRING_LITERAL_HELPER(number) #number
+ #define TO_STRING_LITERAL(number) TO_STRING_LITERAL_HELPER(number)
+
+ // call the restore_signal_frame() syscall -- does not return (here)
+ asm volatile(
+ // push frameData -- the parameter to restore_signal_frame()
+ "pushl %0;"
+ // push a dummy return value
+ "pushl $0;"
+ // syscall number to eax
+ "movl $" TO_STRING_LITERAL(SYSCALL_RESTORE_SIGNAL_FRAME) ", %%eax;"
+ // syscall
+ "int $99;"
+ :: "r"(frameData)
+ );
+
+ #undef TO_STRING_LITERAL_HELPER
+ #undef TO_STRING_LITERAL
+}
+
+
+static void
+register_signal_handler_function(const char* functionName, int32 commpageIndex,
+ const char* commpageSymbolName, addr_t expectedAddress)
+{
+ // look up the x86_signal_frame_function() symbol -- we have its address,
+ // but also need its size
+ elf_symbol_info symbolInfo;
+ if (elf_lookup_kernel_symbol(functionName, &symbolInfo)
+ != B_OK) {
+ panic("x86_initialize_commpage_signal_handler(): Failed to find "
+ "signal frame function \"%s\"!", functionName);
+ }
+
+ ASSERT(expectedAddress == symbolInfo.address);
+
+ // fill in the commpage table entry
+ fill_commpage_entry(commpageIndex, (void*)symbolInfo.address,
+ symbolInfo.size);
+
+ // add symbol to the commpage image
+ image_id image = get_commpage_image();
+ elf_add_memory_image_symbol(image, commpageSymbolName,
+ ((addr_t*)USER_COMMPAGE_ADDR)[commpageIndex], symbolInfo.size,
+ B_SYMBOL_TYPE_TEXT);
+}
+
+
+void
+x86_initialize_commpage_signal_handler()
+{
+ // standard handler
+ register_signal_handler_function("x86_signal_frame_function",
+ COMMPAGE_ENTRY_X86_SIGNAL_HANDLER, "commpage_signal_handler",
+ (addr_t)&x86_signal_frame_function);
+
+ // handler for BeOS backwards compatibility
+ register_signal_handler_function("x86_signal_frame_function_beos",
+ COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS, "commpage_signal_handler_beos",
+ (addr_t)&x86_signal_frame_function_beos);
+}
+
+
+addr_t
+x86_get_user_signal_handler_wrapper(bool beosHandler)
+{
+ int32 index = beosHandler
+ ? COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS
+ : COMMPAGE_ENTRY_X86_SIGNAL_HANDLER;
+ return ((addr_t*)USER_COMMPAGE_ADDR)[index];
+}
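
For reference, the two handler flavours the commpage wrappers above dispatch to look like this from userland (illustrative prototypes; the parameter naming is mine):

	// SA_SIGINFO style, served by x86_signal_frame_function(): the wrapper
	// passes the per-signal user data pointer as an extra fourth argument,
	// which plain POSIX handlers can simply ignore.
	void handler(int signal, siginfo_t* info, void* context, void* userData);

	// Simple style, also served by x86_signal_frame_function(): user data
	// plus a pointer to the saved registers. The BeOS-compatible wrapper
	// (x86_signal_frame_function_beos()) instead copies the vregs onto the
	// stack and passes them by value.
	void handler(int signal, void* userData, vregs* registers);
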
diff --git a/src/system/kernel/arch/x86/x86_signals.h b/src/system/kernel/arch/x86/x86_signals.h
new file mode 100644
index 0000000..282d4af
--- /dev/null
+++ b/src/system/kernel/arch/x86/x86_signals.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_ARCH_X86_SIGNALS_H
+#define _KERNEL_ARCH_X86_SIGNALS_H
+
+
+#include <SupportDefs.h>
+
+
+void x86_initialize_commpage_signal_handler();
+addr_t x86_get_user_signal_handler_wrapper(bool beosHandler);
+
+
+#endif // _KERNEL_ARCH_X86_SIGNALS_H
diff --git a/src/system/kernel/arch/x86/x86_signals_asm.S b/src/system/kernel/arch/x86/x86_signals_asm.S
new file mode 100644
index 0000000..38c32e9
--- /dev/null
+++ b/src/system/kernel/arch/x86/x86_signals_asm.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <asm_defs.h>
+#include <commpage_defs.h>
+
+#include "asm_offsets.h"
+#include "syscall_numbers.h"
+
+
+/*! \fn void x86_signal_frame_function_beos(signal_frame_data* frameData)
+ \brief Wrapper function for BeOS-style signal handler functions.
+ \param frameData The signal frame data.
+*/
+FUNCTION(x86_signal_frame_function_beos):
+ // set up a stack frame
+ push %ebp
+ mov %esp, %ebp
+
+ // Move our parameter to %esi, so we can conveniently work with it. Note
+ // that we're free to use non-scratch registers without saving them, since
+ // we don't have any caller to save them for. The caller will restore the
+ // interrupted environment anyway.
+ mov 8(%ebp), %esi
+
+ // push the parameters for the handler function
+
+ // make space for the vregs parameter
+ lea -VREGS_sizeof(%esp), %esp
+ mov %esp, %edi
+
+ // copy the vregs via memcpy()
+ pushl $VREGS_sizeof
+ lea SIGNAL_FRAME_DATA_context + UCONTEXT_T_uc_mcontext(%esi), %eax
+ push %eax
+ push %edi
+ movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_MEMCPY, %eax
+ call *%eax
+ addl $12, %esp
+
+ // the vregs are on the stack -- push user data and signal number
+ movl SIGNAL_FRAME_DATA_user_data(%esi), %eax
+ push %eax
+ movl SIGNAL_FRAME_DATA_info+SIGINFO_T_si_signo(%esi), %eax
+ push %eax
+
+ // call the signal handler
+ movl SIGNAL_FRAME_DATA_handler(%esi), %eax
+ call *%eax
+ addl $8, %esp // pop only signal number and user data arguments
+
+ // copy the vregs back to the frameData structure
+ pushl $VREGS_sizeof
+ push %edi
+ lea SIGNAL_FRAME_DATA_context + UCONTEXT_T_uc_mcontext(%esi), %eax
+ push %eax
+ movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_MEMCPY, %eax
+ call *%eax
+ addl $12 + VREGS_sizeof, %esp
+
+ // call the _kern_restore_signal_frame() syscall -- does not return (here)
+ pushl %esi
+ pushl $0 // dummy return value
+ movl $SYSCALL_RESTORE_SIGNAL_FRAME, %eax
+ int $99
+
+ // never gets here
+FUNCTION_END(x86_signal_frame_function_beos)
diff --git a/src/system/kernel/arch/x86/x86_syscalls.cpp b/src/system/kernel/arch/x86/x86_syscalls.cpp
index d0ba8d0..bd14bb0 100644
--- a/src/system/kernel/arch/x86/x86_syscalls.cpp
+++ b/src/system/kernel/arch/x86/x86_syscalls.cpp
@@ -75,7 +75,7 @@ init_amd_syscall_registers(void* dummy, int cpuNum)
// #pragma mark -
-status_t
+void
x86_initialize_commpage_syscall(void)
{
void* syscallCode = (void *)&_user_syscall_int;
@@ -113,6 +113,4 @@ x86_initialize_commpage_syscall(void)
elf_add_memory_image_symbol(image, "commpage_syscall",
((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SYSCALL], len,
B_SYMBOL_TYPE_TEXT);
-
- return B_OK;
}
diff --git a/src/system/kernel/arch/x86/x86_syscalls.h b/src/system/kernel/arch/x86/x86_syscalls.h
index f6434ac..db0dd75 100644
--- a/src/system/kernel/arch/x86/x86_syscalls.h
+++ b/src/system/kernel/arch/x86/x86_syscalls.h
@@ -12,7 +12,7 @@
extern void (*gX86SetSyscallStack)(addr_t stackTop);
-status_t x86_initialize_commpage_syscall();
+void x86_initialize_commpage_syscall();
static inline void
diff --git a/src/system/kernel/condition_variable.cpp b/src/system/kernel/condition_variable.cpp
index 6fb71c5..ef0dcd2 100644
--- a/src/system/kernel/condition_variable.cpp
+++ b/src/system/kernel/condition_variable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008, Ingo Weinhold, bonefish@cs.tu-berlin.de.
+ * Copyright 2007-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
@@ -134,14 +134,15 @@ ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)
conditionLocker.Unlock();
- SpinLocker threadLocker(gThreadSpinlock);
+ SpinLocker schedulerLocker(gSchedulerLock);
status_t error;
if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)
error = thread_block_with_timeout_locked(flags, timeout);
else
error = thread_block_locked(thread_get_current_thread());
- threadLocker.Unlock();
+
+ schedulerLocker.Unlock();
conditionLocker.Lock();
@@ -220,12 +221,12 @@ ConditionVariable::Publish(const void* object, const char* objectType)
void
-ConditionVariable::Unpublish(bool threadsLocked)
+ConditionVariable::Unpublish(bool schedulerLocked)
{
ASSERT(fObject != NULL);
InterruptsLocker _;
- SpinLocker threadLocker(threadsLocked ? NULL : &gThreadSpinlock);
+ SpinLocker schedulerLocker(schedulerLocked ? NULL : &gSchedulerLock);
SpinLocker locker(sConditionVariablesLock);
#if KDEBUG
@@ -262,7 +263,7 @@ ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
/*static*/ void
-ConditionVariable::NotifyOne(const void* object, bool threadsLocked,
+ConditionVariable::NotifyOne(const void* object, bool schedulerLocked,
status_t result)
{
InterruptsSpinLocker locker(sConditionVariablesLock);
@@ -271,13 +272,13 @@ ConditionVariable::NotifyOne(const void* object, bool threadsLocked,
if (variable == NULL)
return;
- variable->NotifyOne(threadsLocked, result);
+ variable->NotifyOne(schedulerLocked, result);
}
/*static*/ void
-ConditionVariable::NotifyAll(const void* object,
- bool threadsLocked, status_t result)
+ConditionVariable::NotifyAll(const void* object, bool schedulerLocked,
+ status_t result)
{
InterruptsSpinLocker locker(sConditionVariablesLock);
ConditionVariable* variable = sConditionVariableHash.Lookup(object);
@@ -285,7 +286,7 @@ ConditionVariable::NotifyAll(const void* object,
if (variable == NULL)
return;
- variable->NotifyAll(threadsLocked, result);
+ variable->NotifyAll(schedulerLocked, result);
}
@@ -321,10 +322,10 @@ ConditionVariable::Dump() const
void
-ConditionVariable::_Notify(bool all, bool threadsLocked, status_t result)
+ConditionVariable::_Notify(bool all, bool schedulerLocked, status_t result)
{
InterruptsLocker _;
- SpinLocker threadLocker(threadsLocked ? NULL : &gThreadSpinlock);
+ SpinLocker schedulerLocker(schedulerLocked ? NULL : &gSchedulerLock);
SpinLocker locker(sConditionVariablesLock);
if (!fEntries.IsEmpty()) {
@@ -339,7 +340,7 @@ ConditionVariable::_Notify(bool all, bool threadsLocked, status_t result)
/*! Called with interrupts disabled and the condition variable spinlock and
- thread lock held.
+ scheduler lock held.
*/
void
ConditionVariable::_NotifyLocked(bool all, status_t result)
diff --git a/src/system/kernel/cpu.cpp b/src/system/kernel/cpu.cpp
index 46bb56b..44bff7e 100644
--- a/src/system/kernel/cpu.cpp
+++ b/src/system/kernel/cpu.cpp
@@ -10,12 +10,14 @@
#include <cpu.h>
-#include <thread_types.h>
#include <arch/cpu.h>
-#include <boot/kernel_args.h>
#include <string.h>
+#include <boot/kernel_args.h>
+#include <thread_types.h>
+#include <util/AutoLock.h>
+
/* global per-cpu structure */
cpu_ent gCPU[MAX_BOOT_CPUS];
@@ -30,7 +32,7 @@ cpu_init(kernel_args *args)
}
-status_t
+status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
return arch_cpu_init_percpu(args, curr_cpu);
@@ -66,24 +68,15 @@ cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
bigtime_t
cpu_get_active_time(int32 cpu)
{
- bigtime_t activeTime;
- cpu_status state;
-
if (cpu < 0 || cpu > smp_get_num_cpus())
return 0;
- // We need to grab the thread lock here, because the thread activity
- // time is not maintained atomically (because there is no need to)
-
- state = disable_interrupts();
- GRAB_THREAD_LOCK();
-
- activeTime = gCPU[cpu].active_time;
+ // We need to grab the scheduler lock here, because the thread activity
+ // time is not maintained atomically (because there is no need to).
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
- return activeTime;
+ return gCPU[cpu].active_time;
}
diff --git a/src/system/kernel/debug/debug_heap.cpp b/src/system/kernel/debug/debug_heap.cpp
index 633b89e..069e97d 100644
--- a/src/system/kernel/debug/debug_heap.cpp
+++ b/src/system/kernel/debug/debug_heap.cpp
@@ -12,11 +12,11 @@
#include <vm/vm.h>
-#define INITIAL_HEAP_SIZE B_PAGE_SIZE
+#define INITIAL_DEBUG_HEAP_SIZE B_PAGE_SIZE
-static char sInitialHeap[INITIAL_HEAP_SIZE] __attribute__ ((aligned (8)));
+static char sInitialHeap[INITIAL_DEBUG_HEAP_SIZE] __attribute__ ((aligned (8)));
static void* sHeapBase = sInitialHeap;
-static size_t sHeapSize = INITIAL_HEAP_SIZE;
+static size_t sHeapSize = INITIAL_DEBUG_HEAP_SIZE;
const kdebug_alloc_t kdebug_alloc = {};
diff --git a/src/system/kernel/debug/system_profiler.cpp b/src/system/kernel/debug/system_profiler.cpp
index 6c03450..c2d7549 100644
--- a/src/system/kernel/debug/system_profiler.cpp
+++ b/src/system/kernel/debug/system_profiler.cpp
@@ -108,10 +108,6 @@ private:
inline void _MaybeNotifyProfilerThreadLocked();
inline void _MaybeNotifyProfilerThread();
- static bool _InitialTeamIterator(Team* team,
- void* cookie);
- static bool _InitialThreadIterator(Thread* thread,
- void* cookie);
static bool _InitialImageIterator(struct image* image,
void* cookie);
@@ -189,6 +185,8 @@ private:
size_t fBufferStart;
size_t fBufferSize;
uint64 fDroppedEvents;
+ int64 fLastTeamAddedSerialNumber;
+ int64 fLastThreadAddedSerialNumber;
bool fTeamNotificationsRequested;
bool fTeamNotificationsEnabled;
bool fThreadNotificationsRequested;
@@ -203,7 +201,6 @@ private:
bool fProfilingActive;
bool fReentered[B_MAX_CPU_COUNT];
CPUProfileData fCPUData[B_MAX_CPU_COUNT];
- Thread** fRunningThreads;
WaitObject* fWaitObjectBuffer;
int32 fWaitObjectCount;
WaitObjectList fUsedWaitObjects;
@@ -212,6 +209,9 @@ private:
};
+/*! Notifies the profiler thread when the profiling buffer is full enough.
+ The caller must hold the scheduler lock and fLock.
+*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
@@ -219,7 +219,9 @@ SystemProfiler::_MaybeNotifyProfilerThreadLocked()
if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
int cpu = smp_get_current_cpu();
fReentered[cpu] = true;
+
thread_unblock_locked(fWaitingProfilerThread, B_OK);
+
fWaitingProfilerThread = NULL;
fReentered[cpu] = false;
}
@@ -232,7 +234,7 @@ SystemProfiler::_MaybeNotifyProfilerThread()
if (fWaitingProfilerThread == NULL)
return;
- InterruptsSpinLocker threadsLocker(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
SpinLocker locker(fLock);
_MaybeNotifyProfilerThreadLocked();
@@ -255,6 +257,8 @@ SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
fBufferStart(0),
fBufferSize(0),
fDroppedEvents(0),
+ fLastTeamAddedSerialNumber(0),
+ fLastThreadAddedSerialNumber(0),
fTeamNotificationsRequested(false),
fTeamNotificationsEnabled(false),
fThreadNotificationsRequested(false),
@@ -294,6 +298,7 @@ SystemProfiler::~SystemProfiler()
// inactive.
InterruptsSpinLocker locker(fLock);
if (fWaitingProfilerThread != NULL) {
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
thread_unblock_locked(fWaitingProfilerThread, B_OK);
fWaitingProfilerThread = NULL;
}
@@ -302,7 +307,7 @@ SystemProfiler::~SystemProfiler()
// stop scheduler listening
if (fSchedulerNotificationsRequested) {
- InterruptsSpinLocker threadsLocker(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
scheduler_remove_listener(this);
}
@@ -446,11 +451,24 @@ SystemProfiler::Init()
// teams
if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
- InterruptsSpinLocker teamsLocker(gTeamSpinlock);
- if (team_iterate_through_teams(&_InitialTeamIterator, this) != NULL)
- return B_BUFFER_OVERFLOW;
+ InterruptsSpinLocker locker(fLock);
+
+ TeamListIterator iterator;
+ while (Team* team = iterator.Next()) {
+ locker.Unlock();
+
+ bool added = _TeamAdded(team);
+
+ // release the reference returned by the iterator
+ team->ReleaseReference();
+
+ if (!added)
+ return B_BUFFER_OVERFLOW;
+
+ locker.Lock();
+ }
+
fTeamNotificationsEnabled = true;
- teamsLocker.Unlock();
}
// images
@@ -460,21 +478,29 @@ SystemProfiler::Init()
}
// threads
- Thread* runningThreads[B_MAX_CPU_COUNT];
- memset(runningThreads, 0, sizeof(runningThreads));
- fRunningThreads = runningThreads;
-
- InterruptsSpinLocker threadsLocker(gThreadSpinlock);
- if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0
- || (fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
- if (thread_iterate_through_threads(&_InitialThreadIterator, this)
- != NULL) {
- return B_BUFFER_OVERFLOW;
+ if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
+ InterruptsSpinLocker locker(fLock);
+
+ ThreadListIterator iterator;
+ while (Thread* thread = iterator.Next()) {
+ locker.Unlock();
+
+ bool added = _ThreadAdded(thread);
+
+ // release the reference returned by the iterator
+ thread->ReleaseReference();
+
+ if (!added)
+ return B_BUFFER_OVERFLOW;
+
+ locker.Lock();
}
- fThreadNotificationsEnabled
- = (fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0;
+
+ fThreadNotificationsEnabled = true;
}
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
fProfilingActive = true;
// start scheduler and wait object listening
@@ -490,12 +516,13 @@ SystemProfiler::Init()
// fake schedule events for the initially running threads
int32 cpuCount = smp_get_num_cpus();
for (int32 i = 0; i < cpuCount; i++) {
- if (runningThreads[i] != NULL)
- ThreadScheduled(runningThreads[i], runningThreads[i]);
+ Thread* thread = gCPU[i].running_thread;
+ if (thread != NULL)
+ ThreadScheduled(thread, thread);
}
}
- threadsLocker.Unlock();
+ schedulerLocker.Unlock();
// I/O scheduling
if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
@@ -545,9 +572,12 @@ SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
Thread* thread = thread_get_current_thread();
fWaitingProfilerThread = thread;
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
thread_prepare_to_block(thread, B_CAN_INTERRUPT,
THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");
+ schedulerLocker.Unlock();
locker.Unlock();
status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);
@@ -587,16 +617,14 @@ SystemProfiler::EventOccurred(NotificationService& service,
return;
if (strcmp(service.Name(), "teams") == 0) {
- if (!fTeamNotificationsEnabled)
- return;
-
Team* team = (Team*)event->GetPointer("teamStruct", NULL);
if (team == NULL)
return;
switch (eventCode) {
case TEAM_ADDED:
- _TeamAdded(team);
+ if (fTeamNotificationsEnabled)
+ _TeamAdded(team);
break;
case TEAM_REMOVED:
@@ -613,28 +641,39 @@ SystemProfiler::EventOccurred(NotificationService& service,
return;
}
- _TeamRemoved(team);
+ // When we're still doing the initial team list scan, we are
+ // also interested in removals that happened to teams we have
+ // already seen.
+ if (fTeamNotificationsEnabled
+ || team->serial_number <= fLastTeamAddedSerialNumber) {
+ _TeamRemoved(team);
+ }
break;
case TEAM_EXEC:
- _TeamExec(team);
+ if (fTeamNotificationsEnabled)
+ _TeamExec(team);
break;
}
} else if (strcmp(service.Name(), "threads") == 0) {
- if (!fThreadNotificationsEnabled)
- return;
-
Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
if (thread == NULL)
return;
switch (eventCode) {
case THREAD_ADDED:
- _ThreadAdded(thread);
+ if (fThreadNotificationsEnabled)
+ _ThreadAdded(thread);
break;
case THREAD_REMOVED:
- _ThreadRemoved(thread);
+ // When we're still doing the initial thread list scan, we are
+ // also interested in removals that happened to threads we have
+ // already seen.
+ if (fThreadNotificationsEnabled
+ || thread->serial_number <= fLastThreadAddedSerialNumber) {
+ _ThreadRemoved(thread);
+ }
break;
}
} else if (strcmp(service.Name(), "images") == 0) {
@@ -820,11 +859,22 @@ SystemProfiler::RWLockInitialized(rw_lock* lock)
bool
SystemProfiler::_TeamAdded(Team* team)
{
- size_t nameLen = strlen(team->name);
- size_t argsLen = strlen(team->args);
+ TeamLocker teamLocker(team);
+
+ size_t nameLen = strlen(team->Name());
+ size_t argsLen = strlen(team->Args());
InterruptsSpinLocker locker(fLock);
+ // During the initial scan check whether the team is already gone again.
+ // Later this cannot happen, since the team creator notifies us before
+ // actually starting the team.
+ if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
+ return true;
+
+ if (team->serial_number > fLastTeamAddedSerialNumber)
+ fLastTeamAddedSerialNumber = team->serial_number;
+
system_profiler_team_added* event = (system_profiler_team_added*)
_AllocateBuffer(
sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
@@ -833,9 +883,9 @@ SystemProfiler::_TeamAdded(Team* team)
return false;
event->team = team->id;
- strcpy(event->name, team->name);
+ strcpy(event->name, team->Name());
event->args_offset = nameLen + 1;
- strcpy(event->name + nameLen + 1, team->args);
+ strcpy(event->name + nameLen + 1, team->Args());
fHeader->size = fBufferSize;
@@ -846,6 +896,12 @@ SystemProfiler::_TeamAdded(Team* team)
bool
SystemProfiler::_TeamRemoved(Team* team)
{
+ // TODO: It is possible that we get remove notifications for teams that
+ // had already been removed from the global team list when we did the
+ // initial scan, but were still in the process of dying. ATM it is not
+ // really possible to identify such a case.
+
+ TeamLocker teamLocker(team);
InterruptsSpinLocker locker(fLock);
system_profiler_team_removed* event = (system_profiler_team_removed*)
@@ -865,7 +921,9 @@ SystemProfiler::_TeamRemoved(Team* team)
bool
SystemProfiler::_TeamExec(Team* team)
{
- size_t argsLen = strlen(team->args);
+ TeamLocker teamLocker(team);
+
+ size_t argsLen = strlen(team->Args());
InterruptsSpinLocker locker(fLock);
@@ -878,7 +936,7 @@ SystemProfiler::_TeamExec(Team* team)
event->team = team->id;
strlcpy(event->thread_name, team->main_thread->name,
sizeof(event->thread_name));
- strcpy(event->args, team->args);
+ strcpy(event->args, team->Args());
fHeader->size = fBufferSize;
@@ -889,8 +947,18 @@ SystemProfiler::_TeamExec(Team* team)
bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
+ ThreadLocker threadLocker(thread);
InterruptsSpinLocker locker(fLock);
+	// During the initial scan check whether the thread is already gone again.
+	// Later this cannot happen, since the thread creator notifies us before
+	// actually starting the thread.
+ if (!fThreadNotificationsEnabled && !thread->IsAlive())
+ return true;
+
+ if (thread->serial_number > fLastThreadAddedSerialNumber)
+ fLastThreadAddedSerialNumber = thread->serial_number;
+
system_profiler_thread_added* event = (system_profiler_thread_added*)
_AllocateBuffer(sizeof(system_profiler_thread_added),
B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
@@ -910,6 +978,12 @@ SystemProfiler::_ThreadAdded(Thread* thread)
bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
+ // TODO: It is possible that we get remove notifications for threads that
+ // had already been removed from the global thread list when we did the
+ // initial scan, but were still in the process of dying. ATM it is not
+ // really possible to identify such a case.
+
+ ThreadLocker threadLocker(thread);
InterruptsSpinLocker locker(fLock);
system_profiler_thread_removed* event
@@ -1237,28 +1311,6 @@ SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
/*static*/ bool
-SystemProfiler::_InitialTeamIterator(Team* team, void* cookie)
-{
- SystemProfiler* self = (SystemProfiler*)cookie;
- return !self->_TeamAdded(team);
-}
-
-
-/*static*/ bool
-SystemProfiler::_InitialThreadIterator(Thread* thread, void* cookie)
-{
- SystemProfiler* self = (SystemProfiler*)cookie;
-
- if ((self->fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0
- && thread->state == B_THREAD_RUNNING && thread->cpu != NULL) {
- self->fRunningThreads[thread->cpu->cpu_num] = thread;
- }
-
- return !self->_ThreadAdded(thread);
-}
-
-
-/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
SystemProfiler* self = (SystemProfiler*)cookie;
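
Taken together, the Init() hunks above replace the callback-based team/thread iteration with the new reference-counted list iterators. A condensed sketch of the team loop with the locking rationale spelled out (names as used in the hunks above; the thread loop is analogous):

	InterruptsSpinLocker locker(fLock);

	TeamListIterator iterator;
	while (Team* team = iterator.Next()) {
		// Next() returns the team with a reference held, so it stays valid
		// while fLock is dropped for the potentially heavier _TeamAdded().
		locker.Unlock();

		bool added = _TeamAdded(team);
		team->ReleaseReference();
			// release the reference returned by the iterator

		if (!added)
			return B_BUFFER_OVERFLOW;

		locker.Lock();
	}

	fTeamNotificationsEnabled = true;
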
diff --git a/src/system/kernel/debug/user_debugger.cpp b/src/system/kernel/debug/user_debugger.cpp
index bde389b..ebb1736 100644
--- a/src/system/kernel/debug/user_debugger.cpp
+++ b/src/system/kernel/debug/user_debugger.cpp
@@ -64,12 +64,12 @@ static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);
-static status_t
+static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
size_t bufferSize)
{
- return write_port_etc(port, code, buffer, bufferSize,
- B_KILL_CAN_INTERRUPT, 0);
+ return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
+ 0);
}
@@ -127,7 +127,7 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
/*! Updates the thread::flags field according to what user debugger flags are
set for the thread.
- Interrupts must be disabled and the thread lock must be held.
+ Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
@@ -141,7 +141,7 @@ update_thread_user_debug_flag(Thread* thread)
/*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
given thread.
- Interrupts must be disabled and the team lock must be held.
+ Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
@@ -155,15 +155,16 @@ update_thread_breakpoints_flag(Thread* thread)
}
-/*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
+/*! Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
- InterruptsSpinLocker _(gTeamSpinlock);
-
Team* team = thread_get_current_thread()->team;
+
+ TeamLocker teamLocker(team);
+
Thread* thread = team->thread_list;
if (arch_has_breakpoints(&team->debug_info.arch_info)) {
@@ -177,8 +178,7 @@ update_threads_breakpoints_flag()
/*! Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the
- given thread.
- Interrupts must be disabled and the team lock must be held.
+ given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
@@ -194,7 +194,7 @@ update_thread_debugger_installed_flag(Thread* thread)
/*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
threads of the given team.
- Interrupts must be disabled and the team lock must be held.
+ The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
@@ -296,6 +296,7 @@ void
init_thread_debug_info(struct thread_debug_info *info)
{
if (info) {
+ B_INITIALIZE_SPINLOCK(&info->lock);
arch_clear_thread_debug_info(&info->arch_info);
info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
info->debug_port = -1;
@@ -309,7 +310,8 @@ init_thread_debug_info(struct thread_debug_info *info)
}
-/*! Invoked with thread lock being held.
+/*! Clears the debug info for the current thread.
+ Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
@@ -373,19 +375,18 @@ prepare_debugger_change(team_id teamID, ConditionVariable& condition,
while (true) {
// get the team
- InterruptsSpinLocker teamLocker(gTeamSpinlock);
-
- team = team_get_team_struct_locked(teamID);
- if (team == NULL || team->death_entry != NULL)
+ team = Team::GetAndLock(teamID);
+ if (team == NULL)
return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
+ TeamLocker teamLocker(team, true);
// don't allow messing with the kernel team
if (team == team_get_kernel_team())
return B_NOT_ALLOWED;
// check whether the condition is already set
- SpinLocker threadLocker(gThreadSpinlock);
- SpinLocker debugInfoLocker(team->debug_info.lock);
+ InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
if (team->debug_info.debugger_changed_condition == NULL) {
// nobody there yet -- set our condition variable and be done
@@ -398,7 +399,6 @@ prepare_debugger_change(team_id teamID, ConditionVariable& condition,
team->debug_info.debugger_changed_condition->Add(&entry);
debugInfoLocker.Unlock();
- threadLocker.Unlock();
teamLocker.Unlock();
entry.Wait();
@@ -411,8 +411,7 @@ prepare_debugger_change(Team* team, ConditionVariable& condition)
{
while (true) {
// check whether the condition is already set
- InterruptsSpinLocker threadLocker(gThreadSpinlock);
- SpinLocker debugInfoLocker(team->debug_info.lock);
+ InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
if (team->debug_info.debugger_changed_condition == NULL) {
// nobody there yet -- set our condition variable and be done
@@ -425,7 +424,6 @@ prepare_debugger_change(Team* team, ConditionVariable& condition)
team->debug_info.debugger_changed_condition->Add(&entry);
debugInfoLocker.Unlock();
- threadLocker.Unlock();
entry.Wait();
}
@@ -436,13 +434,12 @@ static void
finish_debugger_change(Team* team)
{
// unset our condition variable and notify all threads waiting on it
- InterruptsSpinLocker threadLocker(gThreadSpinlock);
- SpinLocker debugInfoLocker(team->debug_info.lock);
+ InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
ConditionVariable* condition = team->debug_info.debugger_changed_condition;
team->debug_info.debugger_changed_condition = NULL;
- condition->NotifyAll(true);
+ condition->NotifyAll(false);
}
@@ -460,14 +457,12 @@ user_debug_prepare_for_exec()
// get the port
port_id debugPort = -1;
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
- if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
+ if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
debugPort = thread->debug_info.debug_port;
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ threadDebugInfoLocker.Unlock();
// set the new port ownership
if (debugPort >= 0)
@@ -489,14 +484,12 @@ user_debug_finish_after_exec()
// get the port
port_id debugPort = -1;
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
debugPort = thread->debug_info.debug_port;
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ threadDebugInfoLocker.Unlock();
// set the new port ownership
if (debugPort >= 0)
@@ -565,8 +558,8 @@ thread_hit_debug_event_internal(debug_debugger_message event,
port_id nubPort = -1;
status_t error = B_OK;
cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
+ SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
uint32 threadFlags = thread->debug_info.flags;
threadFlags &= ~B_THREAD_DEBUG_STOP;
@@ -578,7 +571,6 @@ thread_hit_debug_event_internal(debug_debugger_message event,
thread->id));
error = B_ERROR;
-
} else if (debuggerInstalled || !requireDebugger) {
if (debuggerInstalled) {
debuggerPort = thread->team->debug_info.debugger_port;
@@ -613,8 +605,8 @@ thread_hit_debug_event_internal(debug_debugger_message event,
update_thread_user_debug_flag(thread);
+ threadDebugInfoLocker.Unlock();
RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
- RELEASE_THREAD_LOCK();
restore_interrupts(state);
// delete the superfluous port
@@ -738,7 +730,7 @@ thread_hit_debug_event_internal(debug_debugger_message event,
thread_debug_info threadDebugInfo;
state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ threadDebugInfoLocker.Lock();
// check, if the team is still being debugged
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
@@ -765,7 +757,7 @@ thread_hit_debug_event_internal(debug_debugger_message event,
destroyThreadInfo = true;
}
- RELEASE_THREAD_LOCK();
+ threadDebugInfoLocker.Unlock();
restore_interrupts(state);
// enable/disable single stepping
@@ -790,7 +782,7 @@ thread_hit_debug_event(debug_debugger_message event, const void *message,
requireDebugger, restart);
} while (result >= 0 && restart);
- // Prepare to continue -- we install a debugger change condition, so no-one
+ // Prepare to continue -- we install a debugger change condition, so no one
// will change the debugger while we're playing with the breakpoint manager.
// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
Team* team = thread_get_current_thread()->team;
@@ -954,8 +946,9 @@ void
user_debug_stop_thread()
{
// check whether this is actually an emulated single-step notification
- InterruptsSpinLocker threadsLocker(gThreadSpinlock);
Thread* thread = thread_get_current_thread();
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
+
bool singleStepped = false;
if ((atomic_and(&thread->debug_info.flags,
~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
@@ -963,7 +956,7 @@ user_debug_stop_thread()
singleStepped = true;
}
- threadsLocker.Unlock();
+ threadDebugInfoLocker.Unlock();
if (singleStepped) {
user_debug_single_stepped();
@@ -1035,19 +1028,15 @@ user_debug_team_exec()
}
+/*! Called by a new userland thread to update the debugging related flags of
+ \c Thread::flags before the thread first enters userland.
+ \param thread The calling thread.
+*/
void
-user_debug_update_new_thread_flags(thread_id threadID)
+user_debug_update_new_thread_flags(Thread* thread)
{
- // Update thread::flags of the thread.
-
- InterruptsLocker interruptsLocker;
-
- SpinLocker teamLocker(gTeamSpinlock);
- SpinLocker threadLocker(gThreadSpinlock);
-
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (!thread)
- return;
+	// lock it and update its flags
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
update_thread_user_debug_flag(thread);
update_thread_breakpoints_flag(thread);
@@ -1082,20 +1071,18 @@ user_debug_thread_deleted(team_id teamID, thread_id threadID)
// the debugged team (but to the kernel). So we can't use debugger_write().
// get the team debug flags and debugger port
- InterruptsSpinLocker teamLocker(gTeamSpinlock);
-
- Team *team = team_get_team_struct_locked(teamID);
+ Team* team = Team::Get(teamID);
if (team == NULL)
return;
+ BReference<Team> teamReference(team, true);
- SpinLocker debugInfoLocker(team->debug_info.lock);
+ InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
port_id debuggerPort = team->debug_info.debugger_port;
sem_id writeLock = team->debug_info.debugger_write_lock;
debugInfoLocker.Unlock();
- teamLocker.Unlock();
// check, if a debugger is installed and is interested in thread events
if (~teamDebugFlags
@@ -1109,19 +1096,12 @@ user_debug_thread_deleted(team_id teamID, thread_id threadID)
return;
// re-get the team debug info -- we need to check whether anything changed
- teamLocker.Lock();
-
- team = team_get_team_struct_locked(teamID);
- if (team == NULL)
- return;
-
debugInfoLocker.Lock();
teamDebugFlags = atomic_get(&team->debug_info.flags);
port_id newDebuggerPort = team->debug_info.debugger_port;
debugInfoLocker.Unlock();
- teamLocker.Unlock();
// Send the message only if the debugger hasn't changed in the meantime or
// the team is about to be handed over.
@@ -1141,14 +1121,18 @@ user_debug_thread_deleted(team_id teamID, thread_id threadID)
}
+/*! Called for a thread that is about to die, cleaning up all user debug
+ facilities installed for the thread.
+ \param thread The current thread, the one that is going to die.
+*/
void
user_debug_thread_exiting(Thread* thread)
{
- InterruptsLocker interruptsLocker;
- SpinLocker teamLocker(gTeamSpinlock);
-
+ // thread is the current thread, so using team is safe
Team* team = thread->team;
+ InterruptsLocker interruptsLocker;
+
GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
@@ -1156,8 +1140,6 @@ user_debug_thread_exiting(Thread* thread)
RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
- teamLocker.Unlock();
-
// check, if a debugger is installed
if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
|| debuggerPort < 0) {
@@ -1165,7 +1147,7 @@ user_debug_thread_exiting(Thread* thread)
}
// detach the profile info and mark the thread dying
- SpinLocker threadLocker(gThreadSpinlock);
+ SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
thread_debug_info& threadDebugInfo = thread->debug_info;
if (threadDebugInfo.profile.samples == NULL)
@@ -1183,7 +1165,7 @@ user_debug_thread_exiting(Thread* thread)
atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
- threadLocker.Unlock();
+ threadDebugInfoLocker.Unlock();
interruptsLocker.Unlock();
// notify the debugger
@@ -1294,17 +1276,26 @@ user_debug_single_stepped()
}
+/*! Schedules the profiling timer for the current thread.
+ The caller must hold the thread's debug info lock.
+ \param thread The current thread.
+ \param interval The time after which the timer should fire.
+*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
thread->debug_info.profile.installed_timer = timer;
thread->debug_info.profile.timer_end = system_time() + interval;
- add_timer(timer, &profiling_event, interval,
- B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
+ add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}
+/*! Samples the current thread's instruction pointer/stack trace.
+ The caller must hold the current thread's debug info lock.
+ \param flushBuffer Return parameter: Set to \c true when the sampling
+ buffer must be flushed.
+*/
static bool
profiling_do_sample(bool& flushBuffer)
{
@@ -1385,10 +1376,15 @@ profiling_do_sample(bool& flushBuffer)
static void
profiling_buffer_full(void*)
{
+ // It is undefined whether the function is called with interrupts enabled
+ // or disabled. We are allowed to enable interrupts, though. First make
+ // sure interrupts are disabled.
+ disable_interrupts();
+
Thread* thread = thread_get_current_thread();
thread_debug_info& debugInfo = thread->debug_info;
- GRAB_THREAD_LOCK();
+ SpinLocker threadDebugInfoLocker(debugInfo.lock);
if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
int32 sampleCount = debugInfo.profile.sample_count;
@@ -1401,7 +1397,7 @@ profiling_buffer_full(void*)
debugInfo.profile.sample_count = 0;
debugInfo.profile.dropped_ticks = 0;
- RELEASE_THREAD_LOCK();
+ threadDebugInfoLocker.Unlock();
enable_interrupts();
// prepare the message
@@ -1417,7 +1413,7 @@ profiling_buffer_full(void*)
sizeof(message), false);
disable_interrupts();
- GRAB_THREAD_LOCK();
+ threadDebugInfoLocker.Lock();
// do the sampling and reschedule timer, if still profiling this thread
bool flushBuffer;
@@ -1427,11 +1423,13 @@ profiling_buffer_full(void*)
}
}
- RELEASE_THREAD_LOCK();
+ threadDebugInfoLocker.Unlock();
+ enable_interrupts();
}
-/*! The thread spinlock is being held.
+/*! Profiling timer event callback.
+ Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
@@ -1439,6 +1437,8 @@ profiling_event(timer* /*unused*/)
Thread* thread = thread_get_current_thread();
thread_debug_info& debugInfo = thread->debug_info;
+ SpinLocker threadDebugInfoLocker(debugInfo.lock);
+
bool flushBuffer = false;
if (profiling_do_sample(flushBuffer)) {
if (flushBuffer) {
@@ -1458,9 +1458,14 @@ profiling_event(timer* /*unused*/)
}
+/*! Called by the scheduler when a debugged thread has been unscheduled.
+ The scheduler lock is being held.
+*/
void
user_debug_thread_unscheduled(Thread* thread)
{
+ SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
+
// if running, cancel the profiling timer
struct timer* timer = thread->debug_info.profile.installed_timer;
if (timer != NULL) {
@@ -1470,14 +1475,23 @@ user_debug_thread_unscheduled(Thread* thread)
thread->debug_info.profile.installed_timer = NULL;
// cancel timer
+ threadDebugInfoLocker.Unlock();
+			// not necessary, but does no harm and reduces contention
cancel_timer(timer);
+			// since this is invoked on the same CPU, it cannot end up waiting
+			// for a timer hook that is already running
}
}
+/*! Called by the scheduler when a debugged thread has been scheduled.
+ The scheduler lock is being held.
+*/
void
user_debug_thread_scheduled(Thread* thread)
{
+ SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
+
if (thread->debug_info.profile.samples != NULL
&& !thread->debug_info.profile.buffer_full) {
// install profiling timer
@@ -1500,22 +1514,26 @@ broadcast_debugged_thread_message(Thread *nubThread, int32 code,
int32 cookie = 0;
while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
== B_OK) {
- // find the thread and get its debug port
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ // get the thread and lock it
+ Thread* thread = Thread::GetAndLock(threadInfo.thread);
+ if (thread == NULL)
+ continue;
+
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
+
+ // get the thread's debug port
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
port_id threadDebugPort = -1;
- thread_id threadID = -1;
- Thread *thread = thread_get_thread_struct_locked(threadInfo.thread);
if (thread && thread != nubThread && thread->team == nubThread->team
&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
threadDebugPort = thread->debug_info.debug_port;
- threadID = thread->id;
}
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ threadDebugInfoLocker.Unlock();
+ threadLocker.Unlock();
// send the message to the thread
if (threadDebugPort >= 0) {
@@ -1523,7 +1541,7 @@ broadcast_debugged_thread_message(Thread *nubThread, int32 code,
code, message, size);
if (error != B_OK) {
TRACE(("broadcast_debugged_thread_message(): Failed to send "
- "message to thread %ld: %lx\n", threadID, error));
+ "message to thread %ld: %lx\n", thread->id, error));
}
}
}
@@ -1542,6 +1560,9 @@ nub_thread_cleanup(Thread *nubThread)
team_debug_info teamDebugInfo;
bool destroyDebugInfo = false;
+ TeamLocker teamLocker(nubThread->team);
+ // required by update_threads_debugger_installed_flag()
+
cpu_status state = disable_interrupts();
GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
@@ -1559,6 +1580,8 @@ nub_thread_cleanup(Thread *nubThread)
RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
restore_interrupts(state);
+ teamLocker.Unlock();
+
if (destroyDebugInfo)
teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();
@@ -1580,30 +1603,31 @@ static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
thread_id threadID, port_id &threadDebugPort)
{
- status_t result = B_OK;
threadDebugPort = -1;
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ // get the thread
+ Thread* thread = Thread::GetAndLock(threadID);
+ if (thread == NULL)
+ return B_BAD_THREAD_ID;
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread) {
- if (thread->team != nubThread->team)
- result = B_BAD_VALUE;
- else if (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)
- threadDebugPort = thread->debug_info.debug_port;
- else
- result = B_BAD_THREAD_STATE;
- } else
- result = B_BAD_THREAD_ID;
+ // get the debug port
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ if (thread->team != nubThread->team)
+ return B_BAD_VALUE;
+ if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
+ return B_BAD_THREAD_STATE;
- if (result == B_OK && threadDebugPort < 0)
- result = B_ERROR;
+ threadDebugPort = thread->debug_info.debug_port;
- return result;
+ threadDebugInfoLocker.Unlock();
+
+ if (threadDebugPort < 0)
+ return B_ERROR;
+
+ return B_OK;
}
@@ -1614,7 +1638,6 @@ debug_nub_thread(void *)
// check, if we're still the current nub thread and get our port
cpu_status state = disable_interrupts();
-
GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
if (nubThread->team->debug_info.nub_thread != nubThread->id) {
@@ -1779,20 +1802,21 @@ debug_nub_thread(void *)
flags));
// set the flags
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ Thread* thread = Thread::GetAndLock(threadID);
+ if (thread == NULL)
+ break;
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
+
+ InterruptsSpinLocker threadDebugInfoLocker(
+ thread->debug_info.lock);
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread
- && thread->team == thread_get_current_thread()->team) {
+ if (thread->team == thread_get_current_thread()->team) {
flags |= thread->debug_info.flags
& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
atomic_set(&thread->debug_info.flags, flags);
}
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
-
break;
}
@@ -2025,12 +2049,16 @@ debug_nub_thread(void *)
ignoreOp, ignoreOnce, ignoreOnceOp));
// set the masks
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ Thread* thread = Thread::GetAndLock(threadID);
+ if (thread == NULL)
+ break;
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread
- && thread->team == thread_get_current_thread()->team) {
+ InterruptsSpinLocker threadDebugInfoLocker(
+ thread->debug_info.lock);
+
+ if (thread->team == thread_get_current_thread()->team) {
thread_debug_info &threadDebugInfo = thread->debug_info;
// set ignore mask
switch (ignoreOp) {
@@ -2059,9 +2087,6 @@ debug_nub_thread(void *)
}
}
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
-
break;
}
@@ -2076,19 +2101,19 @@ debug_nub_thread(void *)
uint64 ignore = 0;
uint64 ignoreOnce = 0;
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ Thread* thread = Thread::GetAndLock(threadID);
+ if (thread != NULL) {
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
+
+ InterruptsSpinLocker threadDebugInfoLocker(
+ thread->debug_info.lock);
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread) {
ignore = thread->debug_info.ignore_signals;
ignoreOnce = thread->debug_info.ignore_signals_once;
} else
result = B_BAD_THREAD_ID;
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
-
TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: "
"reply port: %ld, thread: %ld, ignore: %llx, "
"ignore once: %llx, result: %lx\n", nubThread->id,
@@ -2106,30 +2131,15 @@ debug_nub_thread(void *)
case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
{
// get the parameters
- thread_id threadID = message.set_signal_handler.thread;
int signal = message.set_signal_handler.signal;
struct sigaction &handler = message.set_signal_handler.handler;
TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: "
- "thread: %ld, signal: %d, handler: %p\n", nubThread->id,
- threadID, signal, handler.sa_handler));
-
- // check, if the thread exists and is ours
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
-
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread
- && thread->team != thread_get_current_thread()->team) {
- thread = NULL;
- }
-
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ "signal: %d, handler: %p\n", nubThread->id,
+ signal, handler.sa_handler));
// set the handler
- if (thread)
- sigaction_etc(threadID, signal, &handler, NULL);
+ sigaction(signal, &handler, NULL);
break;
}
@@ -2138,35 +2148,18 @@ debug_nub_thread(void *)
{
// get the parameters
replyPort = message.get_signal_handler.reply_port;
- thread_id threadID = message.get_signal_handler.thread;
int signal = message.get_signal_handler.signal;
status_t result = B_OK;
- // check, if the thread exists and is ours
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
-
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread) {
- if (thread->team != thread_get_current_thread()->team)
- result = B_BAD_VALUE;
- } else
- result = B_BAD_THREAD_ID;
-
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
-
// get the handler
- if (result == B_OK
- && sigaction_etc(threadID, signal, NULL,
- &reply.get_signal_handler.handler) != 0) {
+ if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
+ != 0) {
result = errno;
}
TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: "
- "reply port: %ld, thread: %ld, signal: %d, "
- "handler: %p\n", nubThread->id, replyPort,
- threadID, signal,
+ "reply port: %ld, signal: %d, handler: %p\n", nubThread->id,
+ replyPort, signal,
reply.get_signal_handler.handler.sa_handler));
// prepare the message
@@ -2275,12 +2268,16 @@ debug_nub_thread(void *)
// get the thread and set the profile info
int32 imageEvent = nubThread->team->debug_info.image_event;
if (result == B_OK) {
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ Thread* thread = Thread::GetAndLock(threadID);
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (thread && thread->team == nubThread->team) {
+ if (thread != NULL && thread->team == nubThread->team) {
thread_debug_info &threadDebugInfo = thread->debug_info;
+
+ InterruptsSpinLocker threadDebugInfoLocker(
+ threadDebugInfo.lock);
+
if (threadDebugInfo.profile.samples == NULL) {
threadDebugInfo.profile.interval = interval;
threadDebugInfo.profile.sample_area
@@ -2307,9 +2304,6 @@ debug_nub_thread(void *)
result = B_BAD_VALUE;
} else
result = B_BAD_THREAD_ID;
-
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
}
// on error unlock and delete the sample area
@@ -2349,12 +2343,16 @@ debug_nub_thread(void *)
int32 droppedTicks = 0;
// get the thread and detach the profile info
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
+ Thread* thread = Thread::GetAndLock(threadID);
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
- Thread *thread = thread_get_thread_struct_locked(threadID);
if (thread && thread->team == nubThread->team) {
thread_debug_info &threadDebugInfo = thread->debug_info;
+
+ InterruptsSpinLocker threadDebugInfoLocker(
+ threadDebugInfo.lock);
+
if (threadDebugInfo.profile.samples != NULL) {
sampleArea = threadDebugInfo.profile.sample_area;
samples = threadDebugInfo.profile.samples;
@@ -2373,8 +2371,7 @@ debug_nub_thread(void *)
} else
result = B_BAD_THREAD_ID;
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ threadLocker.Unlock();
// prepare the reply
if (result == B_OK) {
@@ -2424,9 +2421,7 @@ debug_nub_thread(void *)
/** \brief Helper function for install_team_debugger(), that sets up the team
and thread debug infos.
- Interrupts must be disabled and the team debug info lock of the team to be
- debugged must be held. The function will release the lock, but leave
- interrupts disabled.
+ The caller must hold the team's lock as well as the team debug info lock.
The function also clears the arch specific team and thread debug infos
(including among other things formerly set break/watchpoints).
@@ -2447,14 +2442,11 @@ install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
arch_clear_team_debug_info(&team->debug_info.arch_info);
- RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
-
// set the user debug flags and signal masks of all threads to the default
- GRAB_THREAD_LOCK();
+ for (Thread *thread = team->thread_list; thread;
+ thread = thread->team_next) {
+ SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
- for (Thread *thread = team->thread_list;
- thread;
- thread = thread->team_next) {
if (thread->id == nubThread) {
atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
} else {
@@ -2469,8 +2461,6 @@ install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
}
}
- RELEASE_THREAD_LOCK();
-
// update the thread::flags fields
update_threads_debugger_installed_flag(team);
}
@@ -2519,10 +2509,10 @@ install_team_debugger(team_id teamID, port_id debuggerPort,
bool done = false;
port_id result = B_ERROR;
bool handOver = false;
- bool releaseDebugInfoLock = true;
port_id oldDebuggerPort = -1;
port_id nubPort = -1;
+ TeamLocker teamLocker(team);
cpu_status state = disable_interrupts();
GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
@@ -2553,7 +2543,6 @@ install_team_debugger(team_id teamID, port_id debuggerPort,
debuggerPort, nubPort, team->debug_info.nub_thread,
team->debug_info.debugger_write_lock, causingThread);
- releaseDebugInfoLock = false;
handOver = true;
done = true;
}
@@ -2570,11 +2559,9 @@ install_team_debugger(team_id teamID, port_id debuggerPort,
error = B_BAD_VALUE;
}
- // in case of a handover the lock has already been released
- if (releaseDebugInfoLock)
- RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
-
+ RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
restore_interrupts(state);
+ teamLocker.Unlock();
if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
// The old debugger must just have died. Just proceed as
@@ -2681,13 +2668,14 @@ install_team_debugger(team_id teamID, port_id debuggerPort,
if (error == B_OK) {
snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug task", teamID);
nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
- B_NORMAL_PRIORITY, NULL, teamID, -1);
+ B_NORMAL_PRIORITY, NULL, teamID);
if (nubThread < 0)
error = nubThread;
}
// now adjust the debug info accordingly
if (error == B_OK) {
+ TeamLocker teamLocker(team);
state = disable_interrupts();
GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
@@ -2696,6 +2684,7 @@ install_team_debugger(team_id teamID, port_id debuggerPort,
debuggerPort, nubPort, nubThread, debuggerWriteLock,
causingThread);
+ RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
restore_interrupts(state);
}
@@ -2865,52 +2854,59 @@ _user_debug_thread(thread_id threadID)
{
TRACE(("[%ld] _user_debug_thread(%ld)\n", find_thread(NULL), threadID));
- // tell the thread to stop as soon as possible
- status_t error = B_OK;
- cpu_status state = disable_interrupts();
- GRAB_THREAD_LOCK();
-
- Thread *thread = thread_get_thread_struct_locked(threadID);
- if (!thread) {
- // thread doesn't exist any longer
- error = B_BAD_THREAD_ID;
- } else if (thread->team == team_get_kernel_team()) {
- // we can't debug the kernel team
- error = B_NOT_ALLOWED;
- } else if (thread->debug_info.flags & B_THREAD_DEBUG_DYING) {
- // the thread is already dying -- too late to debug it
- error = B_BAD_THREAD_ID;
- } else if (thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) {
- // don't debug the nub thread
- error = B_NOT_ALLOWED;
- } else if (!(thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)) {
- // set the flag that tells the thread to stop as soon as possible
- atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
+ // get the thread
+ Thread* thread = Thread::GetAndLock(threadID);
+ if (thread == NULL)
+ return B_BAD_THREAD_ID;
+ BReference<Thread> threadReference(thread, true);
+ ThreadLocker threadLocker(thread, true);
- update_thread_user_debug_flag(thread);
+ // we can't debug the kernel team
+ if (thread->team == team_get_kernel_team())
+ return B_NOT_ALLOWED;
- switch (thread->state) {
- case B_THREAD_SUSPENDED:
- // thread suspended: wake it up
- scheduler_enqueue_in_run_queue(thread);
- break;
+ InterruptsLocker interruptsLocker;
+ SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
- default:
- // thread may be waiting: interrupt it
- thread_interrupt(thread, false);
- // TODO: If the thread is already in the kernel and e.g.
- // about to acquire a semaphore (before
- // thread_prepare_to_block()), we won't interrupt it.
- // Maybe we should rather send a signal (SIGTRAP).
- scheduler_reschedule_if_necessary_locked();
- break;
- }
- }
+ // If the thread is already dying, it's too late to debug it.
+ if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
+ return B_BAD_THREAD_ID;
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ // don't debug the nub thread
+ if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
+ return B_NOT_ALLOWED;
- return error;
+ // already marked stopped?
+ if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0)
+ return B_OK;
+
+ // set the flag that tells the thread to stop as soon as possible
+ atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
+
+ update_thread_user_debug_flag(thread);
+
+ // resume/interrupt the thread, if necessary
+ threadDebugInfoLocker.Unlock();
+ SpinLocker schedulerLocker(gSchedulerLock);
+
+ switch (thread->state) {
+ case B_THREAD_SUSPENDED:
+ // thread suspended: wake it up
+ scheduler_enqueue_in_run_queue(thread);
+ break;
+
+ default:
+ // thread may be waiting: interrupt it
+ thread_interrupt(thread, false);
+ // TODO: If the thread is already in the kernel and e.g.
+ // about to acquire a semaphore (before
+ // thread_prepare_to_block()), we won't interrupt it.
+ // Maybe we should rather send a signal (SIGTRAP).
+ scheduler_reschedule_if_necessary_locked();
+ break;
+ }
+
+ return B_OK;
}
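
The user_debugger.cpp changes consistently trade the old global thread/team spinlocks for object lookups that return a referenced, locked object, plus the new per-thread debug info spinlock. A minimal sketch of the lookup idiom most of the hunks above share (kernel-internal API, not compilable outside the kernel tree):

	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
		// adopt the reference returned by GetAndLock()
	ThreadLocker threadLocker(thread, true);
		// adopt the lock acquired by GetAndLock()

	// The thread's debug state is now guarded by its own spinlock instead of
	// the former global gThreadSpinlock.
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
	// ... read or update thread->debug_info here ...
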
diff --git a/src/system/kernel/device_manager/IOSchedulerSimple.cpp b/src/system/kernel/device_manager/IOSchedulerSimple.cpp
index f2acf90..07bd88a 100644
--- a/src/system/kernel/device_manager/IOSchedulerSimple.cpp
+++ b/src/system/kernel/device_manager/IOSchedulerSimple.cpp
@@ -810,8 +810,7 @@ IOSchedulerSimple::_GetRequestOwner(team_id team, thread_id thread,
RequestOwnerList existingOwners;
while ((owner = fUnusedRequestOwners.RemoveHead()) != NULL) {
- if (owner->thread < 0
- || thread_get_thread_struct(owner->thread) == NULL) {
+ if (owner->thread < 0 || !Thread::IsAlive(owner->thread)) {
if (owner->thread >= 0)
fRequestOwners->RemoveUnchecked(owner);
owner->team = team;
diff --git a/src/system/kernel/elf.cpp b/src/system/kernel/elf.cpp
index 5874697..ffc90cc 100644
--- a/src/system/kernel/elf.cpp
+++ b/src/system/kernel/elf.cpp
@@ -1773,6 +1773,20 @@ elf_debug_lookup_symbol(const char* searchName)
status_t
+elf_lookup_kernel_symbol(const char* name, elf_symbol_info* info)
+{
+ // find the symbol
+ Elf32_Sym* foundSymbol = elf_find_symbol(sKernelImage, name, NULL, false);
+ if (foundSymbol == NULL)
+ return B_MISSING_SYMBOL;
+
+ info->address = foundSymbol->st_value + sKernelImage->text_region.delta;
+ info->size = foundSymbol->st_size;
+ return B_OK;
+}
+
+
+status_t
elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
{
struct Elf32_Ehdr elfHeader;
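
elf_lookup_kernel_symbol() is a new kernel-internal helper for resolving a symbol in the kernel image. A hypothetical caller would look roughly like this (the symbol name and the dprintf() output are illustrative only):

	elf_symbol_info info;
	if (elf_lookup_kernel_symbol("gSchedulerLock", &info) == B_OK) {
		// the address already has the kernel text relocation applied
		dprintf("gSchedulerLock at %#lx, size %lu\n", info.address,
			(unsigned long)info.size);
	}
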
diff --git a/src/system/kernel/fs/Vnode.cpp b/src/system/kernel/fs/Vnode.cpp
index 7238290..890e29b 100644
--- a/src/system/kernel/fs/Vnode.cpp
+++ b/src/system/kernel/fs/Vnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
@@ -88,6 +88,6 @@ vnode::_WakeUpLocker()
atomic_and(&fFlags, ~kFlagsWaitingLocker);
// and wake it up
- InterruptsSpinLocker threadLocker(gThreadSpinlock);
+ InterruptsSpinLocker threadLocker(gSchedulerLock);
thread_unblock_locked(waiter->thread, B_OK);
}
diff --git a/src/system/kernel/fs/fd.cpp b/src/system/kernel/fs/fd.cpp
index 5c85e8a..d1e89b6 100644
--- a/src/system/kernel/fs/fd.cpp
+++ b/src/system/kernel/fs/fd.cpp
@@ -476,17 +476,12 @@ int
dup_foreign_fd(team_id fromTeam, int fd, bool kernel)
{
// get the I/O context for the team in question
- InterruptsSpinLocker teamsLocker(gTeamSpinlock);
- Team* team = team_get_team_struct_locked(fromTeam);
+ Team* team = Team::Get(fromTeam);
if (team == NULL)
return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
io_context* fromContext = team->io_context;
- vfs_get_io_context(fromContext);
-
- teamsLocker.Unlock();
-
- CObjectDeleter<io_context> _(fromContext, vfs_put_io_context);
// get the file descriptor
file_descriptor* descriptor = get_fd(fromContext, fd);
diff --git a/src/system/kernel/fs/fifo.cpp b/src/system/kernel/fs/fifo.cpp
index 905e716..9c771ff 100644
--- a/src/system/kernel/fs/fifo.cpp
+++ b/src/system/kernel/fs/fifo.cpp
@@ -100,7 +100,7 @@ public:
TRACE("ReadRequest %p::Notify(), fNotified %d\n", this, fNotified);
if (!fNotified) {
- SpinLocker threadLocker(gThreadSpinlock);
+ SpinLocker schedulerLocker(gSchedulerLock);
thread_unblock_locked(fThread, status);
fNotified = true;
}
diff --git a/src/system/kernel/fs/vfs.cpp b/src/system/kernel/fs/vfs.cpp
index 85be4e8..e1e1d06 100644
--- a/src/system/kernel/fs/vfs.cpp
+++ b/src/system/kernel/fs/vfs.cpp
@@ -1764,62 +1764,18 @@ replace_vnode_if_disconnected(struct fs_mount* mount,
This is not a cheap function and should be used with care and rarely.
TODO: there is currently no means to stop a blocking read/write!
*/
-void
+static void
disconnect_mount_or_vnode_fds(struct fs_mount* mount,
struct vnode* vnodeToDisconnect)
{
// iterate over all teams and peek into their file descriptors
- int32 nextTeamID = 0;
-
- while (true) {
- struct io_context* context = NULL;
- bool contextLocked = false;
- Team* team = NULL;
- team_id lastTeamID;
-
- cpu_status state = disable_interrupts();
- SpinLocker teamsLock(gTeamSpinlock);
-
- lastTeamID = peek_next_thread_id();
- if (nextTeamID < lastTeamID) {
- // get next valid team
- while (nextTeamID < lastTeamID
- && !(team = team_get_team_struct_locked(nextTeamID))) {
- nextTeamID++;
- }
-
- if (team) {
- context = (io_context*)team->io_context;
-
- // Some acrobatics to lock the context in a safe way
- // (cf. _kern_get_next_fd_info() for details).
- GRAB_THREAD_LOCK();
- teamsLock.Unlock();
- contextLocked = mutex_lock_threads_locked(&context->io_mutex)
- == B_OK;
- RELEASE_THREAD_LOCK();
-
- nextTeamID++;
- }
- }
-
- teamsLock.Unlock();
- restore_interrupts(state);
-
- if (context == NULL)
- break;
-
- // we now have a context - since we couldn't lock it while having
- // safe access to the team structure, we now need to lock the mutex
- // manually
+ TeamListIterator teamIterator;
+ while (Team* team = teamIterator.Next()) {
+ BReference<Team> teamReference(team, true);
- if (!contextLocked) {
- // team seems to be gone, go over to the next team
- continue;
- }
-
- // the team cannot be deleted completely while we're owning its
- // io_context mutex, so we can safely play with it now
+ // lock the I/O context
+ io_context* context = team->io_context;
+ MutexLocker contextLocker(context->io_mutex);
replace_vnode_if_disconnected(mount, vnodeToDisconnect, context->root,
sRoot, true);
@@ -1843,8 +1799,6 @@ disconnect_mount_or_vnode_fds(struct fs_mount* mount,
put_fd(descriptor);
}
}
-
- mutex_unlock(&context->io_mutex);
}
}
@@ -7828,38 +7782,15 @@ _kern_get_next_fd_info(team_id teamID, uint32* _cookie, fd_info* info,
if (infoSize != sizeof(fd_info))
return B_BAD_VALUE;
- struct io_context* context = NULL;
- Team* team = NULL;
-
- cpu_status state = disable_interrupts();
- GRAB_TEAM_LOCK();
-
- bool contextLocked = false;
- team = team_get_team_struct_locked(teamID);
- if (team) {
- // We cannot lock the IO context while holding the team lock, nor can
- // we just drop the team lock, since it might be deleted in the
- // meantime. team_remove_team() acquires the thread lock when removing
- // the team from the team hash table, though. Hence we switch to the
- // thread lock and use mutex_lock_threads_locked().
- context = (io_context*)team->io_context;
-
- GRAB_THREAD_LOCK();
- RELEASE_TEAM_LOCK();
- contextLocked = mutex_lock_threads_locked(&context->io_mutex) == B_OK;
- RELEASE_THREAD_LOCK();
- } else
- RELEASE_TEAM_LOCK();
-
- restore_interrupts(state);
-
- if (!contextLocked) {
- // team doesn't exit or seems to be gone
+ // get the team
+ Team* team = Team::Get(teamID);
+ if (team == NULL)
return B_BAD_TEAM_ID;
- }
+ BReference<Team> teamReference(team, true);
- // the team cannot be deleted completely while we're owning its
- // io_context mutex, so we can safely play with it now
+ // now that we have a team reference, its I/O context won't go away
+ io_context* context = team->io_context;
+ MutexLocker contextLocker(context->io_mutex);
uint32 slot = *_cookie;
@@ -7869,10 +7800,8 @@ _kern_get_next_fd_info(team_id teamID, uint32* _cookie, fd_info* info,
slot++;
}
- if (slot >= context->table_size) {
- mutex_unlock(&context->io_mutex);
+ if (slot >= context->table_size)
return B_ENTRY_NOT_FOUND;
- }
info->number = slot;
info->open_mode = descriptor->open_mode;
@@ -7886,8 +7815,6 @@ _kern_get_next_fd_info(team_id teamID, uint32* _cookie, fd_info* info,
info->node = -1;
}
- mutex_unlock(&context->io_mutex);
-
*_cookie = slot + 1;
return B_OK;
}
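
Both vfs.cpp hunks rely on the same simplification: holding a Team reference keeps the team's io_context alive, so the old team-lock/thread-lock acrobatics collapse into a plain mutex acquisition. A sketch of the resulting access pattern (kernel-internal API):

	Team* team = Team::Get(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
		// the reference keeps team->io_context from being deleted

	io_context* context = team->io_context;
	MutexLocker contextLocker(context->io_mutex);
	// ... walk the context's file descriptor table; the locker releases the
	// mutex on every return path ...
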
diff --git a/src/system/kernel/image.cpp b/src/system/kernel/image.cpp
index 0643e00..05bd513 100644
--- a/src/system/kernel/image.cpp
+++ b/src/system/kernel/image.cpp
@@ -145,7 +145,7 @@ unregister_image(Team *team, image_id id)
/*! Counts the registered images from the specified team.
- The team lock must be held when you call this function.
+ Interrupts must be enabled.
*/
int32
count_images(Team *team)
@@ -153,6 +153,8 @@ count_images(Team *team)
struct image *image = NULL;
int32 count = 0;
+ MutexLocker locker(sImageMutex);
+
while ((image = (struct image*)list_get_next_item(&team->image_list, image))
!= NULL) {
count++;
@@ -215,45 +217,29 @@ _get_next_image_info(team_id teamID, int32 *cookie, image_info *info,
if (size > sizeof(image_info))
return B_BAD_VALUE;
- status_t status = B_ENTRY_NOT_FOUND;
- Team *team;
- cpu_status state;
+ // get the team
+ Team* team = Team::Get(teamID);
+ if (team == NULL)
+ return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
- mutex_lock(&sImageMutex);
+ // iterate through the team's images
+ MutexLocker imageLocker(sImageMutex);
- state = disable_interrupts();
- GRAB_TEAM_LOCK();
+ struct image* image = NULL;
+ int32 count = 0;
- if (teamID == B_CURRENT_TEAM)
- team = thread_get_current_thread()->team;
- else if (teamID == B_SYSTEM_TEAM)
- team = team_get_kernel_team();
- else
- team = team_get_team_struct_locked(teamID);
-
- if (team) {
- struct image *image = NULL;
- int32 count = 0;
-
- while ((image = (struct image*)list_get_next_item(&team->image_list,
- image)) != NULL) {
- if (count == *cookie) {
- memcpy(info, &image->info, size);
- status = B_OK;
- (*cookie)++;
- break;
- }
- count++;
+ while ((image = (struct image*)list_get_next_item(&team->image_list,
+ image)) != NULL) {
+ if (count == *cookie) {
+ memcpy(info, &image->info, size);
+ (*cookie)++;
+ return B_OK;
}
- } else
- status = B_BAD_TEAM_ID;
-
- RELEASE_TEAM_LOCK();
- restore_interrupts(state);
-
- mutex_unlock(&sImageMutex);
+ count++;
+ }
- return status;
+ return B_ENTRY_NOT_FOUND;
}
@@ -366,24 +352,21 @@ image_init(void)
static void
notify_loading_app(status_t result, bool suspend)
{
- cpu_status state;
- Team *team;
+ Team* team = thread_get_current_thread()->team;
- state = disable_interrupts();
- GRAB_TEAM_LOCK();
+ TeamLocker teamLocker(team);
- team = thread_get_current_thread()->team;
if (team->loading_info) {
// there's indeed someone waiting
- struct team_loading_info *loadingInfo = team->loading_info;
+ struct team_loading_info* loadingInfo = team->loading_info;
team->loading_info = NULL;
loadingInfo->result = result;
loadingInfo->done = true;
- // we're done with the team stuff, get the thread lock instead
- RELEASE_TEAM_LOCK();
- GRAB_THREAD_LOCK();
+ // we're done with the team stuff, get the scheduler lock instead
+ teamLocker.Unlock();
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
// wake up the waiting thread
if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
@@ -394,14 +377,7 @@ notify_loading_app(status_t result, bool suspend)
thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
scheduler_reschedule();
}
-
- RELEASE_THREAD_LOCK();
- } else {
- // no-one is waiting
- RELEASE_TEAM_LOCK();
}
-
- restore_interrupts(state);
}
diff --git a/src/system/kernel/kernel_versions b/src/system/kernel/kernel_versions
index 895ff9c..9b4daf5 100644
--- a/src/system/kernel/kernel_versions
+++ b/src/system/kernel/kernel_versions
@@ -8,3 +8,6 @@ KERNEL_1_ALPHA1 {
KERNEL_1_ALPHA3 {
} KERNEL_1_ALPHA1;
+
+KERNEL_1_ALPHA4 {
+} KERNEL_1_ALPHA3;
diff --git a/src/system/kernel/lib/Jamfile b/src/system/kernel/lib/Jamfile
index f6cf003..77e0958 100644
--- a/src/system/kernel/lib/Jamfile
+++ b/src/system/kernel/lib/Jamfile
@@ -3,6 +3,12 @@ SubDir HAIKU_TOP src system kernel lib ;
UsePrivateHeaders shared ;
UsePrivateHeaders [ FDirName libroot locale ] ;
+# force inclusion of kernel_lib.h
+local kernelLibHeader = [ FDirName $(SUBDIR) kernel_lib.h ] ;
+CCFLAGS += -include $(kernelLibHeader) ;
+C++FLAGS += -include $(kernelLibHeader) ;
+
+
# kernel libroot os files
KernelMergeObject kernel_os_main.o :
diff --git a/src/system/kernel/lib/kernel_lib.h b/src/system/kernel/lib/kernel_lib.h
new file mode 100644
index 0000000..e34b5b5
--- /dev/null
+++ b/src/system/kernel/lib/kernel_lib.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_KERNEL_LIB_H
+#define _KERNEL_KERNEL_LIB_H
+
+
+#include <pthread.h>
+
+
+#define pthread_testcancel() do {} while (false)
+
+
+#endif // _KERNEL_KERNEL_LIB_H
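
Together with the Jamfile change above, this header is force-included into every kernel libroot translation unit, so sources shared with userland compile unchanged: their pthread_testcancel() calls expand to an empty statement. A hypothetical shared-source fragment to illustrate the effect (the function and its body are invented for illustration):

	// In a userland libroot build this is a real cancellation point; in the
	// kernel build the force-included macro turns pthread_testcancel() into
	// an empty do/while statement, so no cancellation machinery is pulled in.
	ssize_t
	read_all(int fd, void* buffer, size_t size)
	{
		size_t total = 0;
		while (total < size) {
			pthread_testcancel();
			ssize_t bytesRead = read(fd, (char*)buffer + total, size - total);
			if (bytesRead < 0)
				return bytesRead;
			if (bytesRead == 0)
				break;
			total += bytesRead;
		}
		return (ssize_t)total;
	}
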
diff --git a/src/system/kernel/locks/lock.cpp b/src/system/kernel/locks/lock.cpp
index 9f69313..1cc5c35 100644
--- a/src/system/kernel/locks/lock.cpp
+++ b/src/system/kernel/locks/lock.cpp
@@ -260,7 +260,7 @@ rw_lock_destroy(rw_lock* lock)
? (char*)lock->name : NULL;
// unblock all waiters
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
#if KDEBUG
if (lock->waiters != NULL && thread_get_current_thread_id()
@@ -296,7 +296,7 @@ rw_lock_destroy(rw_lock* lock)
status_t
_rw_lock_read_lock(rw_lock* lock)
{
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
// We might be the writer ourselves.
if (lock->holder == thread_get_current_thread_id()) {
@@ -328,7 +328,7 @@ status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
bigtime_t timeout)
{
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
// We might be the writer ourselves.
if (lock->holder == thread_get_current_thread_id()) {
@@ -407,9 +407,9 @@ _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
void
-_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
+_rw_lock_read_unlock(rw_lock* lock, bool schedulerLocked)
{
- InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
+ InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);
// If we're still holding the write lock or if there are other readers,
// no-one can be woken up.
@@ -437,7 +437,7 @@ _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
status_t
rw_lock_write_lock(rw_lock* lock)
{
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
// If we're already the lock holder, we just need to increment the owner
// count.
@@ -473,9 +473,9 @@ rw_lock_write_lock(rw_lock* lock)
void
-_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
+_rw_lock_write_unlock(rw_lock* lock, bool schedulerLocked)
{
- InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
+ InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);
if (thread_get_current_thread_id() != lock->holder) {
panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
@@ -600,7 +600,7 @@ mutex_destroy(mutex* lock)
? (char*)lock->name : NULL;
// unblock all waiters
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
#if KDEBUG
if (lock->waiters != NULL && thread_get_current_thread_id()
@@ -631,7 +631,7 @@ mutex_destroy(mutex* lock)
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
#if !KDEBUG
if (atomic_add(&from->count, 1) < -1)
@@ -645,7 +645,7 @@ mutex_switch_lock(mutex* from, mutex* to)
status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
#if KDEBUG_RW_LOCK_DEBUG
_rw_lock_write_unlock(from, true);
@@ -660,17 +660,17 @@ mutex_switch_from_read_lock(rw_lock* from, mutex* to)
status_t
-_mutex_lock(mutex* lock, bool threadsLocked)
+_mutex_lock(mutex* lock, bool schedulerLocked)
{
#if KDEBUG
- if (!gKernelStartup && !threadsLocked && !are_interrupts_enabled()) {
+ if (!gKernelStartup && !schedulerLocked && !are_interrupts_enabled()) {
panic("_mutex_lock(): called with interrupts disabled for lock %p",
lock);
}
#endif
// lock only, if !threadsLocked
- InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
+ InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);
// Might have been released after we decremented the count, but before
// we acquired the spinlock.
@@ -716,10 +716,10 @@ _mutex_lock(mutex* lock, bool threadsLocked)
void
-_mutex_unlock(mutex* lock, bool threadsLocked)
+_mutex_unlock(mutex* lock, bool schedulerLocked)
{
// lock only, if !threadsLocked
- InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
+ InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);
#if KDEBUG
if (thread_get_current_thread_id() != lock->holder) {
@@ -768,7 +768,7 @@ status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker _(gSchedulerLock);
if (lock->holder <= 0) {
lock->holder = thread_get_current_thread_id();
@@ -789,7 +789,7 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
}
#endif
- InterruptsSpinLocker locker(gThreadSpinlock);
+ InterruptsSpinLocker locker(gSchedulerLock);
// Might have been released after we decremented the count, but before
// we acquired the spinlock.
diff --git a/src/system/kernel/main.cpp b/src/system/kernel/main.cpp
index 34fd5ee..a45a947 100644
--- a/src/system/kernel/main.cpp
+++ b/src/system/kernel/main.cpp
@@ -23,6 +23,7 @@
#include <condition_variable.h>
#include <cpu.h>
#include <debug.h>
+#include <DPC.h>
#include <elf.h>
#include <fs/devfs.h>
#include <fs/KPath.h>
@@ -157,6 +158,7 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
timer_init(&sKernelArgs);
TRACE("init real time clock\n");
rtc_init(&sKernelArgs);
+ timer_init_post_rtc();
TRACE("init condition variables\n");
condition_variable_init();
@@ -180,6 +182,8 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
TRACE("init VM threads\n");
vm_init_post_thread(&sKernelArgs);
low_resource_manager_init_post_thread();
+ TRACE("init DPC\n");
+ dpc_init();
TRACE("init VFS\n");
vfs_init(&sKernelArgs);
#if ENABLE_SWAP_SUPPORT
diff --git a/src/system/kernel/port.cpp b/src/system/kernel/port.cpp
index bb19d60..3c076bd 100644
--- a/src/system/kernel/port.cpp
+++ b/src/system/kernel/port.cpp
@@ -1,4 +1,5 @@
/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@@ -19,6 +20,8 @@
#include <OS.h>
+#include <AutoDeleter.h>
+
#include <arch/int.h>
#include <heap.h>
#include <kernel.h>
@@ -41,6 +44,23 @@
#endif
+// Locking:
+// * sPortsLock: Protects the sPorts hash table, Team::port_list, and
+// Port::owner.
+// * Port::lock: Protects all Port members save team_link, hash_link, and lock.
+// id is immutable.
+//
+// The locking order is sPortsLock -> Port::lock. A port must be looked up
+// in sPorts and locked with sPortsLock held. Afterwards sPortsLock can be
+// dropped, unless any field guarded by sPortsLock is accessed.
+
+
+struct port_message;
+
+
+static void put_port_message(port_message* message);
+
+
struct port_message : DoublyLinkedListLinkImpl<port_message> {
int32 code;
size_t size;
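
A minimal sketch of the access pattern the locking comment above prescribes (the caller function is hypothetical; get_locked_port() and the MutexLocker usage appear later in this patch):

	static status_t
	hypothetical_port_operation(port_id id)
	{
		// get_locked_port() looks the port up and locks it while holding
		// sPortsLock, then drops sPortsLock again before returning
		Port* port = get_locked_port(id);
		if (port == NULL)
			return B_BAD_PORT_ID;
		MutexLocker locker(port->lock, true);
			// adopt the already acquired Port::lock

		// ... access Port members other than owner/team_link/hash_link ...

		return B_OK;
	}
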
@@ -52,8 +72,10 @@ struct port_message : DoublyLinkedListLinkImpl<port_message> {
typedef DoublyLinkedList<port_message> MessageList;
-struct port_entry {
+
+struct Port {
struct list_link team_link;
+ Port* hash_link;
port_id id;
team_id owner;
int32 capacity;
@@ -66,8 +88,62 @@ struct port_entry {
// messages read from port since creation
select_info* select_infos;
MessageList messages;
+
+ Port(team_id owner, int32 queueLength, char* name)
+ :
+ owner(owner),
+ capacity(queueLength),
+ read_count(0),
+ write_count(queueLength),
+ total_count(0),
+ select_infos(NULL)
+ {
+ // id is initialized when the caller adds the port to the hash table
+
+ mutex_init(&lock, name);
+ read_condition.Init(this, "port read");
+ write_condition.Init(this, "port write");
+ }
+
+ ~Port()
+ {
+ while (port_message* message = messages.RemoveHead())
+ put_port_message(message);
+
+ free((char*)lock.name);
+ lock.name = NULL;
+ }
+};
+
+
+struct PortHashDefinition {
+ typedef port_id KeyType;
+ typedef Port ValueType;
+
+ size_t HashKey(port_id key) const
+ {
+ return key;
+ }
+
+ size_t Hash(Port* value) const
+ {
+ return HashKey(value->id);
+ }
+
+ bool Compare(port_id key, Port* value) const
+ {
+ return value->id == key;
+ }
+
+ Port*& GetLink(Port* value) const
+ {
+ return value->hash_link;
+ }
};
+typedef BOpenHashTable<PortHashDefinition> PortHashTable;
+
+
class PortNotificationService : public DefaultNotificationService {
public:
PortNotificationService();
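
For reference, a minimal sketch of how the hash table defined above is used elsewhere in this patch (fragments only, assuming the static sPorts table with sPortsLock held; error handling omitted):

	sPorts.Init();                      // allocate the hash table
	sPorts.Insert(port);                // links the port via Port::hash_link
	Port* found = sPorts.Lookup(id);    // NULL if the ID is unknown
	sPorts.Remove(port);
	for (PortHashTable::Iterator it = sPorts.GetIterator();
			Port* other = it.Next();) {
		// visits every port in the table
	}
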
@@ -81,13 +157,13 @@ namespace PortTracing {
class Create : public AbstractTraceEntry {
public:
- Create(port_entry& port)
+ Create(Port* port)
:
- fID(port.id),
- fOwner(port.owner),
- fCapacity(port.capacity)
+ fID(port->id),
+ fOwner(port->owner),
+ fCapacity(port->capacity)
{
- fName = alloc_tracing_buffer_strcpy(port.lock.name, B_OS_NAME_LENGTH,
+ fName = alloc_tracing_buffer_strcpy(port->lock.name, B_OS_NAME_LENGTH,
false);
Initialized();
@@ -109,9 +185,9 @@ private:
class Delete : public AbstractTraceEntry {
public:
- Delete(port_entry& port)
+ Delete(Port* port)
:
- fID(port.id)
+ fID(port->id)
{
Initialized();
}
@@ -128,11 +204,12 @@ private:
class Read : public AbstractTraceEntry {
public:
- Read(port_entry& port, int32 code, ssize_t result)
+ Read(port_id id, int32 readCount, int32 writeCount, int32 code,
+ ssize_t result)
:
- fID(port.id),
- fReadCount(port.read_count),
- fWriteCount(port.write_count),
+ fID(id),
+ fReadCount(readCount),
+ fWriteCount(writeCount),
fCode(code),
fResult(result)
{
@@ -156,11 +233,12 @@ private:
class Write : public AbstractTraceEntry {
public:
- Write(port_entry& port, int32 code, size_t bufferSize, ssize_t result)
+ Write(port_id id, int32 readCount, int32 writeCount, int32 code,
+ size_t bufferSize, ssize_t result)
:
- fID(port.id),
- fReadCount(port.read_count),
- fWriteCount(port.write_count),
+ fID(id),
+ fReadCount(readCount),
+ fWriteCount(writeCount),
fCode(code),
fBufferSize(bufferSize),
fResult(result)
@@ -186,11 +264,12 @@ private:
class Info : public AbstractTraceEntry {
public:
- Info(port_entry& port, int32 code, ssize_t result)
+ Info(port_id id, int32 readCount, int32 writeCount, int32 code,
+ ssize_t result)
:
- fID(port.id),
- fReadCount(port.read_count),
- fWriteCount(port.write_count),
+ fID(id),
+ fReadCount(readCount),
+ fWriteCount(writeCount),
fCode(code),
fResult(result)
{
@@ -214,10 +293,10 @@ private:
class OwnerChange : public AbstractTraceEntry {
public:
- OwnerChange(port_entry& port, team_id newOwner, status_t status)
+ OwnerChange(Port* port, team_id newOwner, status_t status)
:
- fID(port.id),
- fOldOwner(port.owner),
+ fID(port->id),
+ fOldOwner(port->owner),
fNewOwner(newOwner),
fStatus(status)
{
@@ -253,20 +332,17 @@ static const size_t kBufferGrowRate = kInitialPortBufferSize;
#define MAX_QUEUE_LENGTH 4096
#define PORT_MAX_MESSAGE_SIZE (256 * 1024)
-// sMaxPorts must be power of 2
static int32 sMaxPorts = 4096;
static int32 sUsedPorts = 0;
-static struct port_entry* sPorts;
-static area_id sPortArea;
+static PortHashTable sPorts;
static heap_allocator* sPortAllocator;
static ConditionVariable sNoSpaceCondition;
static vint32 sTotalSpaceInUse;
static vint32 sAreaChangeCounter;
static vint32 sAllocatingArea;
+static port_id sNextPortID = 1;
static bool sPortsActive = false;
-static port_id sNextPort = 1;
-static int32 sFirstFreeSlot = 1;
static mutex sPortsLock = MUTEX_INITIALIZER("ports list");
static PortNotificationService sNotificationService;
@@ -303,7 +379,6 @@ dump_port_list(int argc, char** argv)
{
const char* name = NULL;
team_id owner = -1;
- int32 i;
if (argc > 2) {
if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
@@ -316,10 +391,9 @@ dump_port_list(int argc, char** argv)
kprintf("port id cap read-cnt write-cnt total team "
"name\n");
- for (i = 0; i < sMaxPorts; i++) {
- struct port_entry* port = &sPorts[i];
- if (port->id < 0
- || (owner != -1 && port->owner != owner)
+ for (PortHashTable::Iterator it = sPorts.GetIterator();
+ Port* port = it.Next();) {
+ if ((owner != -1 && port->owner != owner)
|| (name != NULL && strstr(port->lock.name, name) == NULL))
continue;
@@ -333,7 +407,7 @@ dump_port_list(int argc, char** argv)
static void
-_dump_port_info(struct port_entry* port)
+_dump_port_info(Port* port)
{
kprintf("PORT: %p\n", port);
kprintf(" id: %ld\n", port->id);
@@ -372,7 +446,7 @@ dump_port_info(int argc, char** argv)
if (argc > 2) {
if (!strcmp(argv[1], "address")) {
- _dump_port_info((struct port_entry*)parse_expression(argv[2]));
+ _dump_port_info((Port*)parse_expression(argv[2]));
return 0;
} else if (!strcmp(argv[1], "condition"))
condition = (ConditionVariable*)parse_expression(argv[2]);
@@ -381,23 +455,24 @@ dump_port_info(int argc, char** argv)
} else if (parse_expression(argv[1]) > 0) {
// if the argument looks like a number, treat it as such
int32 num = parse_expression(argv[1]);
- int32 slot = num % sMaxPorts;
- if (sPorts[slot].id != num) {
+ Port* port = sPorts.Lookup(num);
+ if (port == NULL) {
kprintf("port %ld (%#lx) doesn't exist!\n", num, num);
return 0;
}
- _dump_port_info(&sPorts[slot]);
+ _dump_port_info(port);
return 0;
} else
name = argv[1];
// walk through the ports list, trying to match name
- for (int32 i = 0; i < sMaxPorts; i++) {
- if ((name != NULL && sPorts[i].lock.name != NULL
- && !strcmp(name, sPorts[i].lock.name))
- || (condition != NULL && (&sPorts[i].read_condition == condition
- || &sPorts[i].write_condition == condition))) {
- _dump_port_info(&sPorts[i]);
+ for (PortHashTable::Iterator it = sPorts.GetIterator();
+ Port* port = it.Next();) {
+ if ((name != NULL && port->lock.name != NULL
+ && !strcmp(name, port->lock.name))
+ || (condition != NULL && (&port->read_condition == condition
+ || &port->write_condition == condition))) {
+ _dump_port_info(port);
return 0;
}
}
@@ -406,11 +481,14 @@ dump_port_info(int argc, char** argv)
}
+/*! Notifies the port's select events.
+ The port must be locked.
+*/
static void
-notify_port_select_events(int slot, uint16 events)
+notify_port_select_events(Port* port, uint16 events)
{
- if (sPorts[slot].select_infos)
- notify_select_events_list(sPorts[slot].select_infos, events);
+ if (port->select_infos)
+ notify_select_events_list(port->select_infos, events);
}
@@ -512,19 +590,19 @@ get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
/*! You need to own the port's lock when calling this function */
-static bool
-is_port_closed(int32 slot)
+static inline bool
+is_port_closed(Port* port)
{
- return sPorts[slot].capacity == 0;
+ return port->capacity == 0;
}
/*! Fills the port_info structure with information from the specified
port.
- The port lock must be held when called.
+ The port's lock must be held when called.
*/
static void
-fill_port_info(struct port_entry* port, port_info* info, size_t size)
+fill_port_info(Port* port, port_info* info, size_t size)
{
info->port = port->id;
info->team = port->owner;
@@ -562,66 +640,64 @@ copy_port_message(port_message* message, int32* _code, void* buffer,
static void
-uninit_port_locked(struct port_entry& port)
+uninit_port_locked(Port* port)
{
- int32 id = port.id;
+ notify_port_select_events(port, B_EVENT_INVALID);
+ port->select_infos = NULL;
- // mark port as invalid
- port.id = -1;
- free((char*)port.lock.name);
- port.lock.name = NULL;
+ // Release the threads that were blocking on this port.
+ // read_port() will see the B_BAD_PORT_ID return value, and act accordingly
+ port->read_condition.NotifyAll(false, B_BAD_PORT_ID);
+ port->write_condition.NotifyAll(false, B_BAD_PORT_ID);
+ sNotificationService.Notify(PORT_REMOVED, port->id);
+}
- while (port_message* message = port.messages.RemoveHead()) {
- put_port_message(message);
- }
- notify_port_select_events(id % sMaxPorts, B_EVENT_INVALID);
- port.select_infos = NULL;
+static Port*
+get_locked_port(port_id id)
+{
+ MutexLocker portsLocker(sPortsLock);
- // Release the threads that were blocking on this port.
- // read_port() will see the B_BAD_PORT_ID return value, and act accordingly
- port.read_condition.NotifyAll(false, B_BAD_PORT_ID);
- port.write_condition.NotifyAll(false, B_BAD_PORT_ID);
- sNotificationService.Notify(PORT_REMOVED, id);
+ Port* port = sPorts.Lookup(id);
+ if (port != NULL)
+ mutex_lock(&port->lock);
+ return port;
}
// #pragma mark - private kernel API
-/*! This function delets all the ports that are owned by the passed team.
+/*! This function deletes all the ports that are owned by the passed team.
*/
void
delete_owned_ports(Team* team)
{
TRACE(("delete_owned_ports(owner = %ld)\n", team->id));
- struct list queue;
-
- {
- InterruptsSpinLocker locker(gTeamSpinlock);
- list_move_to_list(&team->port_list, &queue);
- }
-
- int32 firstSlot = sMaxPorts;
- int32 count = 0;
+ MutexLocker portsLocker(sPortsLock);
- while (port_entry* port = (port_entry*)list_remove_head_item(&queue)) {
- if (firstSlot > port->id % sMaxPorts)
- firstSlot = port->id % sMaxPorts;
- count++;
+ // move the ports from the team's port list to a local list
+ struct list queue;
+ list_move_to_list(&team->port_list, &queue);
+	// iterate through the list of ports, remove them from the hash table and
+ // uninitialize them
+ Port* port = (Port*)list_get_first_item(&queue);
+ while (port != NULL) {
MutexLocker locker(port->lock);
- uninit_port_locked(*port);
- }
+ sPorts.Remove(port);
+ uninit_port_locked(port);
+ sUsedPorts--;
- MutexLocker _(sPortsLock);
+ port = (Port*)list_get_next_item(&queue, port);
+ }
- // update the first free slot hint in the array
- if (firstSlot < sFirstFreeSlot)
- sFirstFreeSlot = firstSlot;
+ portsLocker.Unlock();
- sUsedPorts -= count;
+ // delete the ports
+ while (Port* port = (Port*)list_remove_head_item(&queue))
+ delete port;
}
@@ -642,26 +718,11 @@ port_used_ports(void)
status_t
port_init(kernel_args *args)
{
- size_t size = sizeof(struct port_entry) * sMaxPorts;
-
- // create and initialize ports table
- virtual_address_restrictions virtualRestrictions = {};
- virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
- physical_address_restrictions physicalRestrictions = {};
- sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", size, B_FULL_LOCK,
- B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
- &virtualRestrictions, &physicalRestrictions, (void**)&sPorts);
- if (sPortArea < 0) {
- panic("unable to allocate kernel port table!\n");
- return sPortArea;
- }
-
- memset(sPorts, 0, size);
- for (int32 i = 0; i < sMaxPorts; i++) {
- mutex_init(&sPorts[i].lock, NULL);
- sPorts[i].id = -1;
- sPorts[i].read_condition.Init(&sPorts[i], "port read");
- sPorts[i].write_condition.Init(&sPorts[i], "port write");
+ // initialize ports table
+ new(&sPorts) PortHashTable;
+ if (sPorts.Init() != B_OK) {
+ panic("Failed to init port hash table!");
+ return B_NO_MEMORY;
}
addr_t base;
@@ -686,7 +747,7 @@ port_init(kernel_args *args)
return B_NO_MEMORY;
}
- sNoSpaceCondition.Init(sPorts, "port space");
+ sNoSpaceCondition.Init(&sPorts, "port space");
// add debugger commands
add_debugger_command_etc("ports", &dump_port_list,
@@ -731,64 +792,53 @@ create_port(int32 queueLength, const char* name)
if (team == NULL)
return B_BAD_TEAM_ID;
- MutexLocker locker(sPortsLock);
-
- // check early on if there are any free port slots to use
- if (sUsedPorts >= sMaxPorts)
- return B_NO_MORE_PORTS;
-
// check & dup name
char* nameBuffer = strdup(name != NULL ? name : "unnamed port");
if (nameBuffer == NULL)
return B_NO_MEMORY;
- sUsedPorts++;
+ // create a port
+ Port* port = new(std::nothrow) Port(team_get_current_team_id(), queueLength,
+ nameBuffer);
+ if (port == NULL) {
+ free(nameBuffer);
+ return B_NO_MEMORY;
+ }
+ ObjectDeleter<Port> portDeleter(port);
- // find the first empty spot
- for (int32 slot = 0; slot < sMaxPorts; slot++) {
- int32 i = (slot + sFirstFreeSlot) % sMaxPorts;
+ MutexLocker locker(sPortsLock);
- if (sPorts[i].id == -1) {
- // make the port_id be a multiple of the slot it's in
- if (i >= sNextPort % sMaxPorts)
- sNextPort += i - sNextPort % sMaxPorts;
- else
- sNextPort += sMaxPorts - (sNextPort % sMaxPorts - i);
- sFirstFreeSlot = slot + 1;
+ // check the ports limit
+ if (sUsedPorts >= sMaxPorts)
+ return B_NO_MORE_PORTS;
- MutexLocker portLocker(sPorts[i].lock);
- sPorts[i].id = sNextPort++;
- locker.Unlock();
+ sUsedPorts++;
- sPorts[i].capacity = queueLength;
- sPorts[i].owner = team_get_current_team_id();
- sPorts[i].lock.name = nameBuffer;
- sPorts[i].read_count = 0;
- sPorts[i].write_count = queueLength;
- sPorts[i].total_count = 0;
- sPorts[i].select_infos = NULL;
-
- {
- InterruptsSpinLocker teamLocker(gTeamSpinlock);
- list_add_item(&team->port_list, &sPorts[i].team_link);
- }
+ // allocate a port ID
+ do {
+ port->id = sNextPortID++;
- port_id id = sPorts[i].id;
+ // handle integer overflow
+ if (sNextPortID < 0)
+ sNextPortID = 1;
+ } while (sPorts.Lookup(port->id) != NULL);
- T(Create(sPorts[i]));
- portLocker.Unlock();
+ // insert port in table and team list
+ sPorts.Insert(port);
+ list_add_item(&team->port_list, &port->team_link);
+ portDeleter.Detach();
- TRACE(("create_port() done: port created %ld\n", id));
+ // tracing, notifications, etc.
+ T(Create(port));
- sNotificationService.Notify(PORT_ADDED, id);
- return id;
- }
- }
+ port_id id = port->id;
+
+ locker.Unlock();
+
+ TRACE(("create_port() done: port created %ld\n", id));
- // Still not enough ports... - due to sUsedPorts, this cannot really
- // happen anymore.
- panic("out of ports, but sUsedPorts is broken");
- return B_NO_MORE_PORTS;
+ sNotificationService.Notify(PORT_ADDED, id);
+ return id;
}
@@ -800,25 +850,23 @@ close_port(port_id id)
if (!sPortsActive || id < 0)
return B_BAD_PORT_ID;
- int32 slot = id % sMaxPorts;
-
- // walk through the sem list, trying to match name
- MutexLocker locker(sPorts[slot].lock);
-
- if (sPorts[slot].id != id) {
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL) {
TRACE(("close_port: invalid port_id %ld\n", id));
return B_BAD_PORT_ID;
}
+ MutexLocker lock(&port->lock, true);
// mark port to disable writing - deleting the semaphores will
// wake up waiting read/writes
- sPorts[slot].capacity = 0;
+ port->capacity = 0;
- notify_port_select_events(slot, B_EVENT_INVALID);
- sPorts[slot].select_infos = NULL;
+ notify_port_select_events(port, B_EVENT_INVALID);
+ port->select_infos = NULL;
- sPorts[slot].read_condition.NotifyAll(false, B_BAD_PORT_ID);
- sPorts[slot].write_condition.NotifyAll(false, B_BAD_PORT_ID);
+ port->read_condition.NotifyAll(false, B_BAD_PORT_ID);
+ port->write_condition.NotifyAll(false, B_BAD_PORT_ID);
return B_OK;
}
@@ -832,33 +880,34 @@ delete_port(port_id id)
if (!sPortsActive || id < 0)
return B_BAD_PORT_ID;
- int32 slot = id % sMaxPorts;
+ // get the port and remove it from the hash table and the team
+ Port* port;
+ MutexLocker locker;
+ {
+ MutexLocker portsLocker(sPortsLock);
+
+ port = sPorts.Lookup(id);
+ if (port == NULL) {
+ TRACE(("delete_port: invalid port_id %ld\n", id));
+ return B_BAD_PORT_ID;
+ }
- MutexLocker locker(sPorts[slot].lock);
+ sPorts.Remove(port);
+ list_remove_link(&port->team_link);
- if (sPorts[slot].id != id) {
- TRACE(("delete_port: invalid port_id %ld\n", id));
- return B_BAD_PORT_ID;
- }
+ sUsedPorts--;
- T(Delete(sPorts[slot]));
+ locker.SetTo(port->lock, false);
- {
- InterruptsSpinLocker teamLocker(gTeamSpinlock);
- list_remove_link(&sPorts[slot].team_link);
+ uninit_port_locked(port);
}
- uninit_port_locked(sPorts[slot]);
+ T(Delete(port));
locker.Unlock();
- MutexLocker _(sPortsLock);
+ delete port;
- // update the first free slot hint in the array
- if (slot < sFirstFreeSlot)
- sFirstFreeSlot = slot;
-
- sUsedPorts--;
return B_OK;
}
@@ -869,13 +918,17 @@ select_port(int32 id, struct select_info* info, bool kernel)
if (id < 0)
return B_BAD_PORT_ID;
- int32 slot = id % sMaxPorts;
-
- MutexLocker locker(sPorts[slot].lock);
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL)
+ return B_BAD_PORT_ID;
+ MutexLocker locker(port->lock, true);
- if (sPorts[slot].id != id || is_port_closed(slot))
+ // port must not yet be closed
+ if (is_port_closed(port))
return B_BAD_PORT_ID;
- if (!kernel && sPorts[slot].owner == team_get_kernel_team_id()) {
+
+ if (!kernel && port->owner == team_get_kernel_team_id()) {
// kernel port, but call from userland
return B_NOT_ALLOWED;
}
@@ -885,16 +938,16 @@ select_port(int32 id, struct select_info* info, bool kernel)
if (info->selected_events != 0) {
uint16 events = 0;
- info->next = sPorts[slot].select_infos;
- sPorts[slot].select_infos = info;
+ info->next = port->select_infos;
+ port->select_infos = info;
// check for events
if ((info->selected_events & B_EVENT_READ) != 0
- && !sPorts[slot].messages.IsEmpty()) {
+ && !port->messages.IsEmpty()) {
events |= B_EVENT_READ;
}
- if (sPorts[slot].write_count > 0)
+ if (port->write_count > 0)
events |= B_EVENT_WRITE;
if (events != 0)
@@ -913,18 +966,19 @@ deselect_port(int32 id, struct select_info* info, bool kernel)
if (info->selected_events == 0)
return B_OK;
- int32 slot = id % sMaxPorts;
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL)
+ return B_BAD_PORT_ID;
+ MutexLocker locker(port->lock, true);
- MutexLocker locker(sPorts[slot].lock);
+ // find and remove the infos
+ select_info** infoLocation = &port->select_infos;
+ while (*infoLocation != NULL && *infoLocation != info)
+ infoLocation = &(*infoLocation)->next;
- if (sPorts[slot].id == id) {
- select_info** infoLocation = &sPorts[slot].select_infos;
- while (*infoLocation != NULL && *infoLocation != info)
- infoLocation = &(*infoLocation)->next;
-
- if (*infoLocation == info)
- *infoLocation = info->next;
- }
+ if (*infoLocation == info)
+ *infoLocation = info->next;
return B_OK;
}
@@ -942,17 +996,12 @@ find_port(const char* name)
if (name == NULL)
return B_BAD_VALUE;
- // Since we have to check every single port, and we don't
- // care if it goes away at any point, we're only grabbing
- // the port lock in question, not the port list lock
-
- // loop over list
- for (int32 i = 0; i < sMaxPorts; i++) {
- // lock every individual port before comparing
- MutexLocker _(sPorts[i].lock);
+ MutexLocker portsLocker(sPortsLock);
- if (sPorts[i].id >= 0 && !strcmp(name, sPorts[i].lock.name))
- return sPorts[i].id;
+ for (PortHashTable::Iterator it = sPorts.GetIterator();
+ Port* port = it.Next();) {
+ if (!strcmp(name, port->lock.name))
+ return port->id;
}
return B_NAME_NOT_FOUND;
@@ -969,60 +1018,64 @@ _get_port_info(port_id id, port_info* info, size_t size)
if (!sPortsActive || id < 0)
return B_BAD_PORT_ID;
- int32 slot = id % sMaxPorts;
-
- MutexLocker locker(sPorts[slot].lock);
-
- if (sPorts[slot].id != id || sPorts[slot].capacity == 0) {
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL) {
TRACE(("get_port_info: invalid port_id %ld\n", id));
return B_BAD_PORT_ID;
}
+ MutexLocker locker(port->lock, true);
// fill a port_info struct with info
- fill_port_info(&sPorts[slot], info, size);
+ fill_port_info(port, info, size);
return B_OK;
}
status_t
-_get_next_port_info(team_id team, int32* _cookie, struct port_info* info,
+_get_next_port_info(team_id teamID, int32* _cookie, struct port_info* info,
size_t size)
{
- TRACE(("get_next_port_info(team = %ld)\n", team));
+ TRACE(("get_next_port_info(team = %ld)\n", teamID));
if (info == NULL || size != sizeof(port_info) || _cookie == NULL
- || team < B_OK)
+ || teamID < 0) {
return B_BAD_VALUE;
+ }
if (!sPortsActive)
return B_BAD_PORT_ID;
- int32 slot = *_cookie;
- if (slot >= sMaxPorts)
- return B_BAD_PORT_ID;
-
- if (team == B_CURRENT_TEAM)
- team = team_get_current_team_id();
+ Team* team = Team::Get(teamID);
+ if (team == NULL)
+ return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
- info->port = -1; // used as found flag
+ // iterate through the team's port list
+ MutexLocker portsLocker(sPortsLock);
- while (slot < sMaxPorts) {
- MutexLocker locker(sPorts[slot].lock);
+ int32 stopIndex = *_cookie;
+ int32 index = 0;
- if (sPorts[slot].id != -1 && !is_port_closed(slot)
- && sPorts[slot].owner == team) {
- // found one!
- fill_port_info(&sPorts[slot], info, size);
- slot++;
- break;
+ Port* port = (Port*)list_get_first_item(&team->port_list);
+ while (port != NULL) {
+ if (!is_port_closed(port)) {
+ if (index == stopIndex)
+ break;
+ index++;
}
- slot++;
+ port = (Port*)list_get_next_item(&team->port_list, port);
}
- if (info->port == -1)
+ if (port == NULL)
return B_BAD_PORT_ID;
- *_cookie = slot;
+ // fill in the port info
+ MutexLocker locker(port->lock);
+ portsLocker.Unlock();
+ fill_port_info(port, info, size);
+
+ *_cookie = stopIndex + 1;
return B_OK;
}
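
The cookie is simply the index of the next open port to report, so the usual iteration idiom keeps working; a hedged userland example, assuming the public get_next_port_info() wrapper in <OS.h> forwards to the function above and that team holds the team_id of interest:

	int32 cookie = 0;
	port_info info;
	while (get_next_port_info(team, &cookie, &info) == B_OK)
		printf("port %ld: %s\n", (long)info.port, info.name);
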
@@ -1054,25 +1107,26 @@ _get_port_message_info_etc(port_id id, port_message_info* info,
flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
| B_ABSOLUTE_TIMEOUT;
- int32 slot = id % sMaxPorts;
- MutexLocker locker(sPorts[slot].lock);
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL)
+ return B_BAD_PORT_ID;
+ MutexLocker locker(port->lock, true);
- if (sPorts[slot].id != id
- || (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
- T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
- TRACE(("_get_port_message_info_etc(): %s port %ld\n",
- sPorts[slot].id == id ? "closed" : "invalid", id));
+ if (is_port_closed(port) && port->messages.IsEmpty()) {
+		T(Info(id, 0, 0, 0, B_BAD_PORT_ID));
+ TRACE(("_get_port_message_info_etc(): closed port %ld\n", id));
return B_BAD_PORT_ID;
}
- while (sPorts[slot].read_count == 0) {
+ while (port->read_count == 0) {
// We need to wait for a message to appear
if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
return B_WOULD_BLOCK;
ConditionVariableEntry entry;
- sPorts[slot].read_condition.Add(&entry);
+ port->read_condition.Add(&entry);
locker.Unlock();
@@ -1080,24 +1134,30 @@ _get_port_message_info_etc(port_id id, port_message_info* info,
status_t status = entry.Wait(flags, timeout);
if (status != B_OK) {
- T(Info(sPorts[slot], 0, status));
+			T(Info(id, 0, 0, 0, status));
return status;
}
- locker.Lock();
+ // re-lock
+ Port* newPort = get_locked_port(id);
+ if (newPort == NULL) {
+ T(Info(id, 0, 0, 0, B_BAD_PORT_ID));
+ return B_BAD_PORT_ID;
+ }
+ locker.SetTo(newPort->lock, true);
- if (sPorts[slot].id != id
- || (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
+ if (newPort != port
+ || (is_port_closed(port) && port->messages.IsEmpty())) {
// the port is no longer there
- T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
+ T(Info(id, 0, 0, 0, B_BAD_PORT_ID));
return B_BAD_PORT_ID;
}
}
// determine tail & get the length of the message
- port_message* message = sPorts[slot].messages.Head();
+ port_message* message = port->messages.Head();
if (message == NULL) {
- panic("port %ld: no messages found\n", sPorts[slot].id);
+ panic("port %ld: no messages found\n", port->id);
return B_ERROR;
}
@@ -1106,10 +1166,10 @@ _get_port_message_info_etc(port_id id, port_message_info* info,
info->sender_group = message->sender_group;
info->sender_team = message->sender_team;
- T(Info(sPorts[slot], message->code, B_OK));
+	T(Info(id, port->read_count, port->write_count, message->code, B_OK));
// notify next one, as we haven't read from the port
- sPorts[slot].read_condition.NotifyOne();
+ port->read_condition.NotifyOne();
return B_OK;
}
@@ -1121,17 +1181,16 @@ port_count(port_id id)
if (!sPortsActive || id < 0)
return B_BAD_PORT_ID;
- int32 slot = id % sMaxPorts;
-
- MutexLocker locker(sPorts[slot].lock);
-
- if (sPorts[slot].id != id) {
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL) {
TRACE(("port_count: invalid port_id %ld\n", id));
return B_BAD_PORT_ID;
}
+ MutexLocker locker(port->lock, true);
// return count of messages
- return sPorts[slot].read_count;
+ return port->read_count;
}
@@ -1158,50 +1217,56 @@ read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
| B_ABSOLUTE_TIMEOUT;
- int32 slot = id % sMaxPorts;
-
- MutexLocker locker(sPorts[slot].lock);
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL)
+ return B_BAD_PORT_ID;
+ MutexLocker locker(port->lock, true);
- if (sPorts[slot].id != id
- || (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
- T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
- TRACE(("read_port_etc(): %s port %ld\n",
- sPorts[slot].id == id ? "closed" : "invalid", id));
+ if (is_port_closed(port) && port->messages.IsEmpty()) {
+		T(Read(id, 0, 0, 0, B_BAD_PORT_ID));
+ TRACE(("read_port_etc(): closed port %ld\n", id));
return B_BAD_PORT_ID;
}
- while (sPorts[slot].read_count == 0) {
+ while (port->read_count == 0) {
if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
return B_WOULD_BLOCK;
// We need to wait for a message to appear
ConditionVariableEntry entry;
- sPorts[slot].read_condition.Add(&entry);
+ port->read_condition.Add(&entry);
locker.Unlock();
// block if no message, or, if B_TIMEOUT flag set, block with timeout
status_t status = entry.Wait(flags, timeout);
- locker.Lock();
+ // re-lock
+ Port* newPort = get_locked_port(id);
+ if (newPort == NULL) {
+ T(Read(id, 0, 0, 0, B_BAD_PORT_ID));
+ return B_BAD_PORT_ID;
+ }
+ locker.SetTo(newPort->lock, true);
- if (sPorts[slot].id != id
- || (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
+ if (newPort != port
+ || (is_port_closed(port) && port->messages.IsEmpty())) {
// the port is no longer there
- T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
+ T(Read(id, 0, 0, 0, B_BAD_PORT_ID));
return B_BAD_PORT_ID;
}
if (status != B_OK) {
- T(Read(sPorts[slot], 0, status));
+			T(Read(id, 0, 0, 0, status));
return status;
}
}
// determine tail & get the length of the message
- port_message* message = sPorts[slot].messages.Head();
+ port_message* message = port->messages.Head();
if (message == NULL) {
- panic("port %ld: no messages found\n", sPorts[slot].id);
+ panic("port %ld: no messages found\n", port->id);
return B_ERROR;
}
@@ -1209,27 +1274,29 @@ read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
size_t size = copy_port_message(message, _code, buffer, bufferSize,
userCopy);
- T(Read(sPorts[slot], message->code, size));
+		T(Read(id, port->read_count, port->write_count, message->code, size));
- sPorts[slot].read_condition.NotifyOne();
+ port->read_condition.NotifyOne();
// we only peeked, but didn't grab the message
return size;
}
- sPorts[slot].messages.RemoveHead();
- sPorts[slot].total_count++;
- sPorts[slot].write_count++;
- sPorts[slot].read_count--;
+ port->messages.RemoveHead();
+ port->total_count++;
+ port->write_count++;
+ port->read_count--;
- notify_port_select_events(slot, B_EVENT_WRITE);
- sPorts[slot].write_condition.NotifyOne();
+ notify_port_select_events(port, B_EVENT_WRITE);
+ port->write_condition.NotifyOne();
// make one spot in queue available again for write
+ T(Read(id, port->read_count, port->write_count, message->code,
+ min_c(bufferSize, message->size)));
+
locker.Unlock();
size_t size = copy_port_message(message, _code, buffer, bufferSize,
userCopy);
- T(Read(sPorts[slot], message->code, size));
put_port_message(message);
return size;
@@ -1277,47 +1344,54 @@ writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) > 0;
- int32 slot = id % sMaxPorts;
status_t status;
port_message* message = NULL;
- MutexLocker locker(sPorts[slot].lock);
-
- if (sPorts[slot].id != id) {
+ // get the port
+ Port* port = get_locked_port(id);
+ if (port == NULL) {
TRACE(("write_port_etc: invalid port_id %ld\n", id));
return B_BAD_PORT_ID;
}
- if (is_port_closed(slot)) {
+ MutexLocker locker(port->lock, true);
+
+ if (is_port_closed(port)) {
TRACE(("write_port_etc: port %ld closed\n", id));
return B_BAD_PORT_ID;
}
- if (sPorts[slot].write_count <= 0) {
+ if (port->write_count <= 0) {
if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
return B_WOULD_BLOCK;
- sPorts[slot].write_count--;
+ port->write_count--;
// We need to block in order to wait for a free message slot
ConditionVariableEntry entry;
- sPorts[slot].write_condition.Add(&entry);
+ port->write_condition.Add(&entry);
locker.Unlock();
status = entry.Wait(flags, timeout);
- locker.Lock();
+ // re-lock
+ Port* newPort = get_locked_port(id);
+ if (newPort == NULL) {
+ T(Write(id, 0, 0, 0, 0, B_BAD_PORT_ID));
+ return B_BAD_PORT_ID;
+ }
+ locker.SetTo(newPort->lock, true);
- if (sPorts[slot].id != id || is_port_closed(slot)) {
+ if (newPort != port || is_port_closed(port)) {
// the port is no longer there
- T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
+ T(Write(id, 0, 0, 0, 0, B_BAD_PORT_ID));
return B_BAD_PORT_ID;
}
if (status != B_OK)
goto error;
} else
- sPorts[slot].write_count--;
+ port->write_count--;
status = get_port_message(msgCode, bufferSize, flags, timeout,
&message);
@@ -1365,22 +1439,23 @@ writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
}
}
- sPorts[slot].messages.Add(message);
- sPorts[slot].read_count++;
+ port->messages.Add(message);
+ port->read_count++;
- T(Write(sPorts[slot], message->code, message->size, B_OK));
+ T(Write(id, port->read_count, port->write_count, message->code,
+ message->size, B_OK));
- notify_port_select_events(slot, B_EVENT_READ);
- sPorts[slot].read_condition.NotifyOne();
+ notify_port_select_events(port, B_EVENT_READ);
+ port->read_condition.NotifyOne();
return B_OK;
error:
// Give up our slot in the queue again, and let someone else
// try and fail
- T(Write(sPorts[slot], 0, 0, status));
- sPorts[slot].write_count++;
- notify_port_select_events(slot, B_EVENT_WRITE);
- sPorts[slot].write_condition.NotifyOne();
+ T(Write(id, port->read_count, port->write_count, 0, 0, status));
+ port->write_count++;
+ notify_port_select_events(port, B_EVENT_WRITE);
+ port->write_condition.NotifyOne();
return status;
}
@@ -1394,29 +1469,29 @@ set_port_owner(port_id id, team_id newTeamID)
if (id < 0)
return B_BAD_PORT_ID;
- int32 slot = id % sMaxPorts;
-
- MutexLocker locker(sPorts[slot].lock);
+ // get the new team
+ Team* team = Team::Get(newTeamID);
+ if (team == NULL)
+ return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
- if (sPorts[slot].id != id) {
+ // get the port
+ MutexLocker portsLocker(sPortsLock);
+ Port* port = sPorts.Lookup(id);
+ if (port == NULL) {
TRACE(("set_port_owner: invalid port_id %ld\n", id));
return B_BAD_PORT_ID;
}
-
- InterruptsSpinLocker teamLocker(gTeamSpinlock);
-
- Team* team = team_get_team_struct_locked(newTeamID);
- if (team == NULL) {
- T(OwnerChange(sPorts[slot], newTeamID, B_BAD_TEAM_ID));
- return B_BAD_TEAM_ID;
- }
+ MutexLocker locker(port->lock);
// transfer ownership to other team
- list_remove_link(&sPorts[slot].team_link);
- list_add_item(&team->port_list, &sPorts[slot].team_link);
- sPorts[slot].owner = newTeamID;
+ if (team->id != port->owner) {
+ list_remove_link(&port->team_link);
+ list_add_item(&team->port_list, &port->team_link);
+ port->owner = team->id;
+ }
- T(OwnerChange(sPorts[slot], newTeamID, B_OK));
+ T(OwnerChange(port, team->id, B_OK));
return B_OK;
}
diff --git a/src/system/kernel/posix/xsi_message_queue.cpp b/src/system/kernel/posix/xsi_message_queue.cpp
index cab9239..85aa4d2 100644
--- a/src/system/kernel/posix/xsi_message_queue.cpp
+++ b/src/system/kernel/posix/xsi_message_queue.cpp
@@ -126,7 +126,7 @@ public:
// Unlock the queue before blocking
queueLocker->Unlock();
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
// TODO: We've got a serious race condition: If BlockAndUnlock() returned due to
// interruption, we will still be queued. A WakeUpThread() at this point will
// call thread_unblock() and might thus screw with our trying to re-lock the
@@ -246,7 +246,8 @@ public:
void WakeUpThread(bool waitForMessage)
{
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
if (waitForMessage) {
// Wake up all waiting thread for a message
// TODO: this can cause starvation for any
@@ -399,7 +400,8 @@ XsiMessageQueue::~XsiMessageQueue()
// Wake up any threads still waiting
if (fThreadsWaitingToSend || fThreadsWaitingToReceive) {
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
while (queued_thread *entry = fWaitingToReceive.RemoveHead()) {
entry->queued = false;
thread_unblock_locked(entry->thread, EIDRM);
diff --git a/src/system/kernel/posix/xsi_semaphore.cpp b/src/system/kernel/posix/xsi_semaphore.cpp
index 941ceca..161479c 100644
--- a/src/system/kernel/posix/xsi_semaphore.cpp
+++ b/src/system/kernel/posix/xsi_semaphore.cpp
@@ -101,7 +101,8 @@ public:
{
// For some reason the semaphore is getting destroyed.
// Wake up any remaining waiting threads
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
while (queued_thread *entry = fWaitingToIncreaseQueue.RemoveHead()) {
entry->queued = false;
thread_unblock_locked(entry->thread, EIDRM);
@@ -143,7 +144,7 @@ public:
// Unlock the set before blocking
setLocker->Unlock();
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
// TODO: We've got a serious race condition: If BlockAndUnlock() returned due to
// interruption, we will still be queued. A WakeUpThread() at this point will
// call thread_unblock() and might thus screw with our trying to re-lock the
@@ -217,7 +218,7 @@ public:
void WakeUpThread(bool waitingForZero)
{
- InterruptsSpinLocker _(gThreadSpinlock);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
if (waitingForZero) {
// Wake up all threads waiting on zero
while (queued_thread *entry = fWaitingToBeZeroQueue.RemoveHead()) {
diff --git a/src/system/kernel/real_time_clock.cpp b/src/system/kernel/real_time_clock.cpp
index 2ef8569..6f99492 100644
--- a/src/system/kernel/real_time_clock.cpp
+++ b/src/system/kernel/real_time_clock.cpp
@@ -35,6 +35,14 @@ static bigtime_t sTimezoneOffset = 0;
static char sTimezoneName[B_FILE_NAME_LENGTH] = "GMT";
+static void
+real_time_clock_changed()
+{
+ timer_real_time_clock_changed();
+ user_timer_real_time_clock_changed();
+}
+
+
/*! Write the system time to CMOS. */
static void
rtc_system_to_hw(void)
@@ -106,11 +114,18 @@ rtc_init(kernel_args *args)
void
-set_real_time_clock(uint32 currentTime)
+set_real_time_clock_usecs(bigtime_t currentTime)
{
- arch_rtc_set_system_time_offset(sRealTimeData,
- currentTime * 1000000LL - system_time());
+ arch_rtc_set_system_time_offset(sRealTimeData, currentTime - system_time());
rtc_system_to_hw();
+ real_time_clock_changed();
+}
+
+
+void
+set_real_time_clock(uint32 currentTime)
+{
+ set_real_time_clock_usecs((bigtime_t)currentTime * 1000000);
}
@@ -207,12 +222,12 @@ _user_system_time(void)
status_t
-_user_set_real_time_clock(uint32 time)
+_user_set_real_time_clock(bigtime_t time)
{
if (geteuid() != 0)
return B_NOT_ALLOWED;
- set_real_time_clock(time);
+ set_real_time_clock_usecs(time);
return B_OK;
}
@@ -242,6 +257,7 @@ _user_set_timezone(time_t timezoneOffset, const char *name, size_t nameLength)
arch_rtc_set_system_time_offset(sRealTimeData,
arch_rtc_get_system_time_offset(sRealTimeData) + sTimezoneOffset
- offset);
+ real_time_clock_changed();
}
sTimezoneOffset = offset;
@@ -286,6 +302,7 @@ _user_set_real_time_clock_is_gmt(bool isGMT)
arch_rtc_set_system_time_offset(sRealTimeData,
arch_rtc_get_system_time_offset(sRealTimeData)
+ (sIsGMT ? 1 : -1) * sTimezoneOffset);
+ real_time_clock_changed();
}
return B_OK;
diff --git a/src/system/kernel/scheduler/scheduler.cpp b/src/system/kernel/scheduler/scheduler.cpp
index fc1a5d4..0d571d6 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -16,6 +16,7 @@
struct scheduler_ops* gScheduler;
+spinlock gSchedulerLock = B_SPINLOCK_INITIALIZER;
SchedulerListenerList gSchedulerListeners;
static void (*sRescheduleFunction)(void);
@@ -30,7 +31,7 @@ scheduler_reschedule_no_op(void)
}
-// #pragma mark -
+// #pragma mark - SchedulerListener
SchedulerListener::~SchedulerListener()
@@ -38,6 +39,9 @@ SchedulerListener::~SchedulerListener()
}
+// #pragma mark - kernel private
+
+
/*! Add the given scheduler listener. Thread lock must be held.
*/
void
@@ -106,12 +110,19 @@ _user_estimate_max_scheduling_latency(thread_id id)
{
syscall_64_bit_return_value();
- InterruptsSpinLocker locker(gThreadSpinlock);
-
- Thread* thread = id < 0
- ? thread_get_current_thread() : thread_get_thread_struct_locked(id);
- if (thread == NULL)
- return 0;
+ // get the thread
+ Thread* thread;
+ if (id < 0) {
+ thread = thread_get_current_thread();
+ thread->AcquireReference();
+ } else {
+ thread = Thread::Get(id);
+ if (thread == NULL)
+ return 0;
+ }
+ BReference<Thread> threadReference(thread, true);
+ // ask the scheduler for the thread's latency
+ InterruptsSpinLocker locker(gSchedulerLock);
return gScheduler->estimate_max_scheduling_latency(thread);
}
diff --git a/src/system/kernel/scheduler/scheduler_affine.cpp b/src/system/kernel/scheduler/scheduler_affine.cpp
index 508808f..721b9af 100644
--- a/src/system/kernel/scheduler/scheduler_affine.cpp
+++ b/src/system/kernel/scheduler/scheduler_affine.cpp
@@ -25,8 +25,8 @@
#include <smp.h>
#include <thread.h>
#include <timer.h>
-#include <user_debugger.h>
+#include "scheduler_common.h"
#include "scheduler_tracing.h"
@@ -339,28 +339,6 @@ affine_estimate_max_scheduling_latency(Thread* thread)
}
-static void
-context_switch(Thread *fromThread, Thread *toThread)
-{
- if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
- user_debug_thread_unscheduled(fromThread);
-
- cpu_ent* cpu = fromThread->cpu;
- toThread->previous_cpu = toThread->cpu = cpu;
- fromThread->cpu = NULL;
- cpu->running_thread = toThread;
-
- arch_thread_set_current_thread(toThread);
- arch_thread_context_switch(fromThread, toThread);
-
- // Looks weird, but is correct. fromThread had been unscheduled earlier,
- // but is back now. The notification for a thread scheduled the first time
- // happens in thread.cpp:thread_kthread_entry().
- if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
- user_debug_thread_scheduled(fromThread);
-}
-
-
static int32
reschedule_event(timer *unused)
{
@@ -486,9 +464,7 @@ affine_reschedule(void)
oldThread->was_yielded = false;
// track kernel time (user time is tracked in thread_at_kernel_entry())
- bigtime_t now = system_time();
- oldThread->kernel_time += now - oldThread->last_time;
- nextThread->last_time = now;
+ scheduler_update_thread_times(oldThread, nextThread);
// track CPU activity
if (!thread_is_idle_thread(oldThread)) {
@@ -525,20 +501,27 @@ affine_reschedule(void)
quantum = kMaxThreadQuantum;
add_timer(quantumTimer, &reschedule_event, quantum,
- B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
+ B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_SCHEDULER_LOCK);
if (nextThread != oldThread)
- context_switch(oldThread, nextThread);
+ scheduler_switch_thread(oldThread, nextThread);
}
}
-static void
-affine_on_thread_create(Thread* thread)
+static status_t
+affine_on_thread_create(Thread* thread, bool idleThread)
{
+ // we don't need a data structure for the idle threads
+ if (idleThread) {
+ thread->scheduler_data = NULL;
+ return B_OK;
+ }
+
thread->scheduler_data = new(std::nothrow) scheduler_thread_data();
if (thread->scheduler_data == NULL)
- panic("affine_scheduler: Unable to allocate scheduling data structure for thread %ld\n", thread->id);
+ return B_NO_MEMORY;
+ return B_OK;
}
@@ -562,11 +545,9 @@ affine_on_thread_destroy(Thread* thread)
static void
affine_start(void)
{
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
affine_reschedule();
-
- RELEASE_THREAD_LOCK();
}
diff --git a/src/system/kernel/scheduler/scheduler_common.h b/src/system/kernel/scheduler/scheduler_common.h
new file mode 100644
index 0000000..0de57db
--- /dev/null
+++ b/src/system/kernel/scheduler/scheduler_common.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef KERNEL_SCHEDULER_COMMON_H
+#define KERNEL_SCHEDULER_COMMON_H
+
+
+#include <kscheduler.h>
+#include <smp.h>
+#include <user_debugger.h>
+
+
+/*! Switches the currently running thread.
+ This is a service function for scheduler implementations.
+
+ \param fromThread The currently running thread.
+ \param toThread The thread to switch to. Must be different from
+ \a fromThread.
+*/
+static inline void
+scheduler_switch_thread(Thread* fromThread, Thread* toThread)
+{
+ // notify the user debugger code
+ if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
+ user_debug_thread_unscheduled(fromThread);
+
+ // stop CPU time based user timers
+ if (fromThread->HasActiveCPUTimeUserTimers()
+ || fromThread->team->HasActiveCPUTimeUserTimers()) {
+ user_timer_stop_cpu_timers(fromThread, toThread);
+ }
+
+ // update CPU and Thread structures and perform the context switch
+ cpu_ent* cpu = fromThread->cpu;
+ toThread->previous_cpu = toThread->cpu = cpu;
+ fromThread->cpu = NULL;
+ cpu->running_thread = toThread;
+ cpu->previous_thread = fromThread;
+
+ arch_thread_set_current_thread(toThread);
+ arch_thread_context_switch(fromThread, toThread);
+
+ // The use of fromThread below looks weird, but is correct. fromThread had
+ // been unscheduled earlier, but is back now. For a thread scheduled the
+ // first time the same is done in thread.cpp:common_thread_entry().
+
+ // continue CPU time based user timers
+ if (fromThread->HasActiveCPUTimeUserTimers()
+ || fromThread->team->HasActiveCPUTimeUserTimers()) {
+ user_timer_continue_cpu_timers(fromThread, cpu->previous_thread);
+ }
+
+ // notify the user debugger code
+ if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
+ user_debug_thread_scheduled(fromThread);
+}
+
+
+static inline void
+scheduler_update_thread_times(Thread* oldThread, Thread* nextThread)
+{
+ bigtime_t now = system_time();
+ if (oldThread == nextThread) {
+ acquire_spinlock(&oldThread->time_lock);
+ oldThread->kernel_time += now - oldThread->last_time;
+ oldThread->last_time = now;
+ release_spinlock(&oldThread->time_lock);
+ } else {
+ acquire_spinlock(&oldThread->time_lock);
+ oldThread->kernel_time += now - oldThread->last_time;
+ release_spinlock(&oldThread->time_lock);
+
+ acquire_spinlock(&nextThread->time_lock);
+ nextThread->last_time = now;
+ release_spinlock(&nextThread->time_lock);
+ }
+
+ // If the old thread's team has user time timers, check them now.
+ Team* team = oldThread->team;
+ if (team->HasActiveUserTimeUserTimers())
+ user_timer_check_team_user_timers(team);
+}
+
+
+#endif // KERNEL_SCHEDULER_COMMON_H
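
As a usage sketch, a scheduler implementation's reschedule path is expected to call these helpers roughly as follows, with gSchedulerLock held and interrupts disabled (mirrors simple_reschedule() further down; the function name is hypothetical):

	static void
	hypothetical_reschedule_tail(Thread* oldThread, Thread* nextThread)
	{
		// update kernel/user time bookkeeping under the per-thread time_lock
		scheduler_update_thread_times(oldThread, nextThread);

		// switch only if a different thread was actually picked
		if (nextThread != oldThread)
			scheduler_switch_thread(oldThread, nextThread);
	}
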
diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp b/src/system/kernel/scheduler/scheduler_simple.cpp
index b7a72d4..2b1acc9 100644
--- a/src/system/kernel/scheduler/scheduler_simple.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple.cpp
@@ -23,8 +23,8 @@
#include <scheduler_defs.h>
#include <thread.h>
#include <timer.h>
-#include <user_debugger.h>
+#include "scheduler_common.h"
#include "scheduler_tracing.h"
@@ -179,28 +179,6 @@ simple_estimate_max_scheduling_latency(Thread* thread)
}
-static void
-context_switch(Thread *fromThread, Thread *toThread)
-{
- if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
- user_debug_thread_unscheduled(fromThread);
-
- cpu_ent* cpu = fromThread->cpu;
- toThread->previous_cpu = toThread->cpu = cpu;
- fromThread->cpu = NULL;
- cpu->running_thread = toThread;
-
- arch_thread_set_current_thread(toThread);
- arch_thread_context_switch(fromThread, toThread);
-
- // Looks weird, but is correct. fromThread had been unscheduled earlier,
- // but is back now. The notification for a thread scheduled the first time
- // happens in thread.cpp:thread_kthread_entry().
- if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
- user_debug_thread_scheduled(fromThread);
-}
-
-
static int32
reschedule_event(timer *unused)
{
@@ -324,9 +302,7 @@ simple_reschedule(void)
oldThread->was_yielded = false;
// track kernel time (user time is tracked in thread_at_kernel_entry())
- bigtime_t now = system_time();
- oldThread->kernel_time += now - oldThread->last_time;
- nextThread->last_time = now;
+ scheduler_update_thread_times(oldThread, nextThread);
// track CPU activity
if (!thread_is_idle_thread(oldThread)) {
@@ -349,18 +325,19 @@ simple_reschedule(void)
oldThread->cpu->preempted = 0;
add_timer(quantumTimer, &reschedule_event, quantum,
- B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
+ B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_SCHEDULER_LOCK);
if (nextThread != oldThread)
- context_switch(oldThread, nextThread);
+ scheduler_switch_thread(oldThread, nextThread);
}
}
-static void
-simple_on_thread_create(Thread* thread)
+static status_t
+simple_on_thread_create(Thread* thread, bool idleThread)
{
// do nothing
+ return B_OK;
}
@@ -384,11 +361,9 @@ simple_on_thread_destroy(Thread* thread)
static void
simple_start(void)
{
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
simple_reschedule();
-
- RELEASE_THREAD_LOCK();
}
diff --git a/src/system/kernel/scheduler/scheduler_simple_smp.cpp b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
index 49b434e..721860a 100644
--- a/src/system/kernel/scheduler/scheduler_simple_smp.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
@@ -24,8 +24,8 @@
#include <smp.h>
#include <thread.h>
#include <timer.h>
-#include <user_debugger.h>
+#include "scheduler_common.h"
#include "scheduler_tracing.h"
@@ -244,28 +244,6 @@ estimate_max_scheduling_latency(Thread* thread)
}
-static void
-context_switch(Thread *fromThread, Thread *toThread)
-{
- if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
- user_debug_thread_unscheduled(fromThread);
-
- cpu_ent* cpu = fromThread->cpu;
- toThread->previous_cpu = toThread->cpu = cpu;
- fromThread->cpu = NULL;
- cpu->running_thread = toThread;
-
- arch_thread_set_current_thread(toThread);
- arch_thread_context_switch(fromThread, toThread);
-
- // Looks weird, but is correct. fromThread had been unscheduled earlier,
- // but is back now. The notification for a thread scheduled the first time
- // happens in thread.cpp:thread_kthread_entry().
- if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
- user_debug_thread_scheduled(fromThread);
-}
-
-
static int32
reschedule_event(timer *unused)
{
@@ -423,9 +401,7 @@ reschedule(void)
oldThread->was_yielded = false;
// track kernel time (user time is tracked in thread_at_kernel_entry())
- bigtime_t now = system_time();
- oldThread->kernel_time += now - oldThread->last_time;
- nextThread->last_time = now;
+ scheduler_update_thread_times(oldThread, nextThread);
// track CPU activity
if (!thread_is_idle_thread(oldThread)) {
@@ -448,18 +424,19 @@ reschedule(void)
oldThread->cpu->preempted = 0;
add_timer(quantumTimer, &reschedule_event, quantum,
- B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
+ B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_SCHEDULER_LOCK);
if (nextThread != oldThread)
- context_switch(oldThread, nextThread);
+ scheduler_switch_thread(oldThread, nextThread);
}
}
-static void
-on_thread_create(Thread* thread)
+static status_t
+on_thread_create(Thread* thread, bool idleThread)
{
// do nothing
+ return B_OK;
}
@@ -483,11 +460,9 @@ on_thread_destroy(Thread* thread)
static void
start(void)
{
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
reschedule();
-
- RELEASE_THREAD_LOCK();
}
diff --git a/src/system/kernel/sem.cpp b/src/system/kernel/sem.cpp
index cf47fb2..2c460b9 100644
--- a/src/system/kernel/sem.cpp
+++ b/src/system/kernel/sem.cpp
@@ -56,6 +56,20 @@
#endif
+// Locking:
+// * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead,
+// sFreeSemsTail), Team::sem_list, and, together with sem_entry::lock,
+// write access to sem_entry::owner/team_link.
+// * sem_entry::lock: Protects all sem_entry members; owner and team_link
+// additionally need sSemsSpinlock for write access.
+// lock itself doesn't need protection -- sem_entry objects are never deleted.
+//
+// The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock. All
+// semaphores are in the sSems array (sem_entry[]). Access by sem_id requires
+// computing the object index (id % sMaxSems), locking the respective
+// sem_entry::lock and verifying that sem_entry::id matches afterwards.
+
+
struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
queued_thread(Thread *thread, int32 count)
:
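
A minimal sketch of the lookup-and-verify pattern described above (the surrounding function is hypothetical; the macros are the existing per-entry lock helpers, used the same way in delete_sem_internal() below):

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// the slot is free or has been reused for another semaphore
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		return B_BAD_SEM_ID;
	}

	// ... operate on sSems[slot] ...

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);
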
@@ -82,7 +96,7 @@ struct sem_entry {
// count + acquisition count of all blocked
// threads
char* name;
- Team* owner;
+ team_id owner;
select_info* select_infos;
thread_id last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
@@ -146,15 +160,13 @@ dump_sem_list(int argc, char** argv)
if (sem->id < 0
|| (last != -1 && sem->u.used.last_acquirer != last)
|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
- || (owner != -1
- && (sem->u.used.owner == NULL
- || sem->u.used.owner->id != owner)))
+ || (owner != -1 && sem->u.used.owner != owner))
continue;
kprintf("%p %6ld %5ld %6ld "
"%6ld "
" %s\n", sem, sem->id, sem->u.used.count,
- sem->u.used.owner != NULL ? sem->u.used.owner->id : -1,
+ sem->u.used.owner,
sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
sem->u.used.name);
}
@@ -170,8 +182,7 @@ dump_sem(struct sem_entry* sem)
kprintf("id: %ld (%#lx)\n", sem->id, sem->id);
if (sem->id >= 0) {
kprintf("name: '%s'\n", sem->u.used.name);
- kprintf("owner: %ld\n",
- sem->u.used.owner != NULL ? sem->u.used.owner->id : -1);
+ kprintf("owner: %ld\n", sem->u.used.owner);
kprintf("count: %ld\n", sem->u.used.count);
kprintf("queue: ");
if (!sem->queue.IsEmpty()) {
@@ -184,8 +195,7 @@ dump_sem(struct sem_entry* sem)
set_debug_variable("_sem", (addr_t)sem);
set_debug_variable("_semID", sem->id);
- set_debug_variable("_owner",
- sem->u.used.owner != NULL ? sem->u.used.owner->id : -1);
+ set_debug_variable("_owner", sem->u.used.owner);
#if DEBUG_SEM_LAST_ACQUIRER
kprintf("last acquired by: %ld, count: %ld\n",
@@ -291,15 +301,14 @@ notify_sem_select_events(struct sem_entry* sem, uint16 events)
}
-/*! Fills the thread_info structure with information from the specified
- thread.
- The thread lock must be held when called.
+/*! Fills the sem_info structure with information from the given semaphore.
+ The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
{
info->sem = sem->id;
- info->team = sem->u.used.owner != NULL ? sem->u.used.owner->id : -1;
+ info->team = sem->u.used.owner;
strlcpy(info->name, sem->u.used.name, sizeof(info->name));
info->count = sem->u.used.count;
info->latest_holder = sem->u.used.last_acquirer;
@@ -320,12 +329,12 @@ uninit_sem_locked(struct sem_entry& sem, char** _name)
sem.u.used.select_infos = NULL;
// free any threads waiting for this semaphore
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
while (queued_thread* entry = sem.queue.RemoveHead()) {
entry->queued = false;
thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
}
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
int32 id = sem.id;
sem.id = -1;
@@ -353,41 +362,41 @@ delete_sem_internal(sem_id id, bool checkPermission)
int32 slot = id % sMaxSems;
cpu_status state = disable_interrupts();
- GRAB_TEAM_LOCK();
+ GRAB_SEM_LIST_LOCK();
GRAB_SEM_LOCK(sSems[slot]);
if (sSems[slot].id != id) {
RELEASE_SEM_LOCK(sSems[slot]);
- RELEASE_TEAM_LOCK();
+ RELEASE_SEM_LIST_LOCK();
restore_interrupts(state);
TRACE(("delete_sem: invalid sem_id %ld\n", id));
return B_BAD_SEM_ID;
}
if (checkPermission
- && sSems[slot].u.used.owner == team_get_kernel_team()) {
+ && sSems[slot].u.used.owner == team_get_kernel_team_id()) {
RELEASE_SEM_LOCK(sSems[slot]);
- RELEASE_TEAM_LOCK();
+ RELEASE_SEM_LIST_LOCK();
restore_interrupts(state);
dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
thread_get_current_thread_id(), id);
return B_NOT_ALLOWED;
}
- if (sSems[slot].u.used.owner != NULL) {
+ if (sSems[slot].u.used.owner >= 0) {
list_remove_link(&sSems[slot].u.used.team_link);
- sSems[slot].u.used.owner = NULL;
+ sSems[slot].u.used.owner = -1;
} else
panic("sem %ld has no owner", id);
- RELEASE_TEAM_LOCK();
+ RELEASE_SEM_LIST_LOCK();
char* name;
uninit_sem_locked(sSems[slot], &name);
- GRAB_THREAD_LOCK();
+ SpinLocker schedulerLocker(gSchedulerLock);
scheduler_reschedule_if_necessary_locked();
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
restore_interrupts(state);
@@ -480,6 +489,13 @@ create_sem_etc(int32 count, const char* name, team_id owner)
if (name == NULL)
name = "unnamed semaphore";
+ // get the owning team
+ Team* team = Team::Get(owner);
+ if (team == NULL)
+ return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
+
+ // clone the name
nameLength = strlen(name) + 1;
nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
tempName = (char*)malloc(nameLength);
@@ -488,30 +504,7 @@ create_sem_etc(int32 count, const char* name, team_id owner)
strlcpy(tempName, name, nameLength);
- Team* team = NULL;
- if (owner == team_get_kernel_team_id())
- team = team_get_kernel_team();
- else if (owner == team_get_current_team_id())
- team = thread_get_current_thread()->team;
-
- bool teamsLocked = false;
state = disable_interrupts();
-
- if (team == NULL) {
- // We need to hold the team lock to make sure this one exists (and
- // won't go away.
- GRAB_TEAM_LOCK();
-
- team = team_get_team_struct_locked(owner);
- if (team == NULL) {
- RELEASE_TEAM_LOCK();
- restore_interrupts(state);
- free(tempName);
- return B_BAD_TEAM_ID;
- }
-
- teamsLocked = true;
- }
GRAB_SEM_LIST_LOCK();
// get the first slot from the free list
@@ -529,14 +522,11 @@ create_sem_etc(int32 count, const char* name, team_id owner)
sem->u.used.net_count = count;
new(&sem->queue) ThreadQueue;
sem->u.used.name = tempName;
- sem->u.used.owner = team;
+ sem->u.used.owner = team->id;
sem->u.used.select_infos = NULL;
id = sem->id;
- if (teamsLocked) {
- // insert now
- list_add_item(&team->sem_list, &sem->u.used.team_link);
- }
+ list_add_item(&team->sem_list, &sem->u.used.team_link);
RELEASE_SEM_LOCK(*sem);
@@ -551,20 +541,6 @@ create_sem_etc(int32 count, const char* name, team_id owner)
}
RELEASE_SEM_LIST_LOCK();
-
- int32 slot = id % sMaxSems;
- if (sem != NULL && !teamsLocked) {
- GRAB_TEAM_LOCK();
- GRAB_SEM_LOCK(sSems[slot]);
-
- list_add_item(&team->sem_list, &sem->u.used.team_link);
-
- RELEASE_SEM_LOCK(sSems[slot]);
- teamsLocked = true;
- }
-
- if (teamsLocked)
- RELEASE_TEAM_LOCK();
restore_interrupts(state);
if (sem == NULL)
@@ -593,7 +569,7 @@ select_sem(int32 id, struct select_info* info, bool kernel)
// bad sem ID
error = B_BAD_SEM_ID;
} else if (!kernel
- && sSems[slot].u.used.owner == team_get_kernel_team()) {
+ && sSems[slot].u.used.owner == team_get_kernel_team_id()) {
// kernel semaphore, but call from userland
error = B_NOT_ALLOWED;
} else {
@@ -665,10 +641,11 @@ remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
// We're done with this entry. We only have to check, if other threads
// need unblocking, too.
- // Now see if more threads need to be woken up. We get the thread lock for
- // that time, so the blocking state of threads won't change. We need that
- // lock anyway when unblocking a thread.
- GRAB_THREAD_LOCK();
+ // Now see if more threads need to be woken up. We get the scheduler lock
+ // for that time, so the blocking state of threads won't change (due to
+ // interruption or timeout). We need that lock anyway when unblocking a
+ // thread.
+ SpinLocker schedulerLocker(gSchedulerLock);
while ((entry = sem->queue.Head()) != NULL) {
if (thread_is_blocked(entry->thread)) {
@@ -689,7 +666,7 @@ remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
entry->queued = false;
}
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
// select notification, if the semaphore is now acquirable
if (sem->u.used.count > 0)
@@ -702,19 +679,20 @@ remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
void
sem_delete_owned_sems(Team* team)
{
- struct list queue;
-
- {
- InterruptsSpinLocker locker(gTeamSpinlock);
- list_move_to_list(&team->sem_list, &queue);
- }
-
- while (sem_entry* sem = (sem_entry*)list_remove_head_item(&queue)) {
+ while (true) {
char* name;
{
+ // get the next semaphore from the team's sem list
InterruptsLocker locker;
+ SpinLocker semListLocker(sSemsSpinlock);
+ sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list);
+ if (sem == NULL)
+ break;
+
+ // delete the semaphore
GRAB_SEM_LOCK(*sem);
+ semListLocker.Unlock();
uninit_sem_locked(*sem, &name);
}
@@ -814,7 +792,7 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
// TODO: the B_CHECK_PERMISSION flag should be made private, as it
// doesn't have any use outside the kernel
if ((flags & B_CHECK_PERMISSION) != 0
- && sSems[slot].u.used.owner == team_get_kernel_team()) {
+ && sSems[slot].u.used.owner == team_get_kernel_team_id()) {
dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
thread_get_current_thread_id(), id);
status = B_NOT_ALLOWED;
@@ -846,7 +824,9 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
// do a quick check to see if the thread has any pending signals
// this should catch most of the cases where the thread had a signal
+ SpinLocker schedulerLocker(gSchedulerLock);
if (thread_is_interrupted(thread, flags)) {
+ schedulerLocker.Unlock();
sSems[slot].u.used.count += count;
status = B_INTERRUPTED;
// the other semaphore will be released later
@@ -872,13 +852,13 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
semToBeReleased = -1;
}
- GRAB_THREAD_LOCK();
+ schedulerLocker.Lock();
status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
? thread_block_locked(thread)
: thread_block_with_timeout_locked(flags, timeout);
- RELEASE_THREAD_LOCK();
+ schedulerLocker.Unlock();
GRAB_SEM_LOCK(sSems[slot]);
// If we're still queued, this means the acquiration failed, and we
@@ -963,7 +943,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
// doesn't have any use outside the kernel
if ((flags & B_CHECK_PERMISSION) != 0
- && sSems[slot].u.used.owner == team_get_kernel_team()) {
+ && sSems[slot].u.used.owner == team_get_kernel_team_id()) {
dprintf("thread %ld tried to release kernel semaphore.\n",
thread_get_current_thread_id());
return B_NOT_ALLOWED;
@@ -990,7 +970,9 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
flags |= B_RELEASE_IF_WAITING_ONLY;
}
- SpinLocker threadLocker(gThreadSpinlock);
+ // Grab the scheduler lock, so thread_is_blocked() is reliable (due to
+ // possible interruptions or timeouts, it wouldn't be otherwise).
+ SpinLocker schedulerLocker(gSchedulerLock);
while (count > 0) {
queued_thread* entry = sSems[slot].queue.Head();
@@ -1027,7 +1009,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
entry->queued = false;
}
- threadLocker.Unlock();
+ schedulerLocker.Unlock();
if (sSems[slot].u.used.count > 0)
notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);
@@ -1036,7 +1018,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
// been told not to.
if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
semLocker.Unlock();
- threadLocker.Lock();
+ schedulerLocker.Lock();
scheduler_reschedule_if_necessary_locked();
}
@@ -1123,16 +1105,12 @@ _get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
if (teamID < 0)
return B_BAD_TEAM_ID;
- InterruptsSpinLocker locker(gTeamSpinlock);
-
- Team* team;
- if (teamID == B_CURRENT_TEAM)
- team = thread_get_current_thread()->team;
- else
- team = team_get_team_struct_locked(teamID);
-
+ Team* team = Team::Get(teamID);
if (team == NULL)
return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
+
+ InterruptsSpinLocker semListLocker(sSemsSpinlock);
// TODO: find a way to iterate the list that is more reliable
sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
@@ -1152,7 +1130,7 @@ _get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
GRAB_SEM_LOCK(*sem);
- if (sem->id != -1 && sem->u.used.owner == team) {
+ if (sem->id != -1 && sem->u.used.owner == team->id) {
// found one!
fill_sem_info(sem, info, size);
newIndex = index + 1;
@@ -1183,12 +1161,13 @@ set_sem_owner(sem_id id, team_id newTeamID)
int32 slot = id % sMaxSems;
- InterruptsSpinLocker teamLocker(gTeamSpinlock);
-
- Team* newTeam = team_get_team_struct_locked(newTeamID);
+ // get the new team
+ Team* newTeam = Team::Get(newTeamID);
if (newTeam == NULL)
return B_BAD_TEAM_ID;
+ BReference<Team> newTeamReference(newTeam, true);
+ InterruptsSpinLocker semListLocker(sSemsSpinlock);
SpinLocker semLocker(sSems[slot].lock);
if (sSems[slot].id != id) {
@@ -1199,7 +1178,7 @@ set_sem_owner(sem_id id, team_id newTeamID)
list_remove_link(&sSems[slot].u.used.team_link);
list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);
- sSems[slot].u.used.owner = newTeam;
+ sSems[slot].u.used.owner = newTeam->id;
return B_OK;
}
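The recurring idiom in the hunks above -- create_sem_etc(), _get_next_sem_info() and set_sem_owner() -- replaces the old gTeamSpinlock-guarded team_get_team_struct_locked() lookups with a reference-counted accessor. A condensed sketch of that pattern, assuming Team::Get() hands out the team with an extra reference (as the calls above suggest):

	// Resolve a team_id without holding any global team lock.
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
		// 'true': adopt the reference Team::Get() returned; it is released
		// automatically when teamReference goes out of scope.

	// The team cannot go away while we use team->id, team->sem_list, etc.;
	// the finer-grained locks (sSemsSpinlock, sem_entry::lock) still apply.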
diff --git a/src/system/kernel/signal.cpp b/src/system/kernel/signal.cpp
index bd2520c..21542da 100644
--- a/src/system/kernel/signal.cpp
+++ b/src/system/kernel/signal.cpp
@@ -1,4 +1,5 @@
/*
+ * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002, Angelo Mottola, a.mottola@libero.it.
*
@@ -41,13 +42,19 @@
#endif
-#define BLOCKABLE_SIGNALS (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
+#define BLOCKABLE_SIGNALS \
+ (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP) \
+ | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
+ | SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
#define STOP_SIGNALS \
(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
+#define CONTINUE_SIGNALS \
+ (SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD))
#define DEFAULT_IGNORE_SIGNALS \
(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
- | SIGNAL_TO_MASK(SIGCONT))
+ | SIGNAL_TO_MASK(SIGCONT) \
+ | SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
#define NON_DEFERRABLE_SIGNALS \
(KILL_SIGNALS \
| SIGNAL_TO_MASK(SIGILL) \
@@ -55,17 +62,501 @@
| SIGNAL_TO_MASK(SIGSEGV))
-const char * const sigstr[NSIG] = {
- "NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
- "FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
- "TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
- "POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
+static const struct {
+ const char* name;
+ int32 priority;
+} kSignalInfos[__MAX_SIGNO + 1] = {
+ {"NONE", -1},
+ {"HUP", 0},
+ {"INT", 0},
+ {"QUIT", 0},
+ {"ILL", 0},
+ {"CHLD", 0},
+ {"ABRT", 0},
+ {"PIPE", 0},
+ {"FPE", 0},
+ {"KILL", 100},
+ {"STOP", 0},
+ {"SEGV", 0},
+ {"CONT", 0},
+ {"TSTP", 0},
+ {"ALRM", 0},
+ {"TERM", 0},
+ {"TTIN", 0},
+ {"TTOU", 0},
+ {"USR1", 0},
+ {"USR2", 0},
+ {"WINCH", 0},
+ {"KILLTHR", 100},
+ {"TRAP", 0},
+ {"POLL", 0},
+ {"PROF", 0},
+ {"SYS", 0},
+ {"URG", 0},
+ {"VTALRM", 0},
+ {"XCPU", 0},
+ {"XFSZ", 0},
+ {"SIGBUS", 0},
+ {"SIGRESERVED1", 0},
+ {"SIGRESERVED2", 0},
+ {"SIGRT1", 8},
+ {"SIGRT2", 7},
+ {"SIGRT3", 6},
+ {"SIGRT4", 5},
+ {"SIGRT5", 4},
+ {"SIGRT6", 3},
+ {"SIGRT7", 2},
+ {"SIGRT8", 1},
+ {"invalid 41", 0},
+ {"invalid 42", 0},
+ {"invalid 43", 0},
+ {"invalid 44", 0},
+ {"invalid 45", 0},
+ {"invalid 46", 0},
+ {"invalid 47", 0},
+ {"invalid 48", 0},
+ {"invalid 49", 0},
+ {"invalid 50", 0},
+ {"invalid 51", 0},
+ {"invalid 52", 0},
+ {"invalid 53", 0},
+ {"invalid 54", 0},
+ {"invalid 55", 0},
+ {"invalid 56", 0},
+ {"invalid 57", 0},
+ {"invalid 58", 0},
+ {"invalid 59", 0},
+ {"invalid 60", 0},
+ {"invalid 61", 0},
+ {"invalid 62", 0},
+ {"CANCEL_THREAD", 0},
+ {"CONTINUE_THREAD", 0} // priority must be <= that of SIGSTOP
};
-static status_t deliver_signal(Thread *thread, uint signal, uint32 flags);
+static inline const char*
+signal_name(uint32 number)
+{
+ return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
+}
+
+
+// #pragma mark - SignalHandledCaller
+
+
+struct SignalHandledCaller {
+ SignalHandledCaller(Signal* signal)
+ :
+ fSignal(signal)
+ {
+ }
+
+ ~SignalHandledCaller()
+ {
+ Done();
+ }
+
+ void Done()
+ {
+ if (fSignal != NULL) {
+ fSignal->Handled();
+ fSignal = NULL;
+ }
+ }
+
+private:
+ Signal* fSignal;
+};
+
+
+// #pragma mark - QueuedSignalsCounter
+
+
+/*! Creates a counter with the given limit.
+ The limit defines the maximum the counter may reach. Since the
+ BReferenceable's reference count is used, it is assumed that the owning
+ team holds a reference and the reference count is one greater than the
+ counter value.
+ \param limit The maximum allowed value the counter may have. When
+ \code < 0 \endcode, the value is not limited.
+*/
+QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
+ :
+ fLimit(limit)
+{
+}
+
+
+/*! Increments the counter, if the limit allows that.
+ \return \c true, if incrementing the counter succeeded, \c false otherwise.
+*/
+bool
+QueuedSignalsCounter::Increment()
+{
+ // no limit => no problem
+ if (fLimit < 0) {
+ AcquireReference();
+ return true;
+ }
+
+ // Increment the reference count manually, so we can check atomically. We
+ // compare the old value against fLimit, since our (primary) owner holds a
+ // reference that we don't want to count.
+ if (atomic_add(&fReferenceCount, 1) > fLimit) {
+ ReleaseReference();
+ return false;
+ }
+
+ return true;
+}
+
+
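The counter piggybacks on BReferenceable's reference count; how it is meant to be used can be seen in Signal::CreateQueuable() further down. A condensed sketch of that call path (assuming Decrement() simply drops the extra reference again, as the error path in CreateQueuable() implies):

	// Enforce the per-team queued-signal limit before cloning a signal.
	QueuedSignalsCounter* counter
		= thread_get_current_thread()->team->QueuedSignalsCounter();

	if (!counter->Increment())			// limit reached -- refuse to queue
		return EAGAIN;

	Signal* clone = new(std::nothrow) Signal(signal);
	if (clone == NULL) {
		counter->Decrement();			// undo the increment on failure
		return B_NO_MEMORY;
	}

	// The clone now owns the increment; Signal::~Signal() releases the
	// counter reference once the signal has been handled.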
+// #pragma mark - Signal
+
+
+Signal::Signal()
+ :
+ fCounter(NULL),
+ fPending(false)
+{
+}
+
+
+Signal::Signal(const Signal& other)
+ :
+ fCounter(NULL),
+ fNumber(other.fNumber),
+ fSignalCode(other.fSignalCode),
+ fErrorCode(other.fErrorCode),
+ fSendingProcess(other.fSendingProcess),
+ fSendingUser(other.fSendingUser),
+ fStatus(other.fStatus),
+ fPollBand(other.fPollBand),
+ fAddress(other.fAddress),
+ fUserValue(other.fUserValue),
+ fPending(false)
+{
+}
+
+
+Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
+ pid_t sendingProcess)
+ :
+ fCounter(NULL),
+ fNumber(number),
+ fSignalCode(signalCode),
+ fErrorCode(errorCode),
+ fSendingProcess(sendingProcess),
+ fSendingUser(getuid()),
+ fStatus(0),
+ fPollBand(0),
+ fAddress(NULL),
+ fPending(false)
+{
+ fUserValue.sival_ptr = NULL;
+}
+
+
+Signal::~Signal()
+{
+ if (fCounter != NULL)
+ fCounter->ReleaseReference();
+}
+
+
+/*! Creates a queuable clone of the given signal.
+ Also enforces the current team's signal queuing limit.
+
+ \param signal The signal to clone.
+ \param queuingRequired If \c true, the function will return an error code
+ when creating the clone fails for any reason. Otherwise, the function
+ will set \a _signalToQueue to \c NULL, but still return \c B_OK.
+ \param _signalToQueue Return parameter. Set to the clone of the signal.
+ \return When \c queuingRequired is \c false, always \c B_OK. Otherwise
+ \c B_OK when creating the signal clone succeeds, or another error code
+ when it fails.
+*/
+/*static*/ status_t
+Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
+ Signal*& _signalToQueue)
+{
+ _signalToQueue = NULL;
+
+ // If interrupts are disabled, we can't allocate a signal.
+ if (!are_interrupts_enabled())
+ return queuingRequired ? B_BAD_VALUE : B_OK;
+
+ // increment the queued signals counter
+ QueuedSignalsCounter* counter
+ = thread_get_current_thread()->team->QueuedSignalsCounter();
+ if (!counter->Increment())
+ return queuingRequired ? EAGAIN : B_OK;
+
+ // allocate the signal
+ Signal* signalToQueue = new(std::nothrow) Signal(signal);
+ if (signalToQueue == NULL) {
+ counter->Decrement();
+ return queuingRequired ? B_NO_MEMORY : B_OK;
+ }
+
+ signalToQueue->fCounter = counter;
+
+ _signalToQueue = signalToQueue;
+ return B_OK;
+}
+
+
+void
+Signal::SetTo(uint32 number)
+{
+ Team* team = thread_get_current_thread()->team;
+
+ fNumber = number;
+ fSignalCode = SI_USER;
+ fErrorCode = 0;
+ fSendingProcess = team->id;
+ fSendingUser = team->effective_uid;
+ // assuming scheduler lock is being held
+ fStatus = 0;
+ fPollBand = 0;
+ fAddress = NULL;
+ fUserValue.sival_ptr = NULL;
+}
+
+
+int32
+Signal::Priority() const
+{
+ return kSignalInfos[fNumber].priority;
+}
+
+
+void
+Signal::Handled()
+{
+ ReleaseReference();
+}
+
+
+void
+Signal::LastReferenceReleased()
+{
+ if (are_interrupts_enabled())
+ delete this;
+ else
+ deferred_delete(this);
+}
+
+
+// #pragma mark - PendingSignals
+
+
+PendingSignals::PendingSignals()
+ :
+ fQueuedSignalsMask(0),
+ fUnqueuedSignalsMask(0)
+{
+}
+
+
+PendingSignals::~PendingSignals()
+{
+ Clear();
+}
+
+
+/*! Of the signals in \a nonBlocked, returns the priority of the one with the
+ highest priority.
+ \param nonBlocked The mask with the non-blocked signals.
+ \return The priority of the highest priority non-blocked signal, or, if all
+ signals are blocked, \c -1.
+*/
+int32
+PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
+{
+ Signal* queuedSignal;
+ int32 unqueuedSignal;
+ return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
+}
+
+
+void
+PendingSignals::Clear()
+{
+ // release references of all queued signals
+ while (Signal* signal = fQueuedSignals.RemoveHead())
+ signal->Handled();
+
+ fQueuedSignalsMask = 0;
+ fUnqueuedSignalsMask = 0;
+}
+
+
+/*! Adds a signal.
+ Takes over the reference to the signal from the caller.
+*/
+void
+PendingSignals::AddSignal(Signal* signal)
+{
+ // queue according to priority
+ int32 priority = signal->Priority();
+ Signal* otherSignal = NULL;
+ for (SignalList::Iterator it = fQueuedSignals.GetIterator();
+ (otherSignal = it.Next()) != NULL;) {
+ if (priority > otherSignal->Priority())
+ break;
+ }
+
+ fQueuedSignals.InsertBefore(otherSignal, signal);
+ signal->SetPending(true);
+
+ fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
+}
+
+
+void
+PendingSignals::RemoveSignal(Signal* signal)
+{
+ signal->SetPending(false);
+ fQueuedSignals.Remove(signal);
+ _UpdateQueuedSignalMask();
+}
+
+
+void
+PendingSignals::RemoveSignals(sigset_t mask)
+{
+ // remove from queued signals
+ if ((fQueuedSignalsMask & mask) != 0) {
+ for (SignalList::Iterator it = fQueuedSignals.GetIterator();
+ Signal* signal = it.Next();) {
+ // remove signal, if in mask
+ if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
+ it.Remove();
+ signal->SetPending(false);
+ signal->Handled();
+ }
+ }
+
+ fQueuedSignalsMask &= ~mask;
+ }
+
+ // remove from unqueued signals
+ fUnqueuedSignalsMask &= ~mask;
+}
+
+
+/*! Removes and returns a signal in \a nonBlocked that has the highest priority.
+ The caller gets a reference to the returned signal, if any.
+ \param nonBlocked The mask of non-blocked signals.
+ \param buffer If the signal is not queued this buffer is returned. In this
+ case the method acquires a reference to \a buffer, so that the caller
+ gets a reference also in this case.
+ \return The removed signal or \c NULL, if all signals are blocked.
+*/
+Signal*
+PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
+{
+ // find the signal with the highest priority
+ Signal* queuedSignal;
+ int32 unqueuedSignal;
+ if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
+ return NULL;
+
+ // if it is a queued signal, dequeue it
+ if (queuedSignal != NULL) {
+ fQueuedSignals.Remove(queuedSignal);
+ queuedSignal->SetPending(false);
+ _UpdateQueuedSignalMask();
+ return queuedSignal;
+ }
+
+ // it is unqueued -- remove from mask
+ fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
+
+ // init buffer
+ buffer.SetTo(unqueuedSignal);
+ buffer.AcquireReference();
+ return &buffer;
+}
+
+
+/*! Of the signals in \a nonBlocked, returns the priority of the one with the
+ highest priority.
+ \param nonBlocked The mask of non-blocked signals.
+ \param _queuedSignal If the found signal is a queued signal, the variable
+ will be set to that signal, otherwise to \c NULL.
+ \param _unqueuedSignal If the found signal is an unqueued signal, the
+ variable is set to that signal's number, otherwise to \c -1.
+ \return The priority of the highest priority non-blocked signal, or, if all
+ signals are blocked, \c -1.
+*/
+int32
+PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
+ Signal*& _queuedSignal, int32& _unqueuedSignal) const
+{
+ // check queued signals
+ Signal* queuedSignal = NULL;
+ int32 queuedPriority = -1;
+
+ if ((fQueuedSignalsMask & nonBlocked) != 0) {
+ for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
+ Signal* signal = it.Next();) {
+ if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
+ queuedPriority = signal->Priority();
+ queuedSignal = signal;
+ break;
+ }
+ }
+ }
+
+ // check unqueued signals
+ int32 unqueuedSignal = -1;
+ int32 unqueuedPriority = -1;
+
+ sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
+ if (unqueuedSignals != 0) {
+ int32 signal = 1;
+ while (unqueuedSignals != 0) {
+ sigset_t mask = SIGNAL_TO_MASK(signal);
+ if ((unqueuedSignals & mask) != 0) {
+ int32 priority = kSignalInfos[signal].priority;
+ if (priority > unqueuedPriority) {
+ unqueuedSignal = signal;
+ unqueuedPriority = priority;
+ }
+ unqueuedSignals &= ~mask;
+ }
+
+ signal++;
+ }
+ }
+
+ // Return found queued or unqueued signal, whichever has the higher
+ // priority.
+ if (queuedPriority >= unqueuedPriority) {
+ _queuedSignal = queuedSignal;
+ _unqueuedSignal = -1;
+ return queuedPriority;
+ }
+
+ _queuedSignal = NULL;
+ _unqueuedSignal = unqueuedSignal;
+ return unqueuedPriority;
+}
+
+
+void
+PendingSignals::_UpdateQueuedSignalMask()
+{
+ sigset_t mask = 0;
+ for (SignalList::Iterator it = fQueuedSignals.GetIterator();
+ Signal* signal = it.Next();) {
+ mask |= SIGNAL_TO_MASK(signal->Number());
+ }
+
+ fQueuedSignalsMask = mask;
+}
+
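To make the priority rules concrete, here is a small self-contained user-space model of the unqueued-signal scan in _GetHighestPrioritySignal() (assumption: SIGNAL_TO_MASK(n) sets bit n-1 of a 64-bit mask, which is how the masks in this file are manipulated; the signal numbers and priorities come from the kSignalInfos table above):

#include <cstdint>
#include <cstdio>

typedef uint64_t sigset_model;
#define SIGNAL_TO_MASK(signal)	(((sigset_model)1) << ((signal) - 1))

// Priorities as in kSignalInfos: USR1 (18) -> 0, SIGRT2 (34) -> 7.
static int
priority_of(int signal)
{
	if (signal == 34)
		return 7;
	return 0;
}

int
main()
{
	sigset_model pending = SIGNAL_TO_MASK(18) | SIGNAL_TO_MASK(34);
	sigset_model nonBlocked = ~(sigset_model)0;

	int bestSignal = -1;
	int bestPriority = -1;

	// Scan all pending, non-blocked signals and keep the highest priority.
	sigset_model bits = pending & nonBlocked;
	for (int signal = 1; bits != 0; signal++) {
		sigset_model mask = SIGNAL_TO_MASK(signal);
		if ((bits & mask) == 0)
			continue;

		if (priority_of(signal) > bestPriority) {
			bestPriority = priority_of(signal);
			bestSignal = signal;
		}
		bits &= ~mask;
	}

	// Prints: dequeue signal 34 first (priority 7 beats priority 0)
	printf("dequeue signal %d first (priority %d beats priority 0)\n",
		bestSignal, bestPriority);
	return 0;
}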
// #pragma mark - signal tracing
@@ -75,28 +566,29 @@ static status_t deliver_signal(Thread *thread, uint signal, uint32 flags);
namespace SignalTracing {
-class HandleSignals : public AbstractTraceEntry {
+class HandleSignal : public AbstractTraceEntry {
public:
- HandleSignals(uint32 signals)
+ HandleSignal(uint32 signal)
:
- fSignals(signals)
+ fSignal(signal)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
- out.Print("signal handle: 0x%lx", fSignals);
+ out.Print("signal handle: %" B_PRIu32 " (%s)" , fSignal,
+ signal_name(fSignal));
}
private:
- uint32 fSignals;
+ uint32 fSignal;
};
class ExecuteSignalHandler : public AbstractTraceEntry {
public:
- ExecuteSignalHandler(int signal, struct sigaction* handler)
+ ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
:
fSignal(signal),
fHandler((void*)handler->sa_handler)
@@ -106,12 +598,12 @@ class ExecuteSignalHandler : public AbstractTraceEntry {
virtual void AddDump(TraceOutput& out)
{
- out.Print("signal exec handler: signal: %d, handler: %p",
- fSignal, fHandler);
+ out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
+ "handler: %p", fSignal, signal_name(fSignal), fHandler);
}
private:
- int fSignal;
+ uint32 fSignal;
void* fHandler;
};
@@ -130,8 +622,7 @@ class SendSignal : public AbstractTraceEntry {
virtual void AddDump(TraceOutput& out)
{
out.Print("signal send: target: %ld, signal: %lu (%s), "
- "flags: 0x%lx", fTarget, fSignal,
- (fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
+ "flags: 0x%lx", fTarget, fSignal, signal_name(fSignal), fFlags);
}
private:
@@ -143,9 +634,8 @@ class SendSignal : public AbstractTraceEntry {
class SigAction : public AbstractTraceEntry {
public:
- SigAction(Thread* thread, uint32 signal, const struct sigaction* act)
+ SigAction(uint32 signal, const struct sigaction* act)
:
- fThread(thread->id),
fSignal(signal),
fAction(*act)
{
@@ -154,15 +644,13 @@ class SigAction : public AbstractTraceEntry {
virtual void AddDump(TraceOutput& out)
{
- out.Print("signal action: thread: %ld, signal: %lu (%s), "
- "action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
- fThread, fSignal,
- (fSignal < NSIG ? sigstr[fSignal] : "invalid"),
- fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
+ out.Print("signal action: signal: %lu (%s), "
+ "action: {handler: %p, flags: 0x%x, mask: 0x%llx}", fSignal,
+ signal_name(fSignal), fAction.sa_handler, fAction.sa_flags,
+ (long long)fAction.sa_mask);
}
private:
- thread_id fThread;
uint32 fSignal;
struct sigaction fAction;
};
@@ -194,8 +682,8 @@ class SigProcMask : public AbstractTraceEntry {
break;
}
- out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
- fOldMask);
+ out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
+ (long long)fMask, (long long)fOldMask);
}
private:
@@ -217,8 +705,8 @@ class SigSuspend : public AbstractTraceEntry {
virtual void AddDump(TraceOutput& out)
{
- out.Print("signal suspend: %#" B_PRIx32 ", old mask: %#" B_PRIx32,
- fMask, fOldMask);
+ out.Print("signal suspend: %#llx, old mask: %#llx",
+ (long long)fMask, (long long)fOldMask);
}
private:
@@ -231,7 +719,7 @@ class SigSuspendDone : public AbstractTraceEntry {
public:
SigSuspendDone()
:
- fSignals(thread_get_current_thread()->sig_pending)
+ fSignals(thread_get_current_thread()->ThreadPendingSignals())
{
Initialized();
}
@@ -257,80 +745,191 @@ class SigSuspendDone : public AbstractTraceEntry {
// #pragma mark -
-/*! Updates the thread::flags field according to what signals are pending.
- Interrupts must be disabled and the thread lock must be held.
+/*! Updates the given thread's Thread::flags field according to what signals are
+ pending.
+ The caller must hold the scheduler lock.
*/
static void
update_thread_signals_flag(Thread* thread)
{
- sigset_t mask = ~atomic_get(&thread->sig_block_mask)
- | thread->sig_temp_enabled;
- if (atomic_get(&thread->sig_pending) & mask)
+ sigset_t mask = ~thread->sig_block_mask;
+ if ((thread->AllPendingSignals() & mask) != 0)
atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
else
atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
-void
+/*! Updates the current thread's Thread::flags field according to what signals
+ are pending.
+ The caller must hold the scheduler lock.
+*/
+static void
update_current_thread_signals_flag()
{
- InterruptsSpinLocker locker(gThreadSpinlock);
-
update_thread_signals_flag(thread_get_current_thread());
}
+/*! Updates all of the given team's threads' Thread::flags fields according to
+ what signals are pending.
+ The caller must hold the scheduler lock.
+*/
+static void
+update_team_threads_signal_flag(Team* team)
+{
+ for (Thread* thread = team->thread_list; thread != NULL;
+ thread = thread->team_next) {
+ update_thread_signals_flag(thread);
+ }
+}
+
+
+/*! Notifies the user debugger about a signal to be handled.
+
+ The caller must not hold any locks.
+
+ \param thread The current thread.
+ \param signal The signal to be handled.
+ \param handler The installed signal handler for the signal.
+ \param deadly Indicates whether the signal is deadly.
+ \return \c true, if the signal shall be handled, \c false, if it shall be
+ ignored.
+*/
static bool
-notify_debugger(Thread *thread, int signal, struct sigaction *handler,
+notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
bool deadly)
{
- uint64 signalMask = SIGNAL_TO_MASK(signal);
+ uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
// first check the ignore signal masks the debugger specified for the thread
+ InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
- if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
- atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
+ if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
+ thread->debug_info.ignore_signals_once &= ~signalMask;
return true;
}
- if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
+ if ((thread->debug_info.ignore_signals & signalMask) != 0)
return true;
+ threadDebugInfoLocker.Unlock();
+
// deliver the event
- return user_debug_handle_signal(signal, handler, deadly);
+ return user_debug_handle_signal(signal->Number(), &handler, deadly);
}
-/*! Actually handles the signal - ie. the thread will exit, a custom signal
- handler is prepared, or whatever the signal demands.
+/*! Removes and returns a signal with the highest priority in \a nonBlocked that
+ is pending in the given thread or its team.
+ After dequeuing the signal the Thread::flags field of the affected threads
+ are updated.
+ The caller gets a reference to the returned signal, if any.
+ The caller must hold the scheduler lock.
+ \param thread The thread.
+ \param nonBlocked The mask of non-blocked signals.
+ \param buffer If the signal is not queued this buffer is returned. In this
+ case the method acquires a reference to \a buffer, so that the caller
+ gets a reference also in this case.
+ \return The removed signal or \c NULL, if all signals are blocked.
*/
-bool
-handle_signals(Thread *thread)
+static Signal*
+dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
+ Signal& buffer)
{
- uint32 signalMask = atomic_get(&thread->sig_pending)
- & (~atomic_get(&thread->sig_block_mask) | thread->sig_temp_enabled);
- thread->sig_temp_enabled = 0;
-
- // If SIGKILL[THR] are pending, we ignore other signals.
- // Otherwise check, if the thread shall stop for debugging.
- if (signalMask & KILL_SIGNALS) {
- signalMask &= KILL_SIGNALS;
- } else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
- user_debug_stop_thread();
+ Team* team = thread->team;
+ Signal* signal;
+ if (team->HighestPendingSignalPriority(nonBlocked)
+ > thread->HighestPendingSignalPriority(nonBlocked)) {
+ signal = team->DequeuePendingSignal(nonBlocked, buffer);
+ update_team_threads_signal_flag(team);
+ } else {
+ signal = thread->DequeuePendingSignal(nonBlocked, buffer);
+ update_thread_signals_flag(thread);
}
- if (signalMask == 0)
- return 0;
+ return signal;
+}
+
+
+static status_t
+setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
+ sigset_t signalMask)
+{
+ // prepare the data, we need to copy onto the user stack
+ signal_frame_data frameData;
+
+ // signal info
+ frameData.info.si_signo = signal->Number();
+ frameData.info.si_code = signal->SignalCode();
+ frameData.info.si_errno = signal->ErrorCode();
+ frameData.info.si_pid = signal->SendingProcess();
+ frameData.info.si_uid = signal->SendingUser();
+ frameData.info.si_addr = signal->Address();
+ frameData.info.si_status = signal->Status();
+ frameData.info.si_band = signal->PollBand();
+ frameData.info.si_value = signal->UserValue();
+
+ // context
+ frameData.context.uc_link = thread->user_signal_context;
+ frameData.context.uc_sigmask = signalMask;
+ // uc_stack and uc_mcontext are filled in by the architecture specific code.
+
+ // user data
+ frameData.user_data = action->sa_userdata;
+
+ // handler function
+ frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
+ frameData.handler = frameData.siginfo_handler
+ ? (void*)action->sa_sigaction : (void*)action->sa_handler;
+
+ // thread flags -- save and clear the thread's syscall restart related
+ // flags
+ frameData.thread_flags = atomic_and(&thread->flags,
+ ~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
+
+ // syscall restart related fields
+ memcpy(frameData.syscall_restart_parameters,
+ thread->syscall_restart.parameters,
+ sizeof(frameData.syscall_restart_parameters));
+ // syscall_restart_return_value is filled in by the architecture specific
+ // code.
+
+ return arch_setup_signal_frame(thread, action, &frameData);
+}
+
+
+/*! Actually handles pending signals -- i.e. the thread will exit, a custom
+ signal handler is prepared, or whatever the signal demands.
+ The function will not return when a deadly signal is encountered. The
+ function will suspend the thread indefinitely when a stop signal is
+ encountered.
+ Interrupts must be enabled.
+ \param thread The current thread.
+*/
+void
+handle_signals(Thread* thread)
+{
+ Team* team = thread->team;
+
+ TeamLocker teamLocker(team);
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ // If userland requested to defer signals, we check now, if this is
+ // possible.
+ sigset_t nonBlockedMask = ~thread->sig_block_mask;
+ sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
if (thread->user_thread->defer_signals > 0
- && (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
+ && (signalMask & NON_DEFERRABLE_SIGNALS) == 0
+ && thread->sigsuspend_original_unblocked_mask == 0) {
thread->user_thread->pending_signals = signalMask;
- return 0;
+ return;
}
thread->user_thread->pending_signals = 0;
+ // determine syscall restart behavior
uint32 restartFlags = atomic_and(&thread->flags,
~THREAD_FLAGS_DONT_RESTART_SYSCALL);
bool alwaysRestart
@@ -338,45 +937,94 @@ handle_signals(Thread *thread)
bool restart = alwaysRestart
|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
- T(HandleSignals(signalMask));
+ // Loop until we've handled all signals.
+ bool initialIteration = true;
+ while (true) {
+ if (initialIteration) {
+ initialIteration = false;
+ } else {
+ teamLocker.Lock();
+ schedulerLocker.Lock();
+
+ signalMask = thread->AllPendingSignals() & nonBlockedMask;
+ }
- for (int32 i = 0; i < NSIG; i++) {
- bool debugSignal;
- int32 signal = i + 1;
+ // Unless SIGKILL[THR] are pending, check, if the thread shall stop for
+ // debugging.
+ if ((signalMask & KILL_SIGNALS) == 0
+ && (atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
+ != 0) {
+ schedulerLocker.Unlock();
+ teamLocker.Unlock();
- if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
+ user_debug_stop_thread();
continue;
+ }
+
+ // We're done, if there aren't any pending signals anymore.
+ if ((signalMask & nonBlockedMask) == 0)
+ break;
+
+ // get pending non-blocked thread or team signal with the highest
+ // priority
+ Signal stackSignal;
+ Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
+ stackSignal);
+ ASSERT(signal != NULL);
+ SignalHandledCaller signalHandledCaller(signal);
+
+ schedulerLocker.Unlock();
+
+ // get the action for the signal
+ struct sigaction handler;
+ if (signal->Number() <= MAX_SIGNAL_NUMBER) {
+ handler = team->SignalActionFor(signal->Number());
+ } else {
+ handler.sa_handler = SIG_DFL;
+ handler.sa_flags = 0;
+ }
+
+ if ((handler.sa_flags & SA_ONESHOT) != 0
+ && handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
+ team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
+ }
- // clear the signal that we will handle
- atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
+ T(HandleSignal(signal->Number()));
- debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
- & (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));
+ teamLocker.Unlock();
- // TODO: since sigaction_etc() could clobber the fields at any time,
- // we should actually copy the relevant fields atomically before
- // accessing them (only the debugger is calling sigaction_etc()
- // right now).
- // Update: sigaction_etc() is only used by the userland debugger
- // support. We can just as well restrict getting/setting signal
- // handlers to work only when the respective thread is stopped.
- // Then sigaction() could be used instead and we could get rid of
- // sigaction_etc().
- struct sigaction* handler = &thread->sig_action[i];
+ // debug the signal, if a debugger is installed and the signal debugging
+ // flag is set
+ bool debugSignal = (~atomic_get(&team->debug_info.flags)
+ & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
+ == 0;
- TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));
+ // handle the signal
+ TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
+ kSignalInfos[signal->Number()].name));
- if (handler->sa_handler == SIG_IGN) {
+ if (handler.sa_handler == SIG_IGN) {
// signal is to be ignored
- // ToDo: apply zombie cleaning on SIGCHLD
+ // TODO: apply zombie cleaning on SIGCHLD
// notify the debugger
if (debugSignal)
notify_debugger(thread, signal, handler, false);
continue;
- } else if (handler->sa_handler == SIG_DFL) {
+ } else if (handler.sa_handler == SIG_DFL) {
// default signal behaviour
- switch (signal) {
+
+ // realtime signals are ignored by default
+ if (signal->Number() >= SIGNAL_REALTIME_MIN
+ && signal->Number() <= SIGNAL_REALTIME_MAX) {
+ // notify the debugger
+ if (debugSignal)
+ notify_debugger(thread, signal, handler, false);
+ continue;
+ }
+
+ bool killTeam = false;
+ switch (signal->Number()) {
case SIGCHLD:
case SIGWINCH:
case SIGURG:
@@ -385,6 +1033,23 @@ handle_signals(Thread *thread)
notify_debugger(thread, signal, handler, false);
continue;
+ case SIGNAL_CANCEL_THREAD:
+ // set up the signal handler
+ handler.sa_handler = thread->cancel_function;
+ handler.sa_flags = 0;
+ handler.sa_mask = 0;
+ handler.sa_userdata = NULL;
+
+ restart = false;
+ // we always want to interrupt
+ break;
+
+ case SIGNAL_CONTINUE_THREAD:
+ // prevent syscall restart, but otherwise ignore
+ restart = false;
+ atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
+ continue;
+
case SIGCONT:
// notify the debugger
if (debugSignal
@@ -392,11 +1057,14 @@ handle_signals(Thread *thread)
continue;
// notify threads waiting for team state changes
- if (thread == thread->team->main_thread) {
- InterruptsSpinLocker locker(gTeamSpinlock);
- team_set_job_control_state(thread->team,
+ if (thread == team->main_thread) {
+ team->LockTeamAndParent(false);
+
+ team_set_job_control_state(team,
JOB_CONTROL_STATE_CONTINUED, signal, false);
+ team->UnlockTeamAndParent();
+
// The standard states that the system *may* send a
// SIGCHLD when a child is continued. I haven't found
// a good reason why we would want to, though.
@@ -407,43 +1075,68 @@ handle_signals(Thread *thread)
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
+ {
// notify the debugger
if (debugSignal
&& !notify_debugger(thread, signal, handler, false))
continue;
- thread->next_state = B_THREAD_SUSPENDED;
+ // The terminal-sent stop signals are allowed to stop the
+ // process only if it doesn't belong to an orphaned process
+ // group. Otherwise the signal must be discarded.
+ team->LockProcessGroup();
+ AutoLocker<ProcessGroup> groupLocker(team->group, true);
+ if (signal->Number() != SIGSTOP
+ && team->group->IsOrphaned()) {
+ continue;
+ }
// notify threads waiting for team state changes
- if (thread == thread->team->main_thread) {
- InterruptsSpinLocker locker(gTeamSpinlock);
- team_set_job_control_state(thread->team,
+ if (thread == team->main_thread) {
+ team->LockTeamAndParent(false);
+
+ team_set_job_control_state(team,
JOB_CONTROL_STATE_STOPPED, signal, false);
// send a SIGCHLD to the parent (if it does have
// SA_NOCLDSTOP defined)
- SpinLocker _(gThreadSpinlock);
- Thread* parentThread
- = thread->team->parent->main_thread;
+ Team* parentTeam = team->parent;
+
struct sigaction& parentHandler
- = parentThread->sig_action[SIGCHLD - 1];
- if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
- deliver_signal(parentThread, SIGCHLD, 0);
+ = parentTeam->SignalActionFor(SIGCHLD);
+ if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
+ Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
+ team->id);
+ childSignal.SetStatus(signal->Number());
+ childSignal.SetSendingUser(signal->SendingUser());
+ send_signal_to_team(parentTeam, childSignal, 0);
+ }
+
+ team->UnlockTeamAndParent();
}
- return true;
+ groupLocker.Unlock();
+
+ // Suspend the thread, unless there's already a signal to
+ // continue or kill pending.
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ if ((thread->AllPendingSignals()
+ & (CONTINUE_SIGNALS | KILL_SIGNALS)) == 0) {
+ thread->next_state = B_THREAD_SUSPENDED;
+ scheduler_reschedule();
+ }
+ schedulerLocker.Unlock();
+
+ continue;
+ }
case SIGSEGV:
+ case SIGBUS:
case SIGFPE:
case SIGILL:
case SIGTRAP:
case SIGABRT:
- // If this is the main thread, we just fall through and let
- // this signal kill the team. Otherwise we send a SIGKILL to
- // the main thread first, since the signal will kill this
- // thread only.
- if (thread != thread->team->main_thread)
- send_signal(thread->team->main_thread->id, SIGKILL);
+ case SIGKILL:
case SIGQUIT:
case SIGPOLL:
case SIGPROF:
@@ -451,21 +1144,52 @@ handle_signals(Thread *thread)
case SIGVTALRM:
case SIGXCPU:
case SIGXFSZ:
- TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
- thread->id, signal));
- case SIGKILL:
- case SIGKILLTHR:
default:
- // if the thread exited normally, the exit reason is already set
- if (thread->exit.reason != THREAD_RETURN_EXIT) {
- thread->exit.reason = THREAD_RETURN_INTERRUPTED;
- thread->exit.signal = (uint16)signal;
- }
+ TRACE(("Shutting down team %" B_PRId32 " due to signal %"
+ B_PRIu32 " received in thread %" B_PRIu32 " \n",
+ team->id, signal->Number(), thread->id));
+
+ // This signal kills the team regardless which thread
+ // received it.
+ killTeam = true;
+ // fall through
+ case SIGKILLTHR:
// notify the debugger
- if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
- && !notify_debugger(thread, signal, handler, true))
+ if (debugSignal && signal->Number() != SIGKILL
+ && signal->Number() != SIGKILLTHR
+ && !notify_debugger(thread, signal, handler, true)) {
continue;
+ }
+
+ if (killTeam || thread == team->main_thread) {
+ // The signal is terminal for the team or the thread is
+ // the main thread. In either case the team is going
+ // down. Set its exit status, if that didn't happen yet.
+ teamLocker.Lock();
+
+ if (!team->exit.initialized) {
+ team->exit.reason = CLD_KILLED;
+ team->exit.signal = signal->Number();
+ team->exit.signaling_user = signal->SendingUser();
+ team->exit.status = 0;
+ team->exit.initialized = true;
+ }
+
+ teamLocker.Unlock();
+
+ // If this is not the main thread, send it a SIGKILLTHR
+ // so that the team terminates.
+ if (thread != team->main_thread) {
+ Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
+ team->id);
+ send_signal_to_thread_id(team->id, childSignal, 0);
+ }
+ }
+
+ // explicitly get rid of the signal reference, since
+ // thread_exit() won't return
+ signalHandledCaller.Done();
thread_exit();
// won't return
@@ -479,70 +1203,176 @@ handle_signals(Thread *thread)
continue;
if (!restart
- || ((!alwaysRestart && handler->sa_flags & SA_RESTART) == 0)) {
+ || (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
}
- T(ExecuteSignalHandler(signal, handler));
+ T(ExecuteSignalHandler(signal->Number(), &handler));
TRACE(("### Setting up custom signal handler frame...\n"));
- arch_setup_signal_frame(thread, handler, signal,
- atomic_get(&thread->sig_block_mask));
-
- if (handler->sa_flags & SA_ONESHOT)
- handler->sa_handler = SIG_DFL;
- if ((handler->sa_flags & SA_NOMASK) == 0) {
- // Update the block mask while the signal handler is running - it
- // will be automatically restored when the signal frame is left.
- atomic_or(&thread->sig_block_mask,
- (handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
+
+ // save the old block mask -- we may need to adjust it for the handler
+ schedulerLocker.Lock();
+
+ sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
+ ? ~thread->sigsuspend_original_unblocked_mask
+ : thread->sig_block_mask;
+
+ // Update the block mask while the signal handler is running -- it
+ // will be automatically restored when the signal frame is left.
+ thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
+
+ if ((handler.sa_flags & SA_NOMASK) == 0) {
+ thread->sig_block_mask
+ |= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
}
update_current_thread_signals_flag();
- return false;
+ schedulerLocker.Unlock();
+
+ setup_signal_frame(thread, &handler, signal, oldBlockMask);
+
+ // Reset sigsuspend_original_unblocked_mask. It would have been set by
+ // sigsuspend_internal(). In that case, above we set oldBlockMask
+ // accordingly so that after the handler returns the thread's signal
+ // mask is reset.
+ thread->sigsuspend_original_unblocked_mask = 0;
+
+ return;
}
- // clear syscall restart thread flag, if we're not supposed to restart the
- // syscall
- if (!restart)
+ // We have not handled any signal (or have only ignored some).
+
+ // If sigsuspend_original_unblocked_mask is non-null, we came from a
+ // sigsuspend_internal(). Not having handled any signal, we should restart
+ // the syscall.
+ if (thread->sigsuspend_original_unblocked_mask != 0) {
+ restart = true;
+ atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
+ } else if (!restart) {
+ // clear syscall restart thread flag, if we're not supposed to restart
+ // the syscall
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
+ }
+}
- update_current_thread_signals_flag();
- return false;
+/*! Checks whether the given signal is blocked for the given team (i.e. all of
+ its threads).
+ The caller must hold the team's lock and the scheduler lock.
+*/
+bool
+is_team_signal_blocked(Team* team, int signal)
+{
+ sigset_t mask = SIGNAL_TO_MASK(signal);
+
+ for (Thread* thread = team->thread_list; thread != NULL;
+ thread = thread->team_next) {
+ if ((thread->sig_block_mask & mask) == 0)
+ return false;
+ }
+
+ return true;
}
-bool
-is_kill_signal_pending(void)
+/*! Gets (guesses) the current thread's currently used stack from the given
+ stack pointer.
+ Fills in \a stack with either the signal stack or the thread's user stack.
+ \param address A stack pointer address to be used to determine the used
+ stack.
+ \param stack Filled in by the function.
+*/
+void
+signal_get_user_stack(addr_t address, stack_t* stack)
{
- return (atomic_get(&thread_get_current_thread()->sig_pending)
- & KILL_SIGNALS) != 0;
+ // If a signal stack is enabled for the stack and the address is within it,
+ // return the signal stack. In all other cases return the thread's user
+ // stack, even if the address doesn't lie within it.
+ Thread* thread = thread_get_current_thread();
+ if (thread->signal_stack_enabled && address >= thread->signal_stack_base
+ && address < thread->signal_stack_base + thread->signal_stack_size) {
+ stack->ss_sp = (void*)thread->signal_stack_base;
+ stack->ss_size = thread->signal_stack_size;
+ } else {
+ stack->ss_sp = (void*)thread->user_stack_base;
+ stack->ss_size = thread->user_stack_size;
+ }
+
+ stack->ss_flags = 0;
}
-bool
-is_signal_blocked(int signal)
+/*! Checks whether any non-blocked signal is pending for the current thread.
+ The caller must hold the scheduler lock.
+ \param thread The current thread.
+*/
+static bool
+has_signals_pending(Thread* thread)
{
- return (atomic_get(&thread_get_current_thread()->sig_block_mask)
- & SIGNAL_TO_MASK(signal)) != 0;
+ return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
}
-/*! Delivers the \a signal to the \a thread, but doesn't handle the signal -
- it just makes sure the thread gets the signal, ie. unblocks it if needed.
- This function must be called with interrupts disabled and the
- thread lock held.
+/*! Checks whether the current user has permission to send a signal to the given
+ target team.
+
+ The caller must hold the scheduler lock or \a team's lock.
+
+ \param team The target team.
+ \param schedulerLocked \c true, if the caller holds the scheduler lock,
+ \c false otherwise.
*/
-static status_t
-deliver_signal(Thread *thread, uint signal, uint32 flags)
+static bool
+has_permission_to_signal(Team* team, bool schedulerLocked)
+{
+ // get the current user
+ uid_t currentUser = schedulerLocked
+ ? thread_get_current_thread()->team->effective_uid
+ : geteuid();
+
+ // root is omnipotent -- in the other cases the current user must match the
+ // target team's
+ return currentUser == 0 || currentUser == team->effective_uid;
+}
+
+
+/*! Delivers a signal to the \a thread, but doesn't handle the signal -- it just
+ makes sure the thread gets the signal, i.e. unblocks it if needed.
+
+ The caller must hold the scheduler lock.
+
+ \param thread The thread the signal shall be delivered to.
+ \param signalNumber The number of the signal to be delivered. If \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ \param signal If non-NULL the signal to be queued (has number
+ \a signalNumber in this case). The caller transfers an object reference
+ to this function. If \c NULL an unqueued signal will be delivered to the
+ thread.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ \return \c B_OK, when the signal was delivered successfully, another error
+ code otherwise.
+*/
+status_t
+send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
+ Signal* signal, uint32 flags)
{
- if (flags & B_CHECK_PERMISSION) {
- // ToDo: introduce euid & uid fields to the team and check permission
+ ASSERT(signal == NULL || signalNumber == signal->Number());
+
+ T(SendSignal(thread->id, signalNumber, flags));
+
+ // The caller transferred a reference to the signal to us.
+ BReference<Signal> signalReference(signal, true);
+
+ if ((flags & B_CHECK_PERMISSION) != 0) {
+ if (!has_permission_to_signal(thread->team, true))
+ return EPERM;
}
- if (signal == 0)
+ if (signalNumber == 0)
return B_OK;
if (thread->team == team_get_kernel_team()) {
@@ -552,24 +1382,33 @@ deliver_signal(Thread *thread, uint signal, uint32 flags)
return B_OK;
}
- atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));
+ if (signal != NULL)
+ thread->AddPendingSignal(signal);
+ else
+ thread->AddPendingSignal(signalNumber);
+
+ // the thread has the signal reference, now
+ signalReference.Detach();
- switch (signal) {
+ switch (signalNumber) {
case SIGKILL:
{
- // Forward KILLTHR to the main thread of the team
- Thread *mainThread = thread->team->main_thread;
- atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));
-
- // Wake up main thread
- if (mainThread->state == B_THREAD_SUSPENDED)
- scheduler_enqueue_in_run_queue(mainThread);
- else
- thread_interrupt(mainThread, true);
-
- update_thread_signals_flag(mainThread);
+ // If sent to a thread other than the team's main thread, also send
+ // a SIGKILLTHR to the main thread to kill the team.
+ Thread* mainThread = thread->team->main_thread;
+ if (mainThread != NULL && mainThread != thread) {
+ mainThread->AddPendingSignal(SIGKILLTHR);
+
+ // wake up main thread
+ if (mainThread->state == B_THREAD_SUSPENDED)
+ scheduler_enqueue_in_run_queue(mainThread);
+ else
+ thread_interrupt(mainThread, true);
+
+ update_thread_signals_flag(mainThread);
+ }
- // Supposed to fall through
+ // supposed to fall through
}
case SIGKILLTHR:
// Wake up suspended threads and interrupt waiting ones
@@ -579,21 +1418,32 @@ deliver_signal(Thread *thread, uint signal, uint32 flags)
thread_interrupt(thread, true);
break;
- case SIGCONT:
- // Wake up thread if it was suspended
+ case SIGNAL_CONTINUE_THREAD:
+ // wake up thread, and interrupt its current syscall
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
- if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
- atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
+ atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
+ break;
- atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
- // remove any pending stop signals
+ case SIGCONT:
+ // Wake up thread if it was suspended, otherwise interrupt it, if
+ // the signal isn't blocked.
+ if (thread->state == B_THREAD_SUSPENDED)
+ scheduler_enqueue_in_run_queue(thread);
+ else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
+ thread_interrupt(thread, false);
+
+ // remove any pending stop signals
+ thread->RemovePendingSignals(STOP_SIGNALS);
break;
default:
- if (thread->sig_pending
- & (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
+ // If the signal is not masked, interrupt the thread, if it is
+ // currently waiting (interruptibly).
+ if ((thread->AllPendingSignals()
+ & (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
+ != 0) {
// Interrupt thread if it was waiting
thread_interrupt(thread, false);
}
@@ -606,125 +1456,445 @@ deliver_signal(Thread *thread, uint signal, uint32 flags)
}
-int
-send_signal_etc(pid_t id, uint signal, uint32 flags)
+/*! Sends the given signal to the given thread.
+
+ The caller must not hold the scheduler lock.
+
+ \param thread The thread the signal shall be sent to.
+ \param signal The signal to be delivered. If the signal's number is \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ The given object will be copied. The caller retains ownership.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+ woken up, the scheduler will be invoked. If set that will not be
+ done explicitly, but rescheduling can still happen, e.g. when the
+ current thread's time slice runs out.
+ \return \c B_OK, when the signal was delivered successfully, another error
+ code otherwise.
+*/
+status_t
+send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
{
- status_t status = B_BAD_THREAD_ID;
- Thread *thread;
- cpu_status state = 0;
+ // Clone the signal -- the clone will be queued. If something fails and the
+ // caller doesn't require queuing, we will add an unqueued signal.
+ Signal* signalToQueue = NULL;
+ status_t error = Signal::CreateQueuable(signal,
+ (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
+ if (error != B_OK)
+ return error;
+
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
+ flags);
+ if (error != B_OK)
+ return error;
+
+ if ((flags & B_DO_NOT_RESCHEDULE) == 0)
+ scheduler_reschedule_if_necessary_locked();
- if (signal < 0 || signal > MAX_SIGNO)
- return B_BAD_VALUE;
+ return B_OK;
+}
- T(SendSignal(id, signal, flags));
- if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
- state = disable_interrupts();
+/*! Sends the given signal to the thread with the given ID.
- if (id > 0) {
- // send a signal to the specified thread
+ The caller must not hold the scheduler lock.
- GRAB_THREAD_LOCK();
+ \param threadID The ID of the thread the signal shall be sent to.
+ \param signal The signal to be delivered. If the signal's number is \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ The given object will be copied. The caller retains ownership.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+ woken up, the scheduler will be invoked. If set that will not be
+ done explicitly, but rescheduling can still happen, e.g. when the
+ current thread's time slice runs out.
+ \return \c B_OK, when the signal was delivered successfully, another error
+ code otherwise.
+*/
+status_t
+send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
+{
+ Thread* thread = Thread::Get(threadID);
+ if (thread == NULL)
+ return B_BAD_THREAD_ID;
+ BReference<Thread> threadReference(thread, true);
- thread = thread_get_thread_struct_locked(id);
- if (thread != NULL)
- status = deliver_signal(thread, signal, flags);
- } else {
- // send a signal to the specified process group
- // (the absolute value of the id)
+ return send_signal_to_thread(thread, signal, flags);
+}
- struct process_group *group;
- // TODO: handle -1 correctly
- if (id == 0 || id == -1) {
- // send a signal to the current team
- id = thread_get_current_thread()->team->id;
- } else
- id = -id;
+/*! Sends the given signal to the given team.
+
+ The caller must hold the scheduler lock.
+
+ \param team The team the signal shall be sent to.
+ \param signalNumber The number of the signal to be delivered. If \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ \param signal If non-NULL the signal to be queued (has number
+ \a signalNumber in this case). The caller transfers an object reference
+ to this function. If \c NULL an unqueued signal will be delivered to the
+ thread.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+			woken up, the scheduler will be invoked. If set, that will not be
+			done explicitly, but rescheduling can still happen, e.g. when the
+			current thread's time slice runs out.
+	\return \c B_OK when the signal was delivered successfully, another error
+		code otherwise.
+*/
+status_t
+send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
+ uint32 flags)
+{
+ ASSERT(signal == NULL || signalNumber == signal->Number());
- if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
- GRAB_TEAM_LOCK();
+ T(SendSignal(team->id, signalNumber, flags));
- group = team_get_process_group_locked(NULL, id);
- if (group != NULL) {
- Team *team, *next;
+ // The caller transferred a reference to the signal to us.
+ BReference<Signal> signalReference(signal, true);
- // Send a signal to all teams in this process group
+ if ((flags & B_CHECK_PERMISSION) != 0) {
+ if (!has_permission_to_signal(team, true))
+ return EPERM;
+ }
- for (team = group->teams; team != NULL; team = next) {
- next = team->group_next;
- id = team->id;
+ if (signalNumber == 0)
+ return B_OK;
- GRAB_THREAD_LOCK();
+ if (team == team_get_kernel_team()) {
+ // signals to the kernel team are not allowed
+ return EPERM;
+ }
- thread = thread_get_thread_struct_locked(id);
- if (thread != NULL) {
- // we don't stop because of an error sending the signal; we
- // rather want to send as much signals as possible
- status = deliver_signal(thread, signal, flags);
- }
+ if (signal != NULL)
+ team->AddPendingSignal(signal);
+ else
+ team->AddPendingSignal(signalNumber);
- RELEASE_THREAD_LOCK();
+	// the team has the signal reference now
+ signalReference.Detach();
+
+ switch (signalNumber) {
+ case SIGKILL:
+ case SIGKILLTHR:
+ {
+ // Also add a SIGKILLTHR to the main thread's signals and wake it
+ // up/interrupt it, so we get this over with as soon as possible
+ // (only the main thread shuts down the team).
+ Thread* mainThread = team->main_thread;
+ if (mainThread != NULL) {
+ mainThread->AddPendingSignal(SIGKILLTHR);
+
+ // wake up main thread
+ if (mainThread->state == B_THREAD_SUSPENDED)
+ scheduler_enqueue_in_run_queue(mainThread);
+ else
+ thread_interrupt(mainThread, true);
}
+ break;
}
- if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
- RELEASE_TEAM_LOCK();
+ case SIGCONT:
+ // Wake up any suspended threads, interrupt the others, if they
+ // don't block the signal.
+ for (Thread* thread = team->thread_list; thread != NULL;
+ thread = thread->team_next) {
+ if (thread->state == B_THREAD_SUSPENDED) {
+ scheduler_enqueue_in_run_queue(thread);
+ } else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
+ != 0) {
+ thread_interrupt(thread, false);
+ }
+
+ // remove any pending stop signals
+ thread->RemovePendingSignals(STOP_SIGNALS);
+ }
+
+ // remove any pending team stop signals
+ team->RemovePendingSignals(STOP_SIGNALS);
+ break;
+
+ case SIGSTOP:
+ case SIGTSTP:
+ case SIGTTIN:
+ case SIGTTOU:
+ // send the stop signal to all threads
+ // TODO: Is that correct or should we only target the main thread?
+ for (Thread* thread = team->thread_list; thread != NULL;
+ thread = thread->team_next) {
+ thread->AddPendingSignal(signalNumber);
+ }
+
+ // remove the stop signal from the team again
+ if (signal != NULL) {
+ team->RemovePendingSignal(signal);
+ signalReference.SetTo(signal, true);
+ } else
+ team->RemovePendingSignal(signalNumber);
- GRAB_THREAD_LOCK();
+ // fall through to interrupt threads
+ default:
+ // Interrupt all interruptibly waiting threads, if the signal is
+ // not masked.
+ for (Thread* thread = team->thread_list; thread != NULL;
+ thread = thread->team_next) {
+ sigset_t nonBlocked = ~thread->sig_block_mask
+ | SIGNAL_TO_MASK(SIGCHLD);
+ if ((thread->AllPendingSignals() & nonBlocked) != 0)
+ thread_interrupt(thread, false);
+ }
+ break;
}
- if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
+ update_team_threads_signal_flag(team);
+
+ if ((flags & B_DO_NOT_RESCHEDULE) == 0)
scheduler_reschedule_if_necessary_locked();
- RELEASE_THREAD_LOCK();
+ return B_OK;
+}
- if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
- restore_interrupts(state);
- return status;
+/*! Sends the given signal to the given team.
+
+ \param team The team the signal shall be sent to.
+ \param signal The signal to be delivered. If the signal's number is \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ The given object will be copied. The caller retains ownership.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+			woken up, the scheduler will be invoked. If set, that will not be
+			done explicitly, but rescheduling can still happen, e.g. when the
+			current thread's time slice runs out.
+	\return \c B_OK when the signal was delivered successfully, another error
+		code otherwise.
+*/
+status_t
+send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
+{
+ // Clone the signal -- the clone will be queued. If something fails and the
+ // caller doesn't require queuing, we will add an unqueued signal.
+ Signal* signalToQueue = NULL;
+ status_t error = Signal::CreateQueuable(signal,
+ (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
+ if (error != B_OK)
+ return error;
+
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ return send_signal_to_team_locked(team, signal.Number(), signalToQueue,
+ flags);
}
-int
-send_signal(pid_t threadID, uint signal)
+/*! Sends the given signal to the team with the given ID.
+
+ \param teamID The ID of the team the signal shall be sent to.
+ \param signal The signal to be delivered. If the signal's number is \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ The given object will be copied. The caller retains ownership.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+			woken up, the scheduler will be invoked. If set, that will not be
+			done explicitly, but rescheduling can still happen, e.g. when the
+			current thread's time slice runs out.
+	\return \c B_OK when the signal was delivered successfully, another error
+		code otherwise.
+*/
+status_t
+send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
{
- // The BeBook states that this function wouldn't be exported
- // for drivers, but, of course, it's wrong.
- return send_signal_etc(threadID, signal, 0);
+ // get the team
+ Team* team = Team::Get(teamID);
+ if (team == NULL)
+ return B_BAD_TEAM_ID;
+ BReference<Team> teamReference(team, true);
+
+ return send_signal_to_team(team, signal, flags);
+}
+
+
+/*! Sends the given signal to the given process group.
+
+ The caller must hold the process group's lock. Interrupts must be enabled.
+
+	\param group The process group the signal shall be sent to.
+ \param signal The signal to be delivered. If the signal's number is \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ The given object will be copied. The caller retains ownership.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+			woken up, the scheduler will be invoked. If set, that will not be
+			done explicitly, but rescheduling can still happen, e.g. when the
+			current thread's time slice runs out.
+	\return \c B_OK when the signal was delivered successfully, another error
+		code otherwise.
+*/
+status_t
+send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
+ uint32 flags)
+{
+ T(SendSignal(-group->id, signal.Number(), flags));
+
+ bool firstTeam = true;
+
+ for (Team* team = group->teams; team != NULL; team = team->group_next) {
+ status_t error = send_signal_to_team(team, signal,
+ flags | B_DO_NOT_RESCHEDULE);
+ // If sending to the first team in the group failed, let the whole call
+ // fail.
+ if (firstTeam) {
+ if (error != B_OK)
+ return error;
+ firstTeam = false;
+ }
+ }
+
+ if ((flags & B_DO_NOT_RESCHEDULE) == 0)
+ scheduler_reschedule_if_necessary();
+
+ return B_OK;
+}
+
+
+/*! Sends the given signal to the process group specified by the given ID.
+
+ The caller must not hold any process group, team, or thread lock. Interrupts
+ must be enabled.
+
+ \param groupID The ID of the process group the signal shall be sent to.
+ \param signal The signal to be delivered. If the signal's number is \c 0, no
+ actual signal will be delivered. Only delivery checks will be performed.
+ The given object will be copied. The caller retains ownership.
+ \param flags A bitwise combination of any number of the following:
+ - \c B_CHECK_PERMISSION: Check the caller's permission to send the
+ target thread the signal.
+ - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
+			woken up, the scheduler will be invoked. If set, that will not be
+			done explicitly, but rescheduling can still happen, e.g. when the
+			current thread's time slice runs out.
+	\return \c B_OK when the signal was delivered successfully, another error
+		code otherwise.
+*/
+status_t
+send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
+{
+ ProcessGroup* group = ProcessGroup::Get(groupID);
+ if (group == NULL)
+ return B_BAD_TEAM_ID;
+	BReference<ProcessGroup> groupReference(group, true);
+
+ T(SendSignal(-group->id, signal.Number(), flags));
+
+ AutoLocker<ProcessGroup> groupLocker(group);
+
+ status_t error = send_signal_to_process_group_locked(group, signal,
+ flags | B_DO_NOT_RESCHEDULE);
+ if (error != B_OK)
+ return error;
+
+ groupLocker.Unlock();
+
+ if ((flags & B_DO_NOT_RESCHEDULE) == 0)
+ scheduler_reschedule_if_necessary();
+
+ return B_OK;
+}
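// Editorial aside (not part of this change): from userland, this path is what
// group-targeted POSIX kills ultimately exercise. A minimal sketch, assuming
// only standard kill()/killpg() semantics; hang_up_group() is a hypothetical
// helper name:

#include <signal.h>

static int
hang_up_group(pid_t groupID)
{
	// a negative pid addresses the process group with ID -pid
	return kill(-groupID, SIGHUP);
	// equivalently: killpg(groupID, SIGHUP);
}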
+
+
+static status_t
+send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
+ uint32 flags)
+{
+ if (signalNumber > MAX_SIGNAL_NUMBER)
+ return B_BAD_VALUE;
+
+ Thread* thread = thread_get_current_thread();
+
+ Signal signal(signalNumber,
+ (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
+ B_OK, thread->team->id);
+ // Note: SI_USER/SI_QUEUE is not correct, if called from within the
+ // kernel (or a driver), but we don't have any info here.
+ signal.SetUserValue(userValue);
+
+ // If id is > 0, send the signal to the respective thread.
+ if (id > 0)
+ return send_signal_to_thread_id(id, signal, flags);
+
+ // If id == 0, send the signal to the current thread.
+ if (id == 0)
+ return send_signal_to_thread(thread, signal, flags);
+
+ // If id == -1, send the signal to all teams the calling team has permission
+ // to send signals to.
+ if (id == -1) {
+ // TODO: Implement correctly!
+ // currently only send to the current team
+ return send_signal_to_team_id(thread->team->id, signal, flags);
+ }
+
+ // Send a signal to the specified process group (the absolute value of the
+ // id).
+ return send_signal_to_process_group(-id, signal, flags);
}
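// Editorial sketch (not part of this change): an in-kernel caller can use the
// object-based API exactly as the routing code above does. notify_worker() and
// the worker thread ID are hypothetical; the Signal constructor arguments
// mirror the call in send_signal_internal().

static status_t
notify_worker(thread_id worker)
{
	Signal signal(SIGUSR1, SI_USER, B_OK,
		thread_get_current_thread()->team->id);
	return send_signal_to_thread_id(worker, signal, B_DO_NOT_RESCHEDULE);
}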
int
-has_signals_pending(void *_thread)
+send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
{
- Thread *thread = (Thread *)_thread;
- if (thread == NULL)
- thread = thread_get_current_thread();
+ // a dummy user value
+ union sigval userValue;
+ userValue.sival_ptr = NULL;
+
+ return send_signal_internal(id, signalNumber, userValue, flags);
+}
- return atomic_get(&thread->sig_pending)
- & ~atomic_get(&thread->sig_block_mask);
+
+int
+send_signal(pid_t threadID, uint signal)
+{
+ // The BeBook states that this function wouldn't be exported
+ // for drivers, but, of course, it's wrong.
+ return send_signal_etc(threadID, signal, 0);
}
static int
-sigprocmask_internal(int how, const sigset_t *set, sigset_t *oldSet)
+sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
{
- Thread *thread = thread_get_current_thread();
- sigset_t oldMask = atomic_get(&thread->sig_block_mask);
+ Thread* thread = thread_get_current_thread();
+
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ sigset_t oldMask = thread->sig_block_mask;
if (set != NULL) {
T(SigProcMask(how, *set));
switch (how) {
case SIG_BLOCK:
- atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
+ thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
break;
case SIG_UNBLOCK:
- atomic_and(&thread->sig_block_mask, ~*set);
+ thread->sig_block_mask &= ~*set;
break;
case SIG_SETMASK:
- atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
+ thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
break;
default:
return B_BAD_VALUE;
@@ -741,161 +1911,126 @@ sigprocmask_internal(int how, const sigset_t *set, sigset_t *oldSet)
int
-sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
+sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
{
RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
}
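// Editorial example (userland, standard POSIX; not part of this change): the
// common pattern served by sigprocmask_internal() above -- blocking a signal
// around a critical section and restoring the previous mask afterwards.

#include <signal.h>

static void
run_without_sigint(void (*work)(void))
{
	sigset_t block, old;
	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);

	work();

	sigprocmask(SIG_SETMASK, &old, NULL);
		// restore the previous mask
}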
-/*! \brief sigaction() for the specified thread.
- A \a threadID is < 0 specifies the current thread.
+/*! \brief Like sigaction(), but returning the error instead of setting errno.
*/
static status_t
-sigaction_etc_internal(thread_id threadID, int signal, const struct sigaction *act,
- struct sigaction *oldAction)
+sigaction_internal(int signal, const struct sigaction* act,
+ struct sigaction* oldAction)
{
- Thread *thread;
- cpu_status state;
- status_t error = B_OK;
-
- if (signal < 1 || signal > MAX_SIGNO
+ if (signal < 1 || signal > MAX_SIGNAL_NUMBER
|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
return B_BAD_VALUE;
- state = disable_interrupts();
- GRAB_THREAD_LOCK();
-
- thread = (threadID < 0
- ? thread_get_current_thread()
- : thread_get_thread_struct_locked(threadID));
-
- if (thread) {
- if (oldAction) {
- // save previous sigaction structure
- memcpy(oldAction, &thread->sig_action[signal - 1],
- sizeof(struct sigaction));
- }
+ // get and lock the team
+ Team* team = thread_get_current_thread()->team;
+ TeamLocker teamLocker(team);
- if (act) {
- T(SigAction(thread, signal, act));
+ struct sigaction& teamHandler = team->SignalActionFor(signal);
+ if (oldAction) {
+ // save previous sigaction structure
+ *oldAction = teamHandler;
+ }
- // set new sigaction structure
- memcpy(&thread->sig_action[signal - 1], act,
- sizeof(struct sigaction));
- thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
- }
+ if (act) {
+ T(SigAction(signal, act));
- if (act && act->sa_handler == SIG_IGN) {
- // remove pending signal if it should now be ignored
- atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
- } else if (act && act->sa_handler == SIG_DFL
- && (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
- // remove pending signal for those signals whose default
- // action is to ignore them
- atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
- }
- } else
- error = B_BAD_THREAD_ID;
+ // set new sigaction structure
+ teamHandler = *act;
+ teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
+ }
- RELEASE_THREAD_LOCK();
- restore_interrupts(state);
+ // Remove pending signal if it should now be ignored and remove pending
+ // signal for those signals whose default action is to ignore them.
+ if ((act && act->sa_handler == SIG_IGN)
+ || (act && act->sa_handler == SIG_DFL
+ && (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
- return error;
-}
+ team->RemovePendingSignal(signal);
+ for (Thread* thread = team->thread_list; thread != NULL;
+ thread = thread->team_next) {
+ thread->RemovePendingSignal(signal);
+ }
+ }
-int
-sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
- struct sigaction *oldAction)
-{
- RETURN_AND_SET_ERRNO(sigaction_etc_internal(threadID, signal, act,
- oldAction));
+ return B_OK;
}
int
-sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
-{
- return sigaction_etc(-1, signal, act, oldAction);
-}
-
-
-/*! Triggers a SIGALRM to the thread that issued the timer and reschedules */
-static int32
-alarm_event(timer *t)
+sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
{
- // The hook can be called from any context, but we have to
- // deliver the signal to the thread that originally called
- // set_alarm().
- // Since thread->alarm is this timer structure, we can just
- // cast it back - ugly but it works for now
- Thread *thread = (Thread *)((uint8 *)t - offsetof(Thread, alarm));
- // ToDo: investigate adding one user parameter to the timer structure to fix this hack
-
- TRACE(("alarm_event: thread = %p\n", thread));
- send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
-
- return B_HANDLED_INTERRUPT;
+ RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
}
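// Editorial example (userland, standard POSIX; not part of this change):
// installing a handler through the sigaction() path above. Note that with this
// change the action is stored per team (Team::SignalActionFor()) rather than
// per thread.

#include <signal.h>
#include <string.h>

static volatile sig_atomic_t sGotHup = 0;

static void
hup_handler(int signalNumber)
{
	sGotHup = 1;
}

static int
install_hup_handler(void)
{
	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_handler = hup_handler;
	sigemptyset(&action.sa_mask);
	return sigaction(SIGHUP, &action, NULL);
}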
-/*! Sets the alarm timer for the current thread. The timer fires at the
- specified time in the future, periodically or just once, as determined
- by \a mode.
- \return the time left until a previous set alarm would have fired.
+/*! Wait for the specified signals, and return the information for the retrieved
+ signal in \a info.
+ The \c flags and \c timeout combination must either define an infinite
+ timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
+ set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
*/
-bigtime_t
-set_alarm(bigtime_t time, uint32 mode)
+static status_t
+sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
+ bigtime_t timeout)
{
- Thread *thread = thread_get_current_thread();
- bigtime_t remainingTime = 0;
-
- ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
- // just to be sure no one changes the headers some day
+ // restrict mask to blockable signals
+ sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
- TRACE(("set_alarm: thread = %p\n", thread));
+	// make the wait always interruptible
+ flags |= B_CAN_INTERRUPT;
- if (thread->alarm.period)
- remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();
+ // check whether we are allowed to wait at all
+ bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
- cancel_timer(&thread->alarm);
+ Thread* thread = thread_get_current_thread();
- if (time != B_INFINITE_TIMEOUT)
- add_timer(&thread->alarm, &alarm_event, time, mode);
- else {
- // this marks the alarm as canceled (for returning the remaining time)
- thread->alarm.period = 0;
- }
-
- return remainingTime;
-}
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ bool timedOut = false;
+ status_t error = B_OK;
-/*! Wait for the specified signals, and return the signal retrieved in
- \a _signal.
-*/
-static status_t
-sigwait_internal(const sigset_t *set, int *_signal)
-{
- sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
+ while (!timedOut) {
+ sigset_t pendingSignals = thread->AllPendingSignals();
- Thread* thread = thread_get_current_thread();
+ // If a kill signal is pending, just bail out.
+ if ((pendingSignals & KILL_SIGNALS) != 0)
+ return B_INTERRUPTED;
- while (true) {
- sigset_t pendingSignals = atomic_get(&thread->sig_pending);
- sigset_t blockedSignals = atomic_get(&thread->sig_block_mask);
- sigset_t pendingRequestedSignals = pendingSignals & requestedSignals;
- if ((pendingRequestedSignals) != 0) {
- // select the lowest pending signal to return in _signal
- for (int signal = 1; signal < NSIG; signal++) {
- if ((SIGNAL_TO_MASK(signal) & pendingSignals) != 0) {
- atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
- *_signal = signal;
- return B_OK;
- }
- }
+ if ((pendingSignals & requestedSignals) != 0) {
+ // get signal with the highest priority
+ Signal stackSignal;
+ Signal* signal = dequeue_thread_or_team_signal(thread,
+ requestedSignals, stackSignal);
+ ASSERT(signal != NULL);
+
+ SignalHandledCaller signalHandledCaller(signal);
+ schedulerLocker.Unlock();
+
+ info->si_signo = signal->Number();
+ info->si_code = signal->SignalCode();
+ info->si_errno = signal->ErrorCode();
+ info->si_pid = signal->SendingProcess();
+ info->si_uid = signal->SendingUser();
+ info->si_addr = signal->Address();
+ info->si_status = signal->Status();
+ info->si_band = signal->PollBand();
+ info->si_value = signal->UserValue();
+
+ return B_OK;
}
+ if (!canWait)
+ return B_WOULD_BLOCK;
+
+ sigset_t blockedSignals = thread->sig_block_mask;
if ((pendingSignals & ~blockedSignals) != 0) {
// Non-blocked signals are pending -- return to let them be handled.
return B_INTERRUPTED;
@@ -903,27 +2038,32 @@ sigwait_internal(const sigset_t *set, int *_signal)
// No signals yet. Set the signal block mask to not include the
// requested mask and wait until we're interrupted.
- atomic_set(&thread->sig_block_mask,
- blockedSignals & ~(requestedSignals & BLOCKABLE_SIGNALS));
+ thread->sig_block_mask = blockedSignals & ~requestedSignals;
while (!has_signals_pending(thread)) {
- thread_prepare_to_block(thread, B_CAN_INTERRUPT,
- THREAD_BLOCK_TYPE_SIGNAL, NULL);
- thread_block();
+ thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
+ NULL);
+
+ if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
+ error = thread_block_with_timeout_locked(flags, timeout);
+ if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
+ error = B_WOULD_BLOCK;
+ // POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
+ timedOut = true;
+ break;
+ }
+ } else
+ thread_block_locked(thread);
}
// restore the original block mask
- atomic_set(&thread->sig_block_mask, blockedSignals);
+ thread->sig_block_mask = blockedSignals;
update_current_thread_signals_flag();
}
-}
-
-int
-sigwait(const sigset_t *set, int *_signal)
-{
- RETURN_AND_SET_ERRNO(sigwait_internal(set, _signal));
+ // we get here only when timed out
+ return error;
}
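// Editorial example (userland, standard POSIX; not part of this change): the
// kind of caller sigwait_internal() serves -- synchronously waiting for a
// signal with a relative timeout. On timeout the kernel returns B_WOULD_BLOCK
// (EAGAIN), as noted above.

#include <signal.h>
#include <time.h>

static int
wait_for_usr1(void)
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	// block the signal so it stays pending instead of invoking a handler
	sigprocmask(SIG_BLOCK, &set, NULL);

	siginfo_t info;
	struct timespec timeout = { 2, 0 };
		// two seconds
	return sigtimedwait(&set, &info, &timeout);
		// >= 0: the signal number; -1: error (errno EAGAIN on timeout)
}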
@@ -931,29 +2071,37 @@ sigwait(const sigset_t *set, int *_signal)
Before returning, the original signal block mask is reinstantiated.
*/
static status_t
-sigsuspend_internal(const sigset_t *mask)
+sigsuspend_internal(const sigset_t* _mask)
{
- T(SigSuspend(*mask));
+ sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
- Thread *thread = thread_get_current_thread();
- sigset_t oldMask = atomic_get(&thread->sig_block_mask);
+ T(SigSuspend(mask));
+
+ Thread* thread = thread_get_current_thread();
+
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
- // Set the new block mask and block until interrupted.
+ // Set the new block mask and block until interrupted. We might be here
+ // after a syscall restart, in which case sigsuspend_original_unblocked_mask
+ // will still be set.
+ sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
+ ? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
+ thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
- atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);
+ update_current_thread_signals_flag();
while (!has_signals_pending(thread)) {
thread_prepare_to_block(thread, B_CAN_INTERRUPT,
THREAD_BLOCK_TYPE_SIGNAL, NULL);
- thread_block();
+ thread_block_locked(thread);
}
- // restore the original block mask
- atomic_set(&thread->sig_block_mask, oldMask);
-
- thread->sig_temp_enabled = ~*mask;
-
- update_current_thread_signals_flag();
+ // Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
+ // BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
+ // called after a _user_sigsuspend(). It will reset the field after invoking
+ // a signal handler, or restart the syscall, if there wasn't anything to
+ // handle anymore (e.g. because another thread was faster).
+ thread->sigsuspend_original_unblocked_mask = ~oldMask;
T(SigSuspendDone());
@@ -962,54 +2110,91 @@ sigsuspend_internal(const sigset_t *mask)
}
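// Editorial example (userland, standard POSIX; not part of this change): the
// classic sigsuspend() pattern the code above implements -- atomically
// unblocking a signal only for the duration of the wait.

#include <signal.h>

static void
wait_for_child_signal(void)
{
	sigset_t blockChld, waitMask;
	sigemptyset(&blockChld);
	sigaddset(&blockChld, SIGCHLD);
	sigprocmask(SIG_BLOCK, &blockChld, &waitMask);

	sigdelset(&waitMask, SIGCHLD);
	sigsuspend(&waitMask);
		// returns (with errno EINTR) after a handler has run; the previous
		// block mask is restored automatically
}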
-int
-sigsuspend(const sigset_t *mask)
-{
- RETURN_AND_SET_ERRNO(sigsuspend_internal(mask));
-}
-
-
static status_t
-sigpending_internal(sigset_t *set)
+sigpending_internal(sigset_t* set)
{
- Thread *thread = thread_get_current_thread();
+ Thread* thread = thread_get_current_thread();
if (set == NULL)
return B_BAD_VALUE;
- *set = atomic_get(&thread->sig_pending);
- return B_OK;
-}
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+ *set = thread->AllPendingSignals() & thread->sig_block_mask;
-int
-sigpending(sigset_t *set)
-{
- RETURN_AND_SET_ERRNO(sigpending_internal(set));
+ return B_OK;
}
// #pragma mark - syscalls
-bigtime_t
-_user_set_alarm(bigtime_t time, uint32 mode)
+/*! Sends a signal to a thread, process, or process group.
+ \param id Specifies the ID of the target:
+		- \code id > 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set in
+			\a flags, the target is the thread with ID \a id, otherwise the
+			team with ID \a id.
+		- \code id == 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set in
+			\a flags, the target is the current thread, otherwise the current
+			team.
+		- \code id == -1 \endcode: The targets are all teams the current team
+			has permission to send signals to. Currently not implemented
+			correctly.
+		- \code id < -1 \endcode: The target is the process group with ID
+			\c -id.
+ \param signalNumber The signal number. \c 0 to just perform checks, but not
+ actually send any signal.
+ \param userUserValue A user value to be associated with the signal. Might be
+ ignored unless signal queuing is forced. Can be \c NULL.
+ \param flags A bitwise or of any number of the following:
+ - \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
+ instead of falling back to unqueued signals, when queuing isn't
+ possible.
+		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
+ \c thread_id rather than a \c team_id. Ignored when the \a id is
+ \code < 0 \endcode -- then the target is a process group.
+ \return \c B_OK on success, another error code otherwise.
+*/
+status_t
+_user_send_signal(int32 id, uint32 signalNumber,
+ const union sigval* userUserValue, uint32 flags)
{
- syscall_64_bit_return_value();
+ // restrict flags to the allowed ones and add B_CHECK_PERMISSION
+ flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
+ flags |= B_CHECK_PERMISSION;
+
+ // Copy the user value from userland. If not given, use a dummy value.
+ union sigval userValue;
+ if (userUserValue != NULL) {
+ if (!IS_USER_ADDRESS(userUserValue)
+ || user_memcpy(&userValue, userUserValue, sizeof(userValue))
+ != B_OK) {
+ return B_BAD_ADDRESS;
+ }
+ } else
+ userValue.sival_ptr = NULL;
- return set_alarm(time, mode);
-}
+ // If to be sent to a thread, delegate to send_signal_internal(). Also do
+	// that when id < 0, since in that case the semantics are the same.
+ if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
+ return send_signal_internal(id, signalNumber, userValue, flags);
+
+ // kill() semantics for id >= 0
+ if (signalNumber > MAX_SIGNAL_NUMBER)
+ return B_BAD_VALUE;
+ Thread* thread = thread_get_current_thread();
-status_t
-_user_send_signal(pid_t team, uint signal)
-{
- return send_signal_etc(team, signal, B_CHECK_PERMISSION);
+ Signal signal(signalNumber,
+ (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
+ B_OK, thread->team->id);
+ signal.SetUserValue(userValue);
+
+ // send to current team for id == 0, otherwise to the respective team
+ return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
+ signal, flags);
}
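// Editorial example (userland, standard POSIX; not part of this change):
// queuing a signal with a payload. The libroot sigqueue() wrapper is expected
// to reach _user_send_signal() with SIGNAL_FLAG_QUEUING_REQUIRED set, so the
// value shows up in the receiver's si_value.

#include <signal.h>

static int
send_progress(pid_t receiver, int percent)
{
	union sigval value;
	value.sival_int = percent;
	return sigqueue(receiver, SIGUSR2, value);
}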
status_t
-_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
+_user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
sigset_t set, oldSet;
status_t status;
@@ -1044,7 +2229,7 @@ _user_sigaction(int signal, const struct sigaction *userAction,
sizeof(struct sigaction)) < B_OK))
return B_BAD_ADDRESS;
- status = sigaction(signal, userAction ? &act : NULL,
+ status = sigaction_internal(signal, userAction ? &act : NULL,
userOldAction ? &oact : NULL);
// only copy the old action if a pointer has been given
@@ -1057,26 +2242,37 @@ _user_sigaction(int signal, const struct sigaction *userAction,
status_t
-_user_sigwait(const sigset_t *userSet, int *_userSignal)
+_user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
+ bigtime_t timeout)
{
- if (userSet == NULL || _userSignal == NULL)
- return B_BAD_VALUE;
-
+ // copy userSet to stack
sigset_t set;
- if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
+ if (userSet == NULL || !IS_USER_ADDRESS(userSet)
+ || user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
+ return B_BAD_ADDRESS;
+ }
+
+ // userInfo is optional, but must be a user address when given
+ if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
return B_BAD_ADDRESS;
- int signal;
- status_t status = sigwait_internal(&set, &signal);
- if (status == B_INTERRUPTED) {
+ syscall_restart_handle_timeout_pre(flags, timeout);
+
+ flags |= B_CAN_INTERRUPT;
+
+ siginfo_t info;
+ status_t status = sigwait_internal(&set, &info, flags, timeout);
+ if (status == B_OK) {
+		// copy the info back to userland, if userInfo is non-NULL
+ if (userInfo != NULL)
+ status = user_memcpy(userInfo, &info, sizeof(info));
+ } else if (status == B_INTERRUPTED) {
// make sure we'll be restarted
Thread* thread = thread_get_current_thread();
- atomic_or(&thread->flags,
- THREAD_FLAGS_ALWAYS_RESTART_SYSCALL | THREAD_FLAGS_RESTART_SYSCALL);
- return status;
+ atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
}
- return user_memcpy(_userSignal, &signal, sizeof(int));
+ return syscall_restart_handle_timeout_post(status, timeout);
}
@@ -1115,7 +2311,7 @@ _user_sigpending(sigset_t *userSet)
status_t
-_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
+_user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
{
Thread *thread = thread_get_current_thread();
struct stack_t newStack, oldStack;
@@ -1169,3 +2365,76 @@ _user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
return B_OK;
}
+
+/*! Restores the environment of a function that was interrupted by a signal
+ handler call.
+ This syscall is invoked when a signal handler function returns. It
+ deconstructs the signal handler frame and restores the stack and register
+ state of the function that was interrupted by a signal. The syscall is
+ therefore somewhat unusual, since it does not return to the calling
+ function, but to someplace else. In case the signal interrupted a syscall,
+	it will appear as if the syscall just returned. That is also the reason why
+ this syscall returns an int64, since it needs to return the value the
+ interrupted syscall returns, which is potentially 64 bits wide.
+
+ \param userSignalFrameData The signal frame data created for the signal
+ handler. Potentially some data (e.g. registers) have been modified by
+ the signal handler.
+ \return In case the signal interrupted a syscall, the return value of that
+ syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
+ the value might need to be tailored such that after a return to userland
+		the restored environment is identical to the interrupted one (unless
+ explicitly modified). E.g. for x86 to achieve that, the return value
+ must contain the eax|edx values of the interrupted environment.
+*/
+int64
+_user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
+{
+ syscall_64_bit_return_value();
+
+ Thread *thread = thread_get_current_thread();
+
+ // copy the signal frame data from userland
+ signal_frame_data signalFrameData;
+ if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
+ || user_memcpy(&signalFrameData, userSignalFrameData,
+ sizeof(signalFrameData)) != B_OK) {
+ // We failed to copy the signal frame data from userland. This is a
+ // serious problem. Kill the thread.
+ dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
+ "copy signal frame data (%p) from userland. Killing thread...\n",
+ thread->id, userSignalFrameData);
+ kill_thread(thread->id);
+ return B_BAD_ADDRESS;
+ }
+
+ // restore the signal block mask
+ InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+
+ thread->sig_block_mask
+ = signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
+ update_current_thread_signals_flag();
+
+ schedulerLocker.Unlock();
+
+ // restore the syscall restart related thread flags and the syscall restart
+ // parameters
+ atomic_and(&thread->flags,
+ ~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
+ atomic_or(&thread->flags, signalFrameData.thread_flags
+ & (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
+
+ memcpy(thread->syscall_restart.parameters,
+ signalFrameData.syscall_restart_parameters,
+ sizeof(thread->syscall_restart.parameters));
+
+ // restore the previously stored Thread::user_signal_context
+ thread->user_signal_context = signalFrameData.context.uc_link;
+ if (thread->user_signal_context != NULL
+ && !IS_USER_ADDRESS(thread->user_signal_context)) {
+ thread->user_signal_context = NULL;
+ }
+
+ // let the architecture specific code restore the registers
+ return arch_restore_signal_frame(&signalFrameData);
+}
diff --git a/src/system/kernel/syscalls.cpp b/src/system/kernel/syscalls.cpp
index ac9da08..2085db4 100644
--- a/src/system/kernel/syscalls.cpp
+++ b/src/system/kernel/syscalls.cpp
@@ -48,6 +48,7 @@
#include <user_atomic.h>
#include <user_mutex.h>
#include <usergroup.h>
+#include <UserTimer.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
@@ -181,16 +182,6 @@ _user_is_computer_on(void)
}
-//! Map to the arch specific call
-static inline int64
-_user_restore_signal_frame()
-{
- syscall_64_bit_return_value();
-
- return arch_restore_signal_frame();
-}
-
-
// #pragma mark -
diff --git a/src/system/kernel/team.cpp b/src/system/kernel/team.cpp
index 4f8360f..e2a1f29 100644
--- a/src/system/kernel/team.cpp
+++ b/src/system/kernel/team.cpp
@@ -13,6 +13,7 @@
#include <team.h>
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -52,7 +53,9 @@
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>
-#include <util/khash.h>
+
+#include "TeamThreadTables.h"
+
//#define TRACE_TEAM
#ifdef TRACE_TEAM
@@ -77,21 +80,9 @@ struct team_arg {
uint32 error_token;
};
-struct fork_arg {
- area_id user_stack_area;
- addr_t user_stack_base;
- size_t user_stack_size;
- addr_t user_local_storage;
- sigset_t sig_block_mask;
- struct sigaction sig_action[32];
- addr_t signal_stack_base;
- size_t signal_stack_size;
- bool signal_stack_enabled;
- struct user_thread* user_thread;
+namespace {
- struct arch_fork_arg arch_info;
-};
class TeamNotificationService : public DefaultNotificationService {
public:
@@ -101,46 +92,103 @@ public:
};
-struct TeamHashDefinition {
- typedef team_id KeyType;
- typedef Team ValueType;
+// #pragma mark - TeamTable
+
+
+typedef BKernel::TeamThreadTable<Team> TeamTable;
+
+
+// #pragma mark - ProcessGroupHashDefinition
+
+
+struct ProcessGroupHashDefinition {
+ typedef pid_t KeyType;
+ typedef ProcessGroup ValueType;
- size_t HashKey(team_id key) const
+ size_t HashKey(pid_t key) const
{
return key;
}
- size_t Hash(Team* value) const
+ size_t Hash(ProcessGroup* value) const
{
return HashKey(value->id);
}
- bool Compare(team_id key, Team* value) const
+ bool Compare(pid_t key, ProcessGroup* value) const
{
return value->id == key;
}
- Team*& GetLink(Team* value) const
+ ProcessGroup*& GetLink(ProcessGroup* value) const
{
return value->next;
}
};
-typedef BOpenHashTable<TeamHashDefinition> TeamHashTable;
+typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
-static TeamHashTable sTeamHash;
-static hash_table* sGroupHash = NULL;
+} // unnamed namespace
+
+
+// #pragma mark -
+
+
+// the team_id -> Team hash table and the lock protecting it
+static TeamTable sTeamHash;
+static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;
+
+// the pid_t -> ProcessGroup hash table and the lock protecting it
+static ProcessGroupHashTable sGroupHash;
+static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
+
static Team* sKernelTeam = NULL;
-// some arbitrary chosen limits - should probably depend on the available
+// A list of process groups of children of dying session leaders that need to
+// be signalled, if they have become orphaned and contain stopped processes.
+static ProcessGroupList sOrphanedCheckProcessGroups;
+static mutex sOrphanedCheckLock
+ = MUTEX_INITIALIZER("orphaned process group check");
+
+// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;
static TeamNotificationService sNotificationService;
-spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
+
+// #pragma mark - TeamListIterator
+
+
+TeamListIterator::TeamListIterator()
+{
+ // queue the entry
+ InterruptsSpinLocker locker(sTeamHashLock);
+ sTeamHash.InsertIteratorEntry(&fEntry);
+}
+
+
+TeamListIterator::~TeamListIterator()
+{
+ // remove the entry
+ InterruptsSpinLocker locker(sTeamHashLock);
+ sTeamHash.RemoveIteratorEntry(&fEntry);
+}
+
+
+Team*
+TeamListIterator::Next()
+{
+ // get the next team -- if there is one, get reference for it
+ InterruptsSpinLocker locker(sTeamHashLock);
+ Team* team = sTeamHash.NextElement(&fEntry);
+ if (team != NULL)
+ team->AcquireReference();
+
+ return team;
+}
// #pragma mark - Tracing
@@ -243,11 +291,11 @@ job_control_state_name(job_control_state state)
class SetJobControlState : public AbstractTraceEntry {
public:
- SetJobControlState(team_id team, job_control_state newState, int signal)
+ SetJobControlState(team_id team, job_control_state newState, Signal* signal)
:
fTeam(team),
fNewState(newState),
- fSignal(signal)
+ fSignal(signal != NULL ? signal->Number() : 0)
{
Initialized();
}
@@ -360,7 +408,706 @@ TeamNotificationService::Notify(uint32 eventCode, Team* team)
}
-// #pragma mark - Private functions
+// #pragma mark - Team
+
+
+Team::Team(team_id id, bool kernel)
+{
+ // allocate an ID
+ this->id = id;
+ visible = true;
+ serial_number = -1;
+
+ // init mutex
+ if (kernel) {
+ mutex_init(&fLock, "Team:kernel");
+ } else {
+ char lockName[16];
+ snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
+ mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
+ }
+
+ hash_next = siblings_next = children = parent = NULL;
+ fName[0] = '\0';
+ fArgs[0] = '\0';
+ num_threads = 0;
+ io_context = NULL;
+ address_space = NULL;
+ realtime_sem_context = NULL;
+ xsi_sem_context = NULL;
+ thread_list = NULL;
+ main_thread = NULL;
+ loading_info = NULL;
+ state = TEAM_STATE_BIRTH;
+ flags = 0;
+ death_entry = NULL;
+ user_data_area = -1;
+ user_data = 0;
+ used_user_data = 0;
+ user_data_size = 0;
+ free_user_threads = NULL;
+
+ supplementary_groups = NULL;
+ supplementary_group_count = 0;
+
+ dead_threads_kernel_time = 0;
+ dead_threads_user_time = 0;
+ cpu_clock_offset = 0;
+
+ // dead threads
+ list_init(&dead_threads);
+ dead_threads_count = 0;
+
+ // dead children
+ dead_children.count = 0;
+ dead_children.kernel_time = 0;
+ dead_children.user_time = 0;
+
+ // job control entry
+ job_control_entry = new(nothrow) ::job_control_entry;
+ if (job_control_entry != NULL) {
+ job_control_entry->state = JOB_CONTROL_STATE_NONE;
+ job_control_entry->thread = id;
+ job_control_entry->team = this;
+ }
+
+ // exit status -- setting initialized to false suffices
+ exit.initialized = false;
+
+ list_init(&sem_list);
+ list_init(&port_list);
+ list_init(&image_list);
+ list_init(&watcher_list);
+
+ clear_team_debug_info(&debug_info, true);
+
+ // init dead/stopped/continued children condition vars
+ dead_children.condition_variable.Init(&dead_children, "team children");
+
+ fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
+ kernel ? -1 : MAX_QUEUED_SIGNALS);
+ memset(fSignalActions, 0, sizeof(fSignalActions));
+
+ fUserDefinedTimerCount = 0;
+}
+
+
+Team::~Team()
+{
+ // get rid of all associated data
+ PrepareForDeletion();
+
+ vfs_put_io_context(io_context);
+ delete_owned_ports(this);
+ sem_delete_owned_sems(this);
+
+ DeleteUserTimers(false);
+
+ fPendingSignals.Clear();
+
+ if (fQueuedSignalsCounter != NULL)
+ fQueuedSignalsCounter->ReleaseReference();
+
+ while (thread_death_entry* threadDeathEntry
+ = (thread_death_entry*)list_remove_head_item(&dead_threads)) {
+ free(threadDeathEntry);
+ }
+
+ while (::job_control_entry* entry = dead_children.entries.RemoveHead())
+ delete entry;
+
+ while (free_user_thread* entry = free_user_threads) {
+ free_user_threads = entry->next;
+ free(entry);
+ }
+
+ malloc_referenced_release(supplementary_groups);
+
+ delete job_control_entry;
+ // usually already NULL and transferred to the parent
+
+ mutex_destroy(&fLock);
+}
+
+
+/*static*/ Team*
+Team::Create(team_id id, const char* name, bool kernel)
+{
+ // create the team object
+ Team* team = new(std::nothrow) Team(id, kernel);
+ if (team == NULL)
+ return NULL;
+ ObjectDeleter<Team> teamDeleter(team);
+
+ if (name != NULL)
+ team->SetName(name);
+
+ // check initialization
+ if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
+ return NULL;
+
+ // finish initialization (arch specifics)
+ if (arch_team_init_team_struct(team, kernel) != B_OK)
+ return NULL;
+
+ if (!kernel) {
+ status_t error = user_timer_create_team_timers(team);
+ if (error != B_OK)
+ return NULL;
+ }
+
+ // everything went fine
+ return teamDeleter.Detach();
+}
+
+
+/*! \brief Returns the team with the given ID.
+ Returns a reference to the team.
+ Team and thread spinlock must not be held.
+*/
+/*static*/ Team*
+Team::Get(team_id id)
+{
+ if (id == B_CURRENT_TEAM) {
+ Team* team = thread_get_current_thread()->team;
+ team->AcquireReference();
+ return team;
+ }
+
+ InterruptsSpinLocker locker(sTeamHashLock);
+ Team* team = sTeamHash.Lookup(id);
+ if (team != NULL)
+ team->AcquireReference();
+ return team;
+}
+
+
+/*! \brief Returns the team with the given ID in a locked state.
+ Returns a reference to the team.
+ Team and thread spinlock must not be held.
+*/
+/*static*/ Team*
+Team::GetAndLock(team_id id)
+{
+ // get the team
+ Team* team = Get(id);
+ if (team == NULL)
+ return NULL;
+
+ // lock it
+ team->Lock();
+
+ // only return the team, when it isn't already dying
+ if (team->state >= TEAM_STATE_SHUTDOWN) {
+ team->Unlock();
+ team->ReleaseReference();
+ return NULL;
+ }
+
+ return team;
+}
+
+
+/*! Locks the team and its parent team (if any).
+ The caller must hold a reference to the team or otherwise make sure that
+ it won't be deleted.
+ If the team doesn't have a parent, only the team itself is locked. If the
+ team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
+ only the team itself is locked.
+
+ \param dontLockParentIfKernel If \c true, the team's parent team is only
+		locked if it is not the kernel team.
+*/
+void
+Team::LockTeamAndParent(bool dontLockParentIfKernel)
+{
+ // The locking order is parent -> child. Since the parent can change as long
+ // as we don't lock the team, we need to do a trial and error loop.
+ Lock();
+
+ while (true) {
+ // If the team doesn't have a parent, we're done. Otherwise try to lock
+		// the parent. This will succeed in most cases, simplifying things.
+ Team* parent = this->parent;
+ if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
+ || parent->TryLock()) {
+ return;
+ }
+
+ // get a temporary reference to the parent, unlock this team, lock the
+ // parent, and re-lock this team
+ BReference<Team> parentReference(parent);
+
+ Unlock();
+ parent->Lock();
+ Lock();
+
+ // If the parent hasn't changed in the meantime, we're done.
+ if (this->parent == parent)
+ return;
+
+ // The parent has changed -- unlock and retry.
+ parent->Unlock();
+ }
+}
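// Editorial sketch (not part of this change): typical use of the helper above.
// The reference requirement is met via Team::Get(); the function name is
// hypothetical.

static status_t
with_team_and_parent_locked(team_id id)
{
	Team* team = Team::Get(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	team->LockTeamAndParent(false);
	// ... work on team and team->parent ...
	team->UnlockTeamAndParent();

	return B_OK;
}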
+
+
+/*! Unlocks the team and its parent team (if any).
+*/
+void
+Team::UnlockTeamAndParent()
+{
+ if (parent != NULL)
+ parent->Unlock();
+
+ Unlock();
+}
+
+
+/*! Locks the team, its parent team (if any), and the team's process group.
+ The caller must hold a reference to the team or otherwise make sure that
+ it won't be deleted.
+ If the team doesn't have a parent, only the team itself is locked.
+*/
+void
+Team::LockTeamParentAndProcessGroup()
+{
+ LockTeamAndProcessGroup();
+
+ // We hold the group's and the team's lock, but not the parent team's lock.
+ // If we have a parent, try to lock it.
+ if (this->parent == NULL || this->parent->TryLock())
+ return;
+
+ // No success -- unlock the team and let LockTeamAndParent() do the rest of
+ // the job.
+ Unlock();
+ LockTeamAndParent(false);
+}
+
+
+/*! Unlocks the team, its parent team (if any), and the team's process group.
+*/
+void
+Team::UnlockTeamParentAndProcessGroup()
+{
+ group->Unlock();
+
+ if (parent != NULL)
+ parent->Unlock();
+
+ Unlock();
+}
+
+
+void
+Team::LockTeamAndProcessGroup()
+{
+ // The locking order is process group -> child. Since the process group can
+ // change as long as we don't lock the team, we need to do a trial and error
+ // loop.
+ Lock();
+
+ while (true) {
+ // Try to lock the group. This will succeed in most cases, simplifying
+ // things.
+ ProcessGroup* group = this->group;
+ if (group->TryLock())
+ return;
+
+ // get a temporary reference to the group, unlock this team, lock the
+ // group, and re-lock this team
+ BReference<ProcessGroup> groupReference(group);
+
+ Unlock();
+ group->Lock();
+ Lock();
+
+ // If the group hasn't changed in the meantime, we're done.
+ if (this->group == group)
+ return;
+
+ // The group has changed -- unlock and retry.
+ group->Unlock();
+ }
+}
+
+
+void
+Team::UnlockTeamAndProcessGroup()
+{
+ group->Unlock();
+ Unlock();
+}
+
+
+void
+Team::SetName(const char* name)
+{
+ if (const char* lastSlash = strrchr(name, '/'))
+ name = lastSlash + 1;
+
+ strlcpy(fName, name, B_OS_NAME_LENGTH);
+}
+
+
+void
+Team::SetArgs(const char* args)
+{
+ strlcpy(fArgs, args, sizeof(fArgs));
+}
+
+
+void
+Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
+{
+ fArgs[0] = '\0';
+ strlcpy(fArgs, path, sizeof(fArgs));
+ for (int i = 0; i < otherArgCount; i++) {
+ strlcat(fArgs, " ", sizeof(fArgs));
+ strlcat(fArgs, otherArgs[i], sizeof(fArgs));
+ }
+}
+
+
+void
+Team::ResetSignalsOnExec()
+{
+ // We are supposed to keep pending signals. Signal actions shall be reset
+ // partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
+ // (for SIGCHLD it's implementation-defined). Others shall be reset to
+ // SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
+ // flags, but since there aren't any handlers, they make little sense, so
+ // we clear them.
+
+ for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
+ struct sigaction& action = SignalActionFor(i);
+ if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
+ action.sa_handler = SIG_DFL;
+
+ action.sa_mask = 0;
+ action.sa_flags = 0;
+ action.sa_userdata = NULL;
+ }
+}
+
+
+void
+Team::InheritSignalActions(Team* parent)
+{
+ memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
+}
+
+
+/*! Adds the given user timer to the team and, if user-defined, assigns it an
+ ID.
+
+ The caller must hold the team's lock.
+
+ \param timer The timer to be added. If it doesn't have an ID yet, it is
+ considered user-defined and will be assigned an ID.
+ \return \c B_OK, if the timer was added successfully, another error code
+ otherwise.
+*/
+status_t
+Team::AddUserTimer(UserTimer* timer)
+{
+ // don't allow addition of timers when already shutting the team down
+ if (state >= TEAM_STATE_SHUTDOWN)
+ return B_BAD_TEAM_ID;
+
+ // If the timer is user-defined, check timer limit and increment
+ // user-defined count.
+ if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
+ return EAGAIN;
+
+ fUserTimers.AddTimer(timer);
+
+ return B_OK;
+}
+
+
+/*! Removes the given user timer from the team.
+
+ The caller must hold the team's lock.
+
+ \param timer The timer to be removed.
+
+*/
+void
+Team::RemoveUserTimer(UserTimer* timer)
+{
+ fUserTimers.RemoveTimer(timer);
+
+ if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
+ UserDefinedTimersRemoved(1);
+}
+
+
+/*! Deletes all (or all user-defined) user timers of the team.
+	Timers belonging to the team's threads are not affected.
+ Timer's belonging to the team's threads are not affected.
+ The caller must hold the team's lock.
+
+ \param userDefinedOnly If \c true, only the user-defined timers are deleted,
+ otherwise all timers are deleted.
+*/
+void
+Team::DeleteUserTimers(bool userDefinedOnly)
+{
+ int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
+ UserDefinedTimersRemoved(count);
+}
+
+
+/*! If not at the limit yet, increments the team's user-defined timer count.
+ \return \c true, if the limit wasn't reached yet, \c false otherwise.
+*/
+bool
+Team::CheckAddUserDefinedTimer()
+{
+ int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
+ if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
+ atomic_add(&fUserDefinedTimerCount, -1);
+ return false;
+ }
+
+ return true;
+}
+
+
+/*! Subtracts the given count from the team's user-defined timer count.
+ \param count The count to subtract.
+*/
+void
+Team::UserDefinedTimersRemoved(int32 count)
+{
+ atomic_add(&fUserDefinedTimerCount, -count);
+}
+
+
+void
+Team::DeactivateCPUTimeUserTimers()
+{
+ while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
+ timer->Deactivate();
+
+ while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
+ timer->Deactivate();
+}
+
+
+/*! Returns the team's current total CPU time (kernel + user + offset).
+
+ The caller must hold the scheduler lock.
+
+	\param ignoreCurrentRun If \c true and the current thread is one of the team's
+ threads, don't add the time since the last time \c last_time was
+ updated. Should be used in "thread unscheduled" scheduler callbacks,
+ since although the thread is still running at that time, its time has
+ already been stopped.
+ \return The team's current total CPU time.
+*/
+bigtime_t
+Team::CPUTime(bool ignoreCurrentRun) const
+{
+ bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
+ + dead_threads_user_time;
+
+ Thread* currentThread = thread_get_current_thread();
+ bigtime_t now = system_time();
+
+ for (Thread* thread = thread_list; thread != NULL;
+ thread = thread->team_next) {
+ SpinLocker threadTimeLocker(thread->time_lock);
+ time += thread->kernel_time + thread->user_time;
+
+ if (thread->IsRunning()) {
+ if (!ignoreCurrentRun || thread != currentThread)
+ time += now - thread->last_time;
+ }
+ }
+
+ return time;
+}
+
+
+/*! Returns the team's current user CPU time.
+
+ The caller must hold the scheduler lock.
+
+ \return The team's current user CPU time.
+*/
+bigtime_t
+Team::UserCPUTime() const
+{
+ bigtime_t time = dead_threads_user_time;
+
+ bigtime_t now = system_time();
+
+ for (Thread* thread = thread_list; thread != NULL;
+ thread = thread->team_next) {
+ SpinLocker threadTimeLocker(thread->time_lock);
+ time += thread->user_time;
+
+ if (thread->IsRunning() && !thread->in_kernel)
+ time += now - thread->last_time;
+ }
+
+ return time;
+}
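// Editorial example (userland, standard POSIX; not part of this change): the
// team CPU time computed above is presumably what backs the per-process
// CPU-time clock (CLOCK_PROCESS_CPUTIME_ID).

#include <stdio.h>
#include <time.h>

static void
print_process_cpu_time(void)
{
	struct timespec ts;
	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
		printf("CPU time: %ld.%09ld s\n", (long)ts.tv_sec,
			(long)ts.tv_nsec);
	}
}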
+
+
+// #pragma mark - ProcessGroup
+
+
+ProcessGroup::ProcessGroup(pid_t id)
+ :
+ id(id),
+ teams(NULL),
+ fSession(NULL),
+ fInOrphanedCheckList(false)
+{
+ char lockName[32];
+ snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
+ mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
+}
+
+
+ProcessGroup::~ProcessGroup()
+{
+	TRACE(("ProcessGroup::~ProcessGroup(): id = %ld\n", id));
+
+ // If the group is in the orphaned check list, remove it.
+ MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
+
+ if (fInOrphanedCheckList)
+ sOrphanedCheckProcessGroups.Remove(this);
+
+ orphanedCheckLocker.Unlock();
+
+ // remove group from the hash table and from the session
+ if (fSession != NULL) {
+ InterruptsSpinLocker groupHashLocker(sGroupHashLock);
+ sGroupHash.RemoveUnchecked(this);
+ groupHashLocker.Unlock();
+
+ fSession->ReleaseReference();
+ }
+
+ mutex_destroy(&fLock);
+}
+
+
+/*static*/ ProcessGroup*
+ProcessGroup::Get(pid_t id)
+{
+ InterruptsSpinLocker groupHashLocker(sGroupHashLock);
+ ProcessGroup* group = sGroupHash.Lookup(id);
+ if (group != NULL)
+ group->AcquireReference();
+ return group;
+}
+
+
+/*! Adds the group to the given session and makes it publicly accessible.
+ The caller must not hold the process group hash lock.
+*/
+void
+ProcessGroup::Publish(ProcessSession* session)
+{
+ InterruptsSpinLocker groupHashLocker(sGroupHashLock);
+ PublishLocked(session);
+}
+
+
+/*! Adds the group to the given session and makes it publicly accessible.
+ The caller must hold the process group hash lock.
+*/
+void
+ProcessGroup::PublishLocked(ProcessSession* session)
+{
+ ASSERT(sGroupHash.Lookup(this->id) == NULL);
+
+ fSession = session;
+ fSession->AcquireReference();
+
+ sGroupHash.InsertUnchecked(this);
+}
+
+
+/*! Checks whether the process group is orphaned.
+ The caller must hold the group's lock.
+ \return \c true, if the group is orphaned, \c false otherwise.
+*/
+bool
+ProcessGroup::IsOrphaned() const
+{
+ // Orphaned Process Group: "A process group in which the parent of every
+ // member is either itself a member of the group or is not a member of the
+ // group's session." (Open Group Base Specs Issue 7)
+ bool orphaned = true;
+
+ Team* team = teams;
+ while (orphaned && team != NULL) {
+ team->LockTeamAndParent(false);
+
+ Team* parent = team->parent;
+ if (parent != NULL && parent->group_id != id
+ && parent->session_id == fSession->id) {
+ orphaned = false;
+ }
+
+ team->UnlockTeamAndParent();
+
+ team = team->group_next;
+ }
+
+ return orphaned;
+}
+
+
+void
+ProcessGroup::ScheduleOrphanedCheck()
+{
+ MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
+
+ if (!fInOrphanedCheckList) {
+ sOrphanedCheckProcessGroups.Add(this);
+ fInOrphanedCheckList = true;
+ }
+}
+
+
+void
+ProcessGroup::UnsetOrphanedCheck()
+{
+ fInOrphanedCheckList = false;
+}
+
+
+// #pragma mark - ProcessSession
+
+
+ProcessSession::ProcessSession(pid_t id)
+ :
+ id(id),
+ controlling_tty(-1),
+ foreground_group(-1)
+{
+ char lockName[32];
+ snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
+ mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
+}
+
+
+ProcessSession::~ProcessSession()
+{
+ mutex_destroy(&fLock);
+}
+
+
+// #pragma mark - KDL functions
static void
@@ -368,9 +1115,10 @@ _dump_team_info(Team* team)
{
kprintf("TEAM: %p\n", team);
kprintf("id: %ld (%#lx)\n", team->id, team->id);
- kprintf("name: '%s'\n", team->name);
- kprintf("args: '%s'\n", team->args);
- kprintf("next: %p\n", team->next);
+ kprintf("serial_number: %" B_PRId64 "\n", team->serial_number);
+ kprintf("name: '%s'\n", team->Name());
+ kprintf("args: '%s'\n", team->Args());
+ kprintf("hash_next: %p\n", team->hash_next);
kprintf("parent: %p", team->parent);
if (team->parent != NULL) {
kprintf(" (id = %ld)\n", team->parent->id);
@@ -417,9 +1165,9 @@ dump_team_info(int argc, char** argv)
}
// walk through the thread list, trying to match name or id
- for (TeamHashTable::Iterator it = sTeamHash.GetIterator();
+ for (TeamTable::Iterator it = sTeamHash.GetIterator();
Team* team = it.Next();) {
- if ((team->name && strcmp(argv[1], team->name) == 0)
+ if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
|| team->id == id) {
_dump_team_info(team);
found = true;
@@ -438,41 +1186,25 @@ dump_teams(int argc, char** argv)
{
kprintf("team id parent name\n");
- for (TeamHashTable::Iterator it = sTeamHash.GetIterator();
+ for (TeamTable::Iterator it = sTeamHash.GetIterator();
Team* team = it.Next();) {
- kprintf("%p%7ld %p %s\n", team, team->id, team->parent, team->name);
+ kprintf("%p%7ld %p %s\n", team, team->id, team->parent, team->Name());
}
return 0;
}
-static int
-process_group_compare(void* _group, const void* _key)
-{
- struct process_group* group = (struct process_group*)_group;
- const struct team_key* key = (const struct team_key*)_key;
-
- if (group->id == key->id)
- return 0;
-
- return 1;
-}
-
-
-static uint32
-process_group_hash(void* _group, const void* _key, uint32 range)
-{
- struct process_group* group = (struct process_group*)_group;
- const struct team_key* key = (const struct team_key*)_key;
+// #pragma mark - Private functions
- if (group != NULL)
- return group->id % range;
- return (uint32)key->id % range;
-}
+/*! Inserts team \a team into the child list of team \a parent.
+ The caller must hold the lock of both \a parent and \a team.
+ \param parent The parent team.
+ \param team The team to be inserted into \a parent's child list.
+*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
@@ -484,7 +1216,13 @@ insert_team_into_parent(Team* parent, Team* team)
}
-/*! Note: must have team lock held */
+/*! Removes team \a team from the child list of team \a parent.
+
+ The caller must hold the lock of both \a parent and \a team.
+
+ \param parent The parent team.
+ \param team The team to be removed from \a parent's child list.
+*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
@@ -507,31 +1245,9 @@ remove_team_from_parent(Team* parent, Team* team)
}
-/*! Reparent each of our children
- Note: must have team lock held
+/*! Returns whether the given team is a session leader.
+ The caller must hold the team's lock or its process group's lock.
*/
-static void
-reparent_children(Team* team)
-{
- Team* child;
-
- while ((child = team->children) != NULL) {
- // remove the child from the current proc and add to the parent
- remove_team_from_parent(team, child);
- insert_team_into_parent(sKernelTeam, child);
- }
-
- // move job control entries too
- sKernelTeam->stopped_children.entries.MoveFrom(
- &team->stopped_children.entries);
- sKernelTeam->continued_children.entries.MoveFrom(
- &team->continued_children.entries);
-
- // Note, we don't move the dead children entries. Those will be deleted
- // when the team structure is deleted.
-}
-
-
static bool
is_session_leader(Team* team)
{
@@ -539,6 +1255,9 @@ is_session_leader(Team* team)
}
+/*! Returns whether the given team is a process group leader.
+ The caller must hold the team's lock or its process group's lock.
+*/
static bool
is_process_group_leader(Team* team)
{
@@ -546,127 +1265,38 @@ is_process_group_leader(Team* team)
}
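
Both predicates boil down to ID comparisons. A sketch of what they check, based on the session_id and group_id fields assigned in insert_team_into_group() below (illustrative, not the committed bodies):

static bool
is_session_leader_sketch(Team* team)
{
	// A session leader's team ID doubles as the session ID.
	return team->session_id == team->id;
}

static bool
is_process_group_leader_sketch(Team* team)
{
	// Likewise, a group leader's team ID is the group ID.
	return team->group_id == team->id;
}
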
-static void
-deferred_delete_process_group(struct process_group* group)
-{
- if (group == NULL)
- return;
-
- // remove_group_from_session() keeps this pointer around
- // only if the session can be freed as well
- if (group->session) {
- TRACE(("deferred_delete_process_group(): frees session %ld\n",
- group->session->id));
- deferred_free(group->session);
- }
-
- deferred_free(group);
-}
-
-
-/*! Removes a group from a session, and puts the session object
- back into the session cache, if it's not used anymore.
- You must hold the team lock when calling this function.
+/*! Inserts the given team into the given process group.
+ The caller must hold the process group's lock, the team's lock, and the
+ team's parent's lock.
*/
static void
-remove_group_from_session(struct process_group* group)
-{
- struct process_session* session = group->session;
-
- // the group must be in any session to let this function have any effect
- if (session == NULL)
- return;
-
- hash_remove(sGroupHash, group);
-
- // we cannot free the resource here, so we're keeping the group link
- // around - this way it'll be freed by free_process_group()
- if (--session->group_count > 0)
- group->session = NULL;
-}
-
-
-/*! Team lock must be held.
-*/
-static void
-acquire_process_group_ref(pid_t groupID)
-{
- process_group* group = team_get_process_group_locked(NULL, groupID);
- if (group == NULL) {
- panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
- return;
- }
-
- group->refs++;
-}
-
-
-/*! Team lock must be held.
-*/
-static void
-release_process_group_ref(pid_t groupID)
-{
- process_group* group = team_get_process_group_locked(NULL, groupID);
- if (group == NULL) {
- panic("release_process_group_ref(): unknown group ID: %ld", groupID);
- return;
- }
-
- if (group->refs <= 0) {
- panic("release_process_group_ref(%ld): ref count already 0", groupID);
- return;
- }
-
- if (--group->refs > 0)
- return;
-
- // group is no longer used
-
- remove_group_from_session(group);
- deferred_delete_process_group(group);
-}
-
-
-/*! You must hold the team lock when calling this function. */
-static void
-insert_group_into_session(struct process_session* session,
- struct process_group* group)
-{
- if (group == NULL)
- return;
-
- group->session = session;
- hash_insert(sGroupHash, group);
- session->group_count++;
-}
-
-
-/*! You must hold the team lock when calling this function. */
-static void
-insert_team_into_group(struct process_group* group, Team* team)
+insert_team_into_group(ProcessGroup* group, Team* team)
{
team->group = group;
team->group_id = group->id;
- team->session_id = group->session->id;
+ team->session_id = group->Session()->id;
team->group_next = group->teams;
group->teams = team;
- acquire_process_group_ref(group->id);
+ group->AcquireReference();
}
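
insert_team_into_group() now takes a reference on the ProcessGroup via AcquireReference() instead of bumping the old manual refs counter. Assuming ProcessGroup follows the usual BReferenceable pattern (an assumption based on that call; the matching release in remove_team_from_group() is outside this excerpt), moving a team between groups pairs the two operations. The helper below is hypothetical and for illustration only; the locking preconditions documented for both functions still apply.

static void
move_team_to_group_sketch(ProcessGroup* newGroup, Team* team)
{
	remove_team_from_group(team);
		// drops the reference the team held on its old group
	insert_team_into_group(newGroup, team);
		// acquires a reference on the new group
}
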
-/*! Removes the team from the group.
+/*! Removes the given team from its process group.
+
+ The caller must hold the process group's lock, the team's lock, and the
+ team's parent's lock. Interrupts must be enabled.
- \param team the team that'll be removed from it's group
+ \param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
- struct process_group* group = team->group;
+ ProcessGroup* group = team->group;
Team* current;
Team* last = NULL;
- // the team must be in any team to let this function have any effect
+ // the team must be in a process group to let this function have any effect
if (group == NULL)
return;
@@ -687,154 +1317,7 @@ remove_team_from_group(Team* team)
team->group = NULL;
team->group_next = NULL;
- release_process_group_ref(group->id);
-}
-
-
-static struct process_group*
-create_process_group(pid_t id)
-{
- struct process_group* group
- = (struct process_group*)malloc(sizeof(struct process_group));
- if (group == NULL)
- return NULL;
-
- group->id = id;
- group->refs = 0;
- group->session = NULL;
- group->teams = NULL;
- group->orphaned = true;
- return group;
-}
-
-
-static struct process_session*
-create_process_session(pid_t id)
-{
- struct process_session* session
- = (struct process_session*)malloc(sizeof(struct process_session));
- if (session == NULL)
- return NULL;
-
- session->id = id;
- session->group_count = 0;
- session->controlling_tty = -1;
- session->foreground_group = -1;
-
- return session;
-}
-
-
-static void
-set_team_name(Team* team, const char* name)
-{
- if (const char* lastSlash = strrchr(name, '/'))
- name = lastSlash + 1;
-
- strlcpy(team->name, name, B_OS_NAME_LENGTH);
-}
-
-
-static Team*
-create_team_struct(const char* name, bool kernel)
-{
- Team* team = new(std::nothrow) Team;
- if (team == NULL)
- return NULL;
- ObjectDeleter<Team> teamDeleter(team);
-
- team->next = team->siblings_next = team->children = team->parent = NULL;
- team->id = allocate_thread_id();
- set_team_name(team, name);
- team->args[0] = '\0';
- team->num_threads = 0;
- team->io_context = NULL;
- team->address_space = NULL;
- team->realtime_sem_context = NULL;
- team->xsi_sem_context = NULL;
- team->thread_list = NULL;
- team->main_thread = NULL;
- team->loading_info = NULL;
- team->state = TEAM_STATE_BIRTH;
- team->flags = 0;
- team->death_entry = NULL;
- team->user_data_area = -1;
- team->user_data = 0;
- team->used_user_data = 0;
- team->user_data_size = 0;
- team->free_user_threads = NULL;
-
- team->supplementary_groups = NULL;
- team->supplementary_group_count = 0;
-
- team->dead_threads_kernel_time = 0;
- team->dead_threads_user_time = 0;
-
- // dead threads
- list_init(&team->dead_threads);
- team->dead_threads_count = 0;
-
- // dead children
- team->dead_children.count = 0;
- team->dead_children.kernel_time = 0;
- team->dead_children.user_time = 0;
-
- // job control entry
- team->job_control_entry = new(nothrow) job_control_entry;
- if (team->job_control_entry == NULL)
- return NULL;
- ObjectDeleter<job_control_entry> jobControlEntryDeleter(
- team->job_control_entry);
- team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
- team->job_control_entry->thread = team->id;
- team->job_control_entry->team = team;
-
- list_init(&team->sem_list);
- list_init(&team->port_list);
- list_init(&team->image_list);
- list_init(&team->watcher_list);
-
- clear_team_debug_info(&team->debug_info, true);
-
- if (arch_team_init_team_struct(team, kernel) < 0)
- return NULL;
-
- // publish dead/stopped/continued children condition vars
- team->dead_children.condition_variable.Init(&team->dead_children,
- "team children");
-
- // keep all allocated structures
- jobControlEntryDeleter.Detach();
- teamDeleter.Detach();
-
- return team;
-}
-
-
-static void
-delete_team_struct(Team* team)
-{
- // get rid of all associated data
- team->PrepareForDeletion();
-
- while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
- &team->dead_threads)) {
- free(threadDeathEntry);
- }
-
- while (job_control_entry* entry = team->dead_children.entries.RemoveHead())
- delete entry;
-
- while (free_user_thread* entry = team->free_user_threads) {
- team->free_user_threads = entry->next;
- free(entry);
- }
-
- malloc_referenced_release(team->supplementary_groups);
-
- delete team->job_control_entry;
- // usually already NULL and transferred to the