/*
 * Copyright 2004-2016, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H
#ifndef _ASSEMBLER
#include <pthread.h>
#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>
#include <SupportDefs.h>
// Kernel-internal thread states, extending the public B_THREAD_* values
// (stored in Thread::state).
enum additional_thread_state {
// free the thread structure upon the next reschedule
THREAD_STATE_FREE_ON_RESCHED = 7,
};
// Bounds for the thread priorities settable via the public API.
#define THREAD_MIN_SET_PRIORITY B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY B_REAL_TIME_PRIORITY
// Life-cycle states of a Team (stored in Team::state).
enum team_state {
TEAM_STATE_NORMAL,	// fully operational
TEAM_STATE_BIRTH,	// being created
TEAM_STATE_SHUTDOWN,	// being shut down
TEAM_STATE_DEATH	// being destroyed
};
// Team::flags values
#define TEAM_FLAG_EXEC_DONE 0x01	// team has passed exec()
#define TEAM_FLAG_DUMP_CORE 0x02	// a core dump was requested
// Job-control state of a team, as tracked by job_control_entry::state.
typedef enum job_control_state {
JOB_CONTROL_STATE_NONE,
JOB_CONTROL_STATE_STOPPED,
JOB_CONTROL_STATE_CONTINUED,
JOB_CONTROL_STATE_DEAD
} job_control_state;
// Forward declarations -- the full definitions live in other headers.
struct cpu_ent;
struct image;
struct io_context;
struct realtime_sem_context;
struct select_info;
struct user_thread;
struct VMAddressSpace;
struct user_mutex_context;
struct xsi_sem_context;
namespace Scheduler {
struct ThreadData;
}
namespace BKernel {
struct Team;
struct Thread;
struct ProcessGroup;
}
// Records a dead thread's ID and exit status so interested parties can
// still retrieve them (used via Thread::exit.waiters and
// Team::dead_threads).
struct thread_death_entry : DoublyLinkedListLinkImpl<thread_death_entry> {
thread_id thread;	// ID of the thread that died
status_t status;	// its exit status
};
typedef DoublyLinkedList<thread_death_entry> ThreadDeathEntryList;
// Synchronization helper for team creation: the creator waits on
// `condition` for the loading result (see Team::loading_info).
struct team_loading_info {
ConditionVariable condition;	// notified when loading completes
status_t result;	// outcome of the load
};
// Callback registration for team events (kept in Team::watcher_list).
struct team_watcher {
struct list_link link;	// list link within the watcher list
void (*hook)(team_id team, void *data);	// callback to invoke
void *data;	// opaque user data passed to the hook
};
#define MAX_DEAD_CHILDREN 32
// A child team's entry in its parent's job-control lists (dead, stopped,
// continued children). While the child is alive `team` points to it; once
// it dies, InitDeadState() presumably snapshots the wait()-relevant data
// (status, reason, times) so the entry can outlive the team -- see the
// "valid when dead" group below.
struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
job_control_state state;	// current job-control state of the child
thread_id thread;	// ID of the child (its main thread) -- TODO confirm
uint16 signal;	// signal associated with the state change, if any
bool has_group_ref;	// whether this entry holds a process-group reference
uid_t signaling_user;	// user who sent the signal
// valid while the team is still alive:
BKernel::Team* team;
// valid when the team is dead:
pid_t group_id;
status_t status;
uint16 reason;	// why the team died (CLD_*-style code) -- TODO confirm
bigtime_t user_time;
bigtime_t kernel_time;
job_control_entry();
~job_control_entry();
void InitDeadState();
job_control_entry& operator=(const job_control_entry& other);
};
typedef DoublyLinkedList<job_control_entry> JobControlEntryList;
// List of children in a particular job-control state (stopped/continued).
struct team_job_control_children {
JobControlEntryList entries;
};
// Dead-children list plus aggregate statistics and a condition variable
// that waiters can block on.
struct team_dead_children : team_job_control_children {
ConditionVariable condition_variable;	// notified on changes to the list
uint32 count;	// number of dead-child entries
bigtime_t kernel_time;	// accumulated CPU times of dead children
bigtime_t user_time;
};
// Tracks the remaining threads of a dying team; `condition` allows
// waiting until all of them have exited (see Team::death_entry).
struct team_death_entry {
int32 remaining_threads;
ConditionVariable condition;
};
// Node of a singly-linked free list of recyclable user_thread structures
// (see Team::free_user_threads).
struct free_user_thread {
struct free_user_thread* next;	// next free entry
struct user_thread* thread;	// the reusable userland thread structure
};
class AssociatedDataOwner;
// A reference-counted datum that can be attached to an
// AssociatedDataOwner (e.g. a Team, which derives from that class).
// OwnerDeleted() is the hook invoked when the owner goes away.
class AssociatedData : public BReferenceable,
public DoublyLinkedListLinkImpl<AssociatedData> {
public:
AssociatedData();
virtual ~AssociatedData();
// the owner this datum is currently attached to (NULL if none)
AssociatedDataOwner* Owner() const
{ return fOwner; }
void SetOwner(AssociatedDataOwner* owner)
{ fOwner = owner; }
// called when the owning object is deleted
virtual void OwnerDeleted(AssociatedDataOwner* owner);
private:
AssociatedDataOwner* fOwner;
};
// Maintains a mutex-protected list of AssociatedData attached to this
// object. Mixed into Team to allow arbitrary data to be associated with
// a team.
class AssociatedDataOwner {
public:
AssociatedDataOwner();
~AssociatedDataOwner();
bool AddData(AssociatedData* data);
bool RemoveData(AssociatedData* data);
// called before the owner is deleted; presumably detaches all data
// (via AssociatedData::OwnerDeleted()) -- TODO confirm in the .cpp
void PrepareForDeletion();
private:
typedef DoublyLinkedList<AssociatedData> DataList;
private:
mutex fLock;	// guards fList
DataList fList;
};
typedef int32 (*thread_entry_func)(thread_func, void *);
namespace BKernel {
// Reference-counted, variable-length array of supplementary group IDs
// (see Team::supplementary_groups).
struct GroupsArray : KernelReferenceable {
int count;	// number of entries in groups[]
gid_t groups[];	// flexible array member, allocated with the struct
};
// Common base of Team and Thread: carries the ID and the list link that
// the TeamListIterator/ThreadListIterator classes rely on.
template<typename IDType>
struct TeamThreadIteratorEntry
: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
typedef IDType id_type;
typedef TeamThreadIteratorEntry<id_type> iterator_type;
id_type id;	// team/thread ID
bool visible;	// whether the entry should be reported during iteration
};
/*!	Kernel-side representation of a single thread.
	Reference counted (KernelReferenceable); the `id`/`visible` members are
	inherited from TeamThreadIteratorEntry<thread_id>.
*/
struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
int32 flags;	// THREAD_FLAGS_* bit mask (defines at the end of this file)
int64 serial_number;	// never reused thread serial number -- TODO confirm
Thread *hash_next;	// link for the global thread hash table
DoublyLinkedListLink<Thread> team_link;	// link in Team::thread_list
char name[B_OS_NAME_LENGTH];	// thread name
bool going_to_suspend;
int32 priority;	// scheduling priority
int32 io_priority;	// I/O scheduling priority
int32 state;	// B_THREAD_* or additional_thread_state value
struct cpu_ent *cpu;	// CPU the thread runs on; NULL when not running (see IsRunning())
struct cpu_ent *previous_cpu;	// CPU the thread ran on last
CPUSet cpumask;	// CPUs the thread may run on
int32 pinned_to_cpu;
spinlock scheduler_lock;
// signal handling state
sigset_t sig_block_mask;	// signals currently blocked by the thread
sigset_t sigsuspend_original_unblocked_mask;	// mask to restore after sigsuspend() -- TODO confirm
sigset_t old_sig_block_mask;
ucontext_t* user_signal_context;	// context for userland signal delivery
addr_t signal_stack_base;	// alternate signal stack
size_t signal_stack_size;
bool signal_stack_enabled;
bool in_kernel;	// currently executing in kernel mode
bool has_yielded;
Scheduler::ThreadData* scheduler_data;	// scheduler-private per-thread data
struct user_thread* user_thread;	// userland-visible thread structure
void (*cancel_function)(int);
// parameters saved so an interrupted syscall can be restarted
struct {
uint8 parameters[SYSCALL_RESTART_PARAMETER_SIZE];
} syscall_restart;
// describes what the thread is currently blocked on, if anything
struct {
status_t status;	// wait result / unblock status
uint32 flags;
uint32 type;	// kind of object being waited on
const void* object;	// opaque pointer identifying the wait object
timer unblock_timer;	// timer used for timed waits
} wait;
// state for the kernel thread messaging facility (single message slot)
struct {
sem_id write_sem;
sem_id read_sem;
thread_id sender;
int32 code;
size_t size;
void* buffer;
} msg;
void (*fault_handler)(void);	// if set, invoked instead of faulting
jmp_buf fault_handler_state;
int16 page_faults_allowed;
int16 page_fault_waits_allowed;
BKernel::Team *team;	// the team this thread belongs to
rw_spinlock team_lock;	// guards the team association
// exit bookkeeping for threads joining/waiting on this one
struct {
sem_id sem;
status_t status;	// the thread's exit status
ThreadDeathEntryList waiters;	// parties waiting for this thread's death
} exit;
struct select_info *select_infos;
struct thread_debug_info debug_info;	// user debugger support
// kernel stack
area_id kernel_stack_area;
addr_t kernel_stack_base;
addr_t kernel_stack_top;
// user stack
area_id user_stack_area;
addr_t user_stack_base;
size_t user_stack_size;
addr_t user_local_storage;
int kernel_errno;	// kernel-internal errno value of the thread
// CPU time accounting; guarded by time_lock (see CPUTime())
spinlock time_lock;
bigtime_t user_time;
bigtime_t kernel_time;
bigtime_t last_time;	// system_time() when the current run started; 0 otherwise (cf. CPUTime())
bigtime_t cpu_clock_offset;
void (*post_interrupt_callback)(void*);	// invoked after interrupt handling, if set
void* post_interrupt_data;
#if KDEBUG_RW_LOCK_DEBUG
rw_lock* held_read_locks[64] = {};	// read-locked rw_locks, for lock debugging
#endif
// architecture-dependent part
struct arch_thread arch_info;
public:
// no-op constructor: leaves all members uninitialized
Thread() {}
Thread(const char *name, thread_id threadID,
struct cpu_ent *cpu);
~Thread();
static status_t Create(const char* name, Thread*& _thread);
static Thread* Get(thread_id id);	// returns a reference
static Thread* GetAndLock(thread_id id);	// returns a referenced + locked thread
static Thread* GetDebug(thread_id id);	// presumably for kernel debugger use -- no reference taken? TODO confirm
static bool IsAlive(thread_id id);
void* operator new(size_t size);
void* operator new(size_t, void* pointer);	// placement new
void operator delete(void* pointer, size_t size);
status_t Init(bool idleThread);
// fLock guards the mutable parts of the structure
bool Lock()
{ mutex_lock(&fLock); return true; }
bool TryLock()
{ return mutex_trylock(&fLock) == B_OK; }
void Unlock()
{ mutex_unlock(&fLock); }
void UnlockAndReleaseReference()
{ Unlock(); ReleaseReference(); }
bool IsAlive() const;
bool IsRunning() const
{ return cpu != NULL; }
// signals pending on this thread only
sigset_t ThreadPendingSignals() const
{ return fPendingSignals.AllSignals(); }
// thread-pending plus team-pending signals
inline sigset_t AllPendingSignals() const;
void AddPendingSignal(int signal)
{ fPendingSignals.AddSignal(signal); }
void AddPendingSignal(Signal* signal)
{ fPendingSignals.AddSignal(signal); }
void RemovePendingSignal(int signal)
{ fPendingSignals.RemoveSignal(signal); }
void RemovePendingSignal(Signal* signal)
{ fPendingSignals.RemoveSignal(signal); }
void RemovePendingSignals(sigset_t mask)
{ fPendingSignals.RemoveSignals(mask); }
void ResetSignalsOnExec();
inline int32 HighestPendingSignalPriority(
sigset_t nonBlocked) const;
inline Signal* DequeuePendingSignal(sigset_t nonBlocked,
Signal& buffer);
// user timers
UserTimer* UserTimerFor(int32 id) const
{ return fUserTimers.TimerFor(id); }
status_t AddUserTimer(UserTimer* timer);
void RemoveUserTimer(UserTimer* timer);
void DeleteUserTimers(bool userDefinedOnly);
void UserTimerActivated(ThreadTimeUserTimer* timer)
{ fCPUTimeUserTimers.Add(timer); }
void UserTimerDeactivated(ThreadTimeUserTimer* timer)
{ fCPUTimeUserTimers.Remove(timer); }
void DeactivateCPUTimeUserTimers();
bool HasActiveCPUTimeUserTimers() const
{ return !fCPUTimeUserTimers.IsEmpty(); }
ThreadTimeUserTimerList::ConstIterator
CPUTimeUserTimerIterator() const
{ return fCPUTimeUserTimers.GetIterator(); }
// total CPU time; defined at the end of this file
inline bigtime_t CPUTime(bool ignoreCurrentRun) const;
private:
mutex fLock;	// protects the thread's mutable state
BKernel::PendingSignals fPendingSignals;	// signals pending on this thread only
UserTimerList fUserTimers;
ThreadTimeUserTimerList fCPUTimeUserTimers;	// currently active thread-CPU-time timers
};
/*!	Kernel-side representation of a team (process).
	Reference counted; also an AssociatedDataOwner, so arbitrary
	AssociatedData can be attached. The `id`/`visible` members come from
	TeamThreadIteratorEntry<team_id>.
*/
struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
AssociatedDataOwner {
DoublyLinkedListLink<Team> global_list_link;	// link in the global team list
Team *hash_next;	// link for the team hash table
// process hierarchy
DoublyLinkedListLink<Team> siblings_link;	// link in the parent's `children` list
Team *parent;	// parent team
DoublyLinkedList<Team, DoublyLinkedListMemberGetLink<Team, &Team::siblings_link> > children;
DoublyLinkedListLink<Team> group_link;	// link in ProcessGroup::teams
int64 serial_number;
// process group/session membership
pid_t group_id;
pid_t session_id;
ProcessGroup *group;
int num_threads;	// number of threads in this team
int state;	// current team_state value
int32 flags;	// TEAM_FLAG_* bit mask
// per-team kernel contexts
struct io_context *io_context;
struct user_mutex_context *user_mutex_context;
struct realtime_sem_context *realtime_sem_context;
struct xsi_sem_context *xsi_sem_context;
struct team_death_entry *death_entry;	// set while the team is dying
// job control / child bookkeeping
ThreadDeathEntryList dead_threads;
team_dead_children dead_children;
team_job_control_children stopped_children;
team_job_control_children continued_children;
struct job_control_entry* job_control_entry;	// this team's entry in the parent's lists
VMAddressSpace *address_space;	// the team's address space
Thread *main_thread;	// main thread of the team
DoublyLinkedList<Thread, DoublyLinkedListMemberGetLink<Thread, &Thread::team_link> >
thread_list;
struct team_loading_info *loading_info;	// valid while the team image is being loaded
DoublyLinkedList<image> image_list;	// images loaded into the team
struct list watcher_list;	// registered team_watchers
struct list sem_list;	// semaphores owned by the team
struct list port_list;	// ports owned by the team
struct arch_team arch_info;	// architecture-dependent part
// `user_data` area: storage handed out to userland (e.g. user_thread
// structures, cf. free_user_threads)
addr_t user_data;
area_id user_data_area;
size_t user_data_size;
size_t used_user_data;
struct free_user_thread* free_user_threads;	// recyclable user_thread structures
void* commpage_address;	// address of the commpage in this team
struct team_debug_info debug_info;	// user debugger support
// time accounting; guarded by time_lock
bigtime_t start_time;
bigtime_t dead_threads_kernel_time;	// accumulated times of already-dead threads
bigtime_t dead_threads_user_time;
bigtime_t cpu_clock_offset;
spinlock time_lock;
// user/group identity
uid_t saved_set_uid;
uid_t real_uid;
uid_t effective_uid;
gid_t saved_set_gid;
gid_t real_gid;
gid_t effective_gid;
BReference<GroupsArray> supplementary_groups;
// exit information; only meaningful once `initialized` is true
struct {
uint16 reason;	// why the team exited (CLD_*-style) -- TODO confirm
uint16 signal;	// signal that terminated the team, if any
uid_t signaling_user;
status_t status;	// exit status / main() return value
bool initialized;	// true when the fields above are valid
} exit;
spinlock signal_lock;	// guards signal-related state
public:
~Team();
static Team* Create(team_id id, const char* name,
bool kernel);
static Team* Get(team_id id);	// returns a reference
static Team* GetAndLock(team_id id);	// returns a referenced + locked team
// fLock guards the mutable parts of the structure
bool Lock()
{ mutex_lock(&fLock); return true; }
bool TryLock()
{ return mutex_trylock(&fLock) == B_OK; }
void Unlock()
{ mutex_unlock(&fLock); }
void UnlockAndReleaseReference()
{ Unlock(); ReleaseReference(); }
// helpers acquiring several related locks in a consistent order
void LockTeamAndParent(bool dontLockParentIfKernel);
void UnlockTeamAndParent();
void LockTeamAndProcessGroup();
void UnlockTeamAndProcessGroup();
void LockTeamParentAndProcessGroup();
void UnlockTeamParentAndProcessGroup();
// locks only the process group (acquires both, then drops the team lock)
void LockProcessGroup()
{ LockTeamAndProcessGroup(); Unlock(); }
const char* Name() const { return fName; }
void SetName(const char* name);
const char* Args() const { return fArgs; }
void SetArgs(const char* args);
void SetArgs(const char* path,
const char* const* otherArgs,
int otherArgCount);
BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
{ return fQueuedSignalsCounter; }
// team-wide pending signals
sigset_t PendingSignals() const
{ return fPendingSignals.AllSignals(); }
void AddPendingSignal(int signal)
{ fPendingSignals.AddSignal(signal); }
void AddPendingSignal(Signal* signal)
{ fPendingSignals.AddSignal(signal); }
void RemovePendingSignal(int signal)
{ fPendingSignals.RemoveSignal(signal); }
void RemovePendingSignal(Signal* signal)
{ fPendingSignals.RemoveSignal(signal); }
void RemovePendingSignals(sigset_t mask)
{ fPendingSignals.RemoveSignals(mask); }
void ResetSignalsOnExec();
inline int32 HighestPendingSignalPriority(
sigset_t nonBlocked) const;
inline Signal* DequeuePendingSignal(sigset_t nonBlocked,
Signal& buffer);
// `signal` is 1-based; maps to fSignalActions[signal - 1]
struct sigaction& SignalActionFor(int32 signal)
{ return fSignalActions[signal - 1]; }
void InheritSignalActions(Team* parent);
// user timers
UserTimer* UserTimerFor(int32 id) const
{ return fUserTimers.TimerFor(id); }
status_t AddUserTimer(UserTimer* timer);
void RemoveUserTimer(UserTimer* timer);
void DeleteUserTimers(bool userDefinedOnly);
bool CheckAddUserDefinedTimer();
void UserDefinedTimersRemoved(int32 count);
void UserTimerActivated(TeamTimeUserTimer* timer)
{ fCPUTimeUserTimers.Add(timer); }
void UserTimerActivated(TeamUserTimeUserTimer* timer)
{ fUserTimeUserTimers.Add(timer); }
void UserTimerDeactivated(TeamTimeUserTimer* timer)
{ fCPUTimeUserTimers.Remove(timer); }
void UserTimerDeactivated(
TeamUserTimeUserTimer* timer)
{ fUserTimeUserTimers.Remove(timer); }
void DeactivateCPUTimeUserTimers();
bool HasActiveCPUTimeUserTimers() const
{ return !fCPUTimeUserTimers.IsEmpty(); }
bool HasActiveUserTimeUserTimers() const
{ return !fUserTimeUserTimers.IsEmpty(); }
TeamTimeUserTimerList::ConstIterator
CPUTimeUserTimerIterator() const
{ return fCPUTimeUserTimers.GetIterator(); }
inline TeamUserTimeUserTimerList::ConstIterator
UserTimeUserTimerIterator() const;
bigtime_t CPUTime(bool ignoreCurrentRun,
Thread* lockedThread = NULL) const;
bigtime_t UserCPUTime() const;
// core dump support
ConditionVariable* CoreDumpCondition() const
{ return fCoreDumpCondition; }
void SetCoreDumpCondition(
ConditionVariable* condition)
{ fCoreDumpCondition = condition; }
private:
Team(team_id id, bool kernel);	// use Create() instead
private:
mutex fLock;	// protects the team's mutable state
char fName[B_OS_NAME_LENGTH];	// team name
char fArgs[64];	// team command line (possibly truncated)
BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
BKernel::PendingSignals fPendingSignals;	// team-wide pending signals
struct sigaction fSignalActions[MAX_SIGNAL_NUMBER];	// indexed by signal - 1
UserTimerList fUserTimers;
TeamTimeUserTimerList fCPUTimeUserTimers;	// active team-CPU-time timers
TeamUserTimeUserTimerList fUserTimeUserTimers;	// active user-CPU-time timers
int32 fUserDefinedTimerCount;	// maintained via CheckAddUserDefinedTimer()/UserDefinedTimersRemoved()
ConditionVariable* fCoreDumpCondition;	// non-NULL while a core dump is in progress -- TODO confirm
};
// A POSIX-style session: groups process groups and tracks the
// controlling terminal and foreground group.
struct ProcessSession : BReferenceable {
pid_t id;	// session ID
void* controlling_tty;	// opaque handle of the controlling TTY, if any
pid_t foreground_group;	// ID of the foreground process group
public:
ProcessSession(pid_t id);
~ProcessSession();
bool Lock()
{ mutex_lock(&fLock); return true; }
bool TryLock()
{ return mutex_trylock(&fLock) == B_OK; }
void Unlock()
{ mutex_unlock(&fLock); }
private:
mutex fLock;	// guards the session's mutable state
};
// A POSIX-style process group: a set of teams within one session.
struct ProcessGroup : KernelReferenceable {
typedef DoublyLinkedList<Team,
DoublyLinkedListMemberGetLink<Team,
&Team::group_link> > TeamList;
public:
struct ProcessGroup *hash_next;	// link for the process group hash table
pid_t id;	// process group ID
TeamList teams;	// teams belonging to this group
public:
ProcessGroup(pid_t id);
~ProcessGroup();
static ProcessGroup* Get(pid_t id);	// returns a reference
bool Lock()
{ mutex_lock(&fLock); return true; }
bool TryLock()
{ return mutex_trylock(&fLock) == B_OK; }
void Unlock()
{ mutex_unlock(&fLock); }
ProcessSession* Session() const
{ return fSession; }
// associate the group with a session
void Publish(ProcessSession* session);
void PublishLocked(ProcessSession* session);
// orphaned-group handling (cf. POSIX orphaned process groups)
bool IsOrphaned() const;
void ScheduleOrphanedCheck();
void UnsetOrphanedCheck();
public:
SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;	// link in the orphaned-check list
private:
mutex fLock;	// guards the group's mutable state
ProcessSession* fSession;	// session this group belongs to
bool fInOrphanedCheckList;	// true while queued for an orphaned check
};
// List type for pending orphaned-group checks.
typedef SinglyLinkedList<ProcessGroup,
SinglyLinkedListMemberGetLink<ProcessGroup,
&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;
/*!	\brief Allows iterating through all teams. */
// Iterator over the global team list (see the comment above).
struct TeamListIterator {
TeamListIterator();
~TeamListIterator();
Team* Next();	// returns the next team (presumably NULL at the end)
private:
TeamThreadIteratorEntry<team_id> fEntry;	// marker entry tracking the position
};
/*!	\brief Allows iterating through all threads. */
// Iterator over the global thread list (see the comment above).
struct ThreadListIterator {
ThreadListIterator();
~ThreadListIterator();
Thread* Next();	// returns the next thread (presumably NULL at the end)
private:
TeamThreadIteratorEntry<thread_id> fEntry;	// marker entry tracking the position
};
// Returns the priority of the highest-priority pending team signal that
// is contained in \a nonBlocked.
inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
return fPendingSignals.HighestSignalPriority(nonBlocked);
}
// Removes and returns the next pending team signal contained in
// \a nonBlocked; \a buffer may serve as storage for the returned signal.
inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}
// Returns an iterator over the team's active user-CPU-time user timers.
inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
return fUserTimeUserTimers.GetIterator();
}
/*!	Returns the union of the signals pending on this thread and the
	signals pending on its team.
*/
inline sigset_t
Thread::AllPendingSignals() const
{
	const sigset_t teamSignals = team->PendingSignals();
	return teamSignals | fPendingSignals.AllSignals();
}
// Returns the priority of the highest-priority signal pending on this
// thread (thread-local signals only) that is contained in \a nonBlocked.
inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
return fPendingSignals.HighestSignalPriority(nonBlocked);
}
// Removes and returns the next signal pending on this thread that is
// contained in \a nonBlocked; \a buffer may serve as storage for the
// returned signal.
inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}
/*!	Returns the thread's current total CPU time (user + kernel time).
The caller must hold \c time_lock.
\param ignoreCurrentRun If \c true and the thread is currently running,
don't add the time since the last time \c last_time was updated. Should
be used in "thread unscheduled" scheduler callbacks, since although the
thread is still running at that time, its time has already been stopped.
\return The thread's current total CPU time.
*/
/*!	Computes the thread's total CPU time: accumulated user + kernel time
	plus the clock offset, and — unless \a ignoreCurrentRun is set — the
	time elapsed in the current run (only when \c last_time is non-zero,
	i.e. the thread is currently scheduled).
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t total = user_time + kernel_time + cpu_clock_offset;
	const bool hasCurrentRun = last_time != 0;
	if (hasCurrentRun && !ignoreCurrentRun)
		total += system_time() - last_time;
	return total;
}
}
using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;
#endif
// Bits for Thread::flags. The names are descriptive; see the signal,
// syscall-restart, and user_debugger code for the exact semantics.
#define THREAD_FLAGS_SIGNALS_PENDING 0x0001
#define THREAD_FLAGS_DEBUG_THREAD 0x0002
#define THREAD_FLAGS_SINGLE_STEP 0x0004
#define THREAD_FLAGS_DEBUGGER_INSTALLED 0x0008
#define THREAD_FLAGS_BREAKPOINTS_DEFINED 0x0010
#define THREAD_FLAGS_BREAKPOINTS_INSTALLED 0x0020
#define THREAD_FLAGS_64_BIT_SYSCALL_RETURN 0x0040
#define THREAD_FLAGS_RESTART_SYSCALL 0x0080
#define THREAD_FLAGS_DONT_RESTART_SYSCALL 0x0100
#define THREAD_FLAGS_ALWAYS_RESTART_SYSCALL 0x0200
#define THREAD_FLAGS_SYSCALL_RESTARTED 0x0400
#define THREAD_FLAGS_SYSCALL 0x0800
#define THREAD_FLAGS_TRAP_FOR_CORE_DUMP 0x1000
#ifdef _COMPAT_MODE
#define THREAD_FLAGS_COMPAT_MODE 0x2000
#endif
#define THREAD_FLAGS_OLD_SIGMASK 0x4000
#endif