From 787964f7e7b89f407e7f7a1ab0c40c0c12a2ae0d Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Mon, 27 Jul 2020 17:32:04 -0700
Subject: [PATCH] kern: implement thread pinning/SvcSynchronizePreemptionState

---
 .../mesosphere/kern_k_capabilities.hpp        |  38 +++++-
 .../include/mesosphere/kern_k_process.hpp     |  22 ++++
 .../include/mesosphere/kern_k_scheduler.hpp   |   3 +
 .../include/mesosphere/kern_k_thread.hpp      |   9 +-
 .../arch/arm64/kern_exception_handlers.cpp    |   2 +-
 .../arch/arm64/kern_k_interrupt_manager.cpp   |   2 +-
 .../libmesosphere/source/kern_k_process.cpp   |   4 -
 .../libmesosphere/source/kern_k_scheduler.cpp |  36 +++++-
 .../libmesosphere/source/kern_k_thread.cpp    | 114 +++++++++++++++++-
 .../source/svc/kern_svc_synchronization.cpp   |  19 ++-
 10 files changed, 230 insertions(+), 19 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp
index f8f022b94..6557fd5f8 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp
@@ -221,6 +221,12 @@ namespace ams::kern {
                 data[id / BitsPerWord] &= ~(1ul << (id % BitsPerWord));
             }
 
+            static constexpr ALWAYS_INLINE bool GetSvcAllowedImpl(u8 *data, u32 id) {
+                constexpr size_t BitsPerWord = BITSIZEOF(*data);
+                MESOSPHERE_ASSERT(id < svc::SvcId_Count);
+                return (data[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0;
+            }
+
             bool SetSvcAllowed(u32 id) {
                 if (id < BITSIZEOF(this->svc_access_flags)) {
                     SetSvcAllowedImpl(this->svc_access_flags, id);
@@ -266,16 +272,46 @@ namespace ams::kern {
             ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const {
                 static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
 
+                /* Copy permissions. */
                 std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
 
                 /* Clear specific SVCs based on our state. */
                 ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
                 ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
-                if (sp.is_preemption_state_pinned) {
+                if (sp.is_pinned) {
                     ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
                 }
             }
 
+            ALWAYS_INLINE void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
+                static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+                /* Get whether we have access to return from exception. */
+                const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
+
+                /* Clear all permissions. */
+                std::memset(sp.svc_permission, 0, sizeof(this->svc_access_flags));
+
+                /* Set specific SVCs based on our state. */
+                SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
+                if (return_from_exception) {
+                    SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
+                    SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
+                }
+            }
+
+            ALWAYS_INLINE void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
+                static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+                /* Get whether we have access to return from exception. */
+                const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
+
+                /* Copy permissions. */
+                std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
+
+                /* Clear/Set specific SVCs based on our state. */
+                ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
+                ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
+                if (return_from_exception) {
+                    SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
+                }
+            }
+
             constexpr bool IsPermittedInterrupt(u32 id) const {
                 constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
                 if (id < BITSIZEOF(this->irq_access_flags)) {
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
index 2bd7d2df0..28f34f14f 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
@@ -204,10 +204,32 @@ namespace ams::kern {
                 return this->pinned_threads[core_id];
             }
 
+            void PinThread(s32 core_id, KThread *thread) {
+                MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
+                MESOSPHERE_ASSERT(thread != nullptr);
+                MESOSPHERE_ASSERT(this->pinned_threads[core_id] == nullptr);
+                this->pinned_threads[core_id] = thread;
+            }
+
+            void UnpinThread(s32 core_id, KThread *thread) {
+                MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
+                MESOSPHERE_ASSERT(thread != nullptr);
+                MESOSPHERE_ASSERT(this->pinned_threads[core_id] == thread);
+                this->pinned_threads[core_id] = nullptr;
+            }
+
             void CopySvcPermissionsTo(KThread::StackParameters &sp) {
                 this->capabilities.CopySvcPermissionsTo(sp);
             }
 
+            void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) {
+                this->capabilities.CopyPinnedSvcPermissionsTo(sp);
+            }
+
+            void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) {
+                this->capabilities.CopyUnpinnedSvcPermissionsTo(sp);
+            }
+
             constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; }
 
             bool ReserveResource(ams::svc::LimitableResource which, s64 value);
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
index 123a7fb49..c33878401 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
@@ -131,6 +131,9 @@ namespace ams::kern {
 
             static NOINLINE void ClearPreviousThread(KThread *thread);
 
+            static NOINLINE void PinCurrentThread(KProcess *cur_process);
+            static NOINLINE void UnpinCurrentThread(KProcess *cur_process);
+
             static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state);
             static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority);
             static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core);
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
index c43fea431..88f1c21c6 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
@@ -87,7 +87,7 @@ namespace ams::kern {
                 u8 current_svc_id;
                 bool is_calling_svc;
                 bool is_in_exception_handler;
-                bool is_preemption_state_pinned;
+                bool is_pinned;
                 s32 disable_count;
                 KThreadContext *context;
             };
@@ -171,7 +171,7 @@ namespace ams::kern {
            using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType;
 
             WaiterList waiter_list{};
-            WaiterList paused_waiter_list{};
+            WaiterList pinned_waiter_list{};
             KThread *lock_owner{};
             ConditionVariableThreadTree *condvar_tree{};
             uintptr_t debug_params[3]{};
@@ -249,6 +249,9 @@ namespace ams::kern {
                 this->GetStackParameters().disable_count--;
             }
 
+            void Pin();
+            void Unpin();
+
             NOINLINE void DisableCoreMigration();
             NOINLINE void EnableCoreMigration();
 
@@ -281,7 +284,7 @@ namespace ams::kern {
 
             ALWAYS_INLINE bool HasDpc() const {
                 MESOSPHERE_ASSERT_THIS();
-                return this->GetDpc() != 0;;
+                return this->GetDpc() != 0;
             }
         private:
             void Suspend();
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp
index b82d8faa4..3d2126043 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp
@@ -150,7 +150,7 @@ namespace ams::kern::arch::arm64 {
             KScopedSchedulerLock lk;
 
             /* Pin the current thread. */
-            GetCurrentProcess().PinCurrentThread();
+            KScheduler::PinCurrentThread(GetCurrentProcessPointer());
 
             /* Set the interrupt flag for the thread. */
             GetCurrentThread().SetInterruptFlag();
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp
index cda867fe5..f1995e103 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp
@@ -180,7 +180,7 @@ namespace ams::kern::arch::arm64 {
                 KScopedSchedulerLock sl;
 
                 /* Pin the current thread. */
-                GetCurrentProcess().PinCurrentThread();
+                KScheduler::PinCurrentThread(GetCurrentProcessPointer());
 
                 /* Set the interrupt flag for the thread. */
                 GetCurrentThread().SetInterruptFlag();
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index 0177f969f..4a1ee3572 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -980,10 +980,6 @@ namespace ams::kern {
         }
     }
 
-    void KProcess::PinCurrentThread() {
-        MESOSPHERE_UNIMPLEMENTED();
-    }
-
     KProcess *KProcess::GetProcessFromId(u64 process_id) {
         /* Lock the list. */
         KProcess::ListAccessor accessor;
diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp
index b1f0b96cd..2e99b20f1 100644
--- a/libraries/libmesosphere/source/kern_k_scheduler.cpp
+++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp
@@ -121,10 +121,10 @@ namespace ams::kern {
             /* If the thread has no waiters, we need to check if the process has a thread pinned. */
             if (top_thread->GetNumKernelWaiters() == 0) {
                 if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
-                    if (KThread *suggested = parent->GetPinnedThread(core_id); suggested != nullptr && suggested != top_thread && suggested->GetNumKernelWaiters() == 0) {
+                    if (KThread *pinned = parent->GetPinnedThread(core_id); pinned != nullptr && pinned != top_thread) {
                         /* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */
-                        if (suggested->GetRawState() == KThread::ThreadState_Runnable) {
-                            top_thread = suggested;
+                        if (pinned->GetRawState() == KThread::ThreadState_Runnable) {
+                            top_thread = pinned;
                         } else {
                             top_thread = nullptr;
                         }
@@ -274,6 +274,36 @@ namespace ams::kern {
         }
     }
 
+    void KScheduler::PinCurrentThread(KProcess *cur_process) {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* Get the current thread. */
+        const s32 core_id = GetCurrentCoreId();
+        KThread *cur_thread = GetCurrentThreadPointer();
+
+        /* Pin it. */
+        cur_process->PinThread(core_id, cur_thread);
+        cur_thread->Pin();
+
+        /* An update is needed. */
+        SetSchedulerUpdateNeeded();
+    }
+
+    void KScheduler::UnpinCurrentThread(KProcess *cur_process) {
+        MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
+
+        /* Get the current thread. */
+        const s32 core_id = GetCurrentCoreId();
+        KThread *cur_thread = GetCurrentThreadPointer();
+
+        /* Unpin it. */
+        cur_thread->Unpin();
+        cur_process->UnpinThread(core_id, cur_thread);
+
+        /* An update is needed. */
+        SetSchedulerUpdateNeeded();
+    }
+
     void KScheduler::OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state) {
         MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
 
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
index 8c3a465cc..55ef83adb 100644
--- a/libraries/libmesosphere/source/kern_k_thread.cpp
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -324,12 +324,11 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
 
-        /* Release user exception, if relevant. */
+        /* Release user exception and unpin, if relevant. */
         if (this->parent != nullptr) {
             this->parent->ReleaseUserException(this);
             if (this->parent->GetPinnedThread(GetCurrentCoreId()) == this) {
-                /* TODO: this->parent->UnpinCurrentThread(); */
-                MESOSPHERE_UNIMPLEMENTED();
+                KScheduler::UnpinCurrentThread(this->parent);
             }
         }
 
@@ -376,6 +375,113 @@ namespace ams::kern {
         this->FinishTermination();
     }
 
+    void KThread::Pin() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        /* Set ourselves as pinned. */
+        this->GetStackParameters().is_pinned = true;
+
+        /* Disable core migration. */
+        MESOSPHERE_ASSERT(this->num_core_migration_disables == 0);
+        {
+            ++this->num_core_migration_disables;
+
+            /* Save our ideal state to restore when we're unpinned. */
+            this->original_ideal_core_id = this->ideal_core_id;
+            this->original_affinity_mask = this->affinity_mask;
+
+            /* Bind ourselves to this core. */
+            const s32 active_core = this->GetActiveCore();
+            const s32 current_core = GetCurrentCoreId();
+
+            this->SetActiveCore(current_core);
+            this->ideal_core_id = current_core;
+
+            this->affinity_mask.SetAffinityMask(1ul << current_core);
+
+            if (active_core != current_core || this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
+            }
+        }
+
+        /* Disallow performing thread suspension. */
+        {
+            /* Update our allow flags. */
+            this->suspend_allowed_flags &= ~(1 << (SuspendType_Thread + ThreadState_SuspendShift));
+
+            /* Update our state. */
+            const ThreadState old_state = this->thread_state;
+            this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+            if (this->thread_state != old_state) {
+                KScheduler::OnThreadStateChanged(this, old_state);
+            }
+        }
+
+        /* Update our SVC access permissions. */
+        MESOSPHERE_ASSERT(this->parent != nullptr);
+        this->parent->CopyPinnedSvcPermissionsTo(this->GetStackParameters());
+    }
+
+    void KThread::Unpin() {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        /* Set ourselves as unpinned. */
+        this->GetStackParameters().is_pinned = false;
+
+        /* Enable core migration. */
+        MESOSPHERE_ASSERT(this->num_core_migration_disables == 1);
+        {
+            --this->num_core_migration_disables;
+
+            /* Restore our original state. */
+            const KAffinityMask old_mask = this->affinity_mask;
+
+            this->ideal_core_id = this->original_ideal_core_id;
+            this->affinity_mask = this->original_affinity_mask;
+
+            if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+                const s32 active_core = this->GetActiveCore();
+
+                if (!this->affinity_mask.GetAffinity(active_core)) {
+                    if (this->ideal_core_id >= 0) {
+                        this->SetActiveCore(this->ideal_core_id);
+                    } else {
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
+                    }
+                }
+                KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
+            }
+        }
+
+        /* Allow performing thread suspension (if termination hasn't been requested). */
+        {
+            /* Update our allow flags. */
+            if (!this->IsTerminationRequested()) {
+                this->suspend_allowed_flags |= (1 << (SuspendType_Thread + ThreadState_SuspendShift));
+            }
+
+            /* Update our state. */
+            const ThreadState old_state = this->thread_state;
+            this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+            if (this->thread_state != old_state) {
+                KScheduler::OnThreadStateChanged(this, old_state);
+            }
+        }
+
+        /* Update our SVC access permissions. */
+        MESOSPHERE_ASSERT(this->parent != nullptr);
+        this->parent->CopyUnpinnedSvcPermissionsTo(this->GetStackParameters());
+
+        /* Resume any threads that began waiting on us while we were pinned. */
+        for (auto it = this->pinned_waiter_list.begin(); it != this->pinned_waiter_list.end(); ++it) {
+            if (it->GetState() == ThreadState_Waiting) {
+                it->SetState(ThreadState_Runnable);
+            }
+        }
+    }
+
     void KThread::DisableCoreMigration() {
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
@@ -387,7 +493,7 @@ namespace ams::kern {
             this->original_ideal_core_id = this->ideal_core_id;
             this->original_affinity_mask = this->affinity_mask;
 
-            /* Bind outselves to this core. */
+            /* Bind ourselves to this core. */
             const s32 active_core = this->GetActiveCore();
             this->ideal_core_id = active_core;
             this->affinity_mask.SetAffinityMask(1ul << active_core);
diff --git a/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp b/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp
index 05c651ab1..1db753eea 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp
@@ -112,6 +112,21 @@ namespace ams::kern::svc {
             return ResultSuccess();
         }
 
+        void SynchronizePreemptionState() {
+            /* Lock the scheduler. */
+            KScopedSchedulerLock sl;
+
+            /* If the current thread is pinned, unpin it. */
+            KProcess *cur_process = GetCurrentProcessPointer();
+            if (cur_process->GetPinnedThread(GetCurrentCoreId()) == GetCurrentThreadPointer()) {
+                /* Clear the current thread's interrupt flag. */
+                GetCurrentThread().ClearInterruptFlag();
+
+                /* Unpin the current thread. */
+                KScheduler::UnpinCurrentThread(cur_process);
+            }
+        }
+
     }
 
     /* ============================= 64 ABI ============================= */
@@ -133,7 +148,7 @@ namespace ams::kern::svc {
     }
 
     void SynchronizePreemptionState64() {
-        MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64 was called.");
+        return SynchronizePreemptionState();
     }
 
     /* ============================= 64From32 ABI ============================= */
@@ -155,7 +170,7 @@ namespace ams::kern::svc {
     }
 
     void SynchronizePreemptionState64From32() {
-        MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64From32 was called.");
+        return SynchronizePreemptionState();
     }
 
 }