/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

    namespace {

        /* Threads that have had termination requested are kept at or above this priority, so that they exit promptly. */
        constexpr inline s32 TerminatingThreadPriority = ams::svc::SystemThreadPriorityHighest - 1;

        /* Monotonically increasing counter used to assign unique thread ids. */
        constinit util::Atomic<u64> g_thread_id = 0;

        constexpr ALWAYS_INLINE bool IsKernelAddressKey(KProcessAddress key) {
            const uintptr_t key_uptr = GetInteger(key);
            return KernelVirtualAddressSpaceBase <= key_uptr && key_uptr <= KernelVirtualAddressSpaceLast && (key_uptr & 1) == 0;
        }
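
        /* When MESOSPHERE_ENABLE_KERNEL_STACK_USAGE is enabled, new kernel stacks are filled with 0xCC */
        /* so that GetKernelStackUsage() can later measure how much of the stack was actually written. */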
        void InitializeKernelStack(uintptr_t stack_top) {
            #if defined(MESOSPHERE_ENABLE_KERNEL_STACK_USAGE)
            const uintptr_t stack_bottom = stack_top - PageSize;
            std::memset(reinterpret_cast<void *>(stack_bottom), 0xCC, PageSize - sizeof(KThread::StackParameters));
            #else
            MESOSPHERE_UNUSED(stack_top);
            #endif
        }
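
        /* Unmap the page backing a kernel stack from the kernel page table and return it to the page buffer allocator. */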
        void CleanupKernelStack(uintptr_t stack_top) {
            const uintptr_t stack_bottom = stack_top - PageSize;

            KPhysicalAddress stack_paddr = Null<KPhysicalAddress>;
            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(stack_paddr), stack_bottom));

            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPages(stack_bottom, 1, KMemoryState_Kernel));

            /* Free the stack page. */
            KPageBuffer::FreeChecked<PageSize>(KPageBuffer::FromPhysicalAddress(stack_paddr));
        }

        class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { /* ... */ };

        class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
            private:
                KThread::WaiterList *m_wait_list;
            public:
                constexpr ThreadQueueImplForKThreadSetProperty(KThread::WaiterList *wl) : m_wait_list(wl) { /* ... */ }

                virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
                    /* Remove the thread from the wait list. */
                    m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

                    /* Invoke the base cancel wait handler. */
                    KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
                }
        };

    }
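
    /* The helpers below rewrite the per-thread SVC access bitmap stored in the thread's stack parameters. */
    /* While pinned, a thread may only use SynchronizePreemptionState (plus ReturnFromException and GetInfo if it */
    /* was already handling a usermode exception); unpinning copies the parent's permissions back, minus SynchronizePreemptionState. */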
    ALWAYS_INLINE void KThread::SetPinnedSvcPermissions() {
        /* Get our stack parameters. */
        auto &sp = this->GetStackParameters();

        /* Get our parent's svc permissions. */
        MESOSPHERE_ASSERT(m_parent != nullptr);
        const auto &svc_permissions = m_parent->GetSvcPermissions();

        /* Get whether we have access to return from exception. */
        const bool return_from_exception = sp.svc_access_flags[svc::SvcId_ReturnFromException];

        /* Clear all permissions. */
        sp.svc_access_flags.Reset();

        /* Set SynchronizePreemptionState if allowed. */
        if (svc_permissions[svc::SvcId_SynchronizePreemptionState]) {
            sp.svc_access_flags[svc::SvcId_SynchronizePreemptionState] = true;
        }

        /* If we previously had ReturnFromException, potentially grant it and GetInfo. */
        if (return_from_exception) {
            /* Set ReturnFromException (guaranteed allowed, if we're here). */
            sp.svc_access_flags[svc::SvcId_ReturnFromException] = true;

            /* Set GetInfo if allowed. */
            if (svc_permissions[svc::SvcId_GetInfo]) {
                sp.svc_access_flags[svc::SvcId_GetInfo] = true;
            }
        }
    }

    ALWAYS_INLINE void KThread::SetUnpinnedSvcPermissions() {
        /* Get our stack parameters. */
        auto &sp = this->GetStackParameters();

        /* Get our parent's svc permissions. */
        MESOSPHERE_ASSERT(m_parent != nullptr);
        const auto &svc_permissions = m_parent->GetSvcPermissions();

        /* Get whether we have access to return from exception. */
        const bool return_from_exception = sp.svc_access_flags[svc::SvcId_ReturnFromException];

        /* Copy permissions. */
        sp.svc_access_flags = svc_permissions;

        /* Clear specific SVCs based on our state. */
        sp.svc_access_flags[svc::SvcId_SynchronizePreemptionState] = false;

        if (!return_from_exception) {
            sp.svc_access_flags[svc::SvcId_ReturnFromException] = false;
        }
    }

    ALWAYS_INLINE void KThread::SetUsermodeExceptionSvcPermissions() {
        /* Get our stack parameters. */
        auto &sp = this->GetStackParameters();

        /* Get our parent's svc permissions. */
        MESOSPHERE_ASSERT(m_parent != nullptr);
        const auto &svc_permissions = m_parent->GetSvcPermissions();

        /* Set ReturnFromException if allowed. */
        if (svc_permissions[svc::SvcId_ReturnFromException]) {
            sp.svc_access_flags[svc::SvcId_ReturnFromException] = true;
        }

        /* Set GetInfo if allowed. */
        if (svc_permissions[svc::SvcId_GetInfo]) {
            sp.svc_access_flags[svc::SvcId_GetInfo] = true;
        }
    }

    ALWAYS_INLINE void KThread::ClearUsermodeExceptionSvcPermissions() {
        /* Get our stack parameters. */
        auto &sp = this->GetStackParameters();

        /* Clear ReturnFromException. */
        sp.svc_access_flags[svc::SvcId_ReturnFromException] = false;

        /* If pinned, clear GetInfo. */
        if (sp.is_pinned) {
            sp.svc_access_flags[svc::SvcId_GetInfo] = false;
        }
    }
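
    /* Member-by-member initialization shared by all thread types. kern_stack_top must point at the top */
    /* of an already-mapped kernel stack page; InitializeThread() below maps one for new threads. */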
    Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type) {
        /* Assert parameters are valid. */
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(kern_stack_top != nullptr);
        MESOSPHERE_ASSERT((type == ThreadType_Main) || (ams::svc::HighestThreadPriority <= prio && prio <= ams::svc::LowestThreadPriority));
        MESOSPHERE_ASSERT((owner != nullptr) || (type != ThreadType_User));
        MESOSPHERE_ASSERT(0 <= virt_core && virt_core < static_cast<s32>(BITSIZEOF(u64)));

        /* Convert the virtual core to a physical core. */
        const s32 phys_core = cpu::VirtualToPhysicalCoreMap[virt_core];
        MESOSPHERE_ASSERT(0 <= phys_core && phys_core < static_cast<s32>(cpu::NumCores));

        /* First, clear the TLS address. */
        m_tls_address = Null<KProcessAddress>;

        const uintptr_t kern_stack_top_address = reinterpret_cast<uintptr_t>(kern_stack_top);
        MESOSPHERE_UNUSED(kern_stack_top_address);

        /* Next, assert things based on the type. */
        switch (type) {
            case ThreadType_Main:
                {
                    MESOSPHERE_ASSERT(arg == 0);
                }
                [[fallthrough]];
            case ThreadType_HighPriority:
                if (type != ThreadType_Main) {
                    MESOSPHERE_ASSERT(phys_core == GetCurrentCoreId());
                }
                [[fallthrough]];
            case ThreadType_Kernel:
                {
                    MESOSPHERE_ASSERT(user_stack_top == 0);
                    MESOSPHERE_ASSERT(util::IsAligned(kern_stack_top_address, PageSize));
                }
                [[fallthrough]];
            case ThreadType_User:
                {
                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask()     | (1ul << virt_core)) == owner->GetCoreMask()));
                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio))      == owner->GetPriorityMask()));
                }
                break;
            default:
                MESOSPHERE_PANIC("KThread::Initialize: Unknown ThreadType %u", static_cast<u32>(type));
                break;
        }

        /* Set the ideal core ID and affinity mask. */
        m_virtual_ideal_core_id  = virt_core;
        m_physical_ideal_core_id = phys_core;
        m_virtual_affinity_mask  = (static_cast<u64>(1) << virt_core);
        m_physical_affinity_mask.SetAffinity(phys_core, true);

        /* Set the thread state. */
        m_thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;

        /* Set TLS address and TLS heap address. */
        /* NOTE: Nintendo wrote TLS address above already, but official code really does write tls address twice. */
        m_tls_address      = 0;
        m_tls_heap_address = 0;

        /* Set parent and condvar tree. */
        m_parent       = nullptr;
        m_condvar_tree = nullptr;
        m_condvar_key  = 0;

        /* Set sync booleans. */
        m_signaled              = false;
        m_termination_requested = false;
        m_wait_cancelled        = false;
        m_cancellable           = false;

        /* Set core ID and wait result. */
        m_core_id     = phys_core;
        m_wait_result = svc::ResultNoSynchronizationObject();

        /* Set the stack top. */
        m_kernel_stack_top = kern_stack_top;

        /* Set priorities. */
        m_priority      = prio;
        m_base_priority = prio;

        /* Initialize wait queue/sync index. */
        m_synced_index = -1;
        m_wait_queue   = nullptr;

        /* Set suspend flags. */
        m_suspend_request_flags = 0;
        m_suspend_allowed_flags = ThreadState_SuspendFlagMask;

        /* We're neither debug attached, nor are we nesting our priority inheritance. */
        m_debug_attached             = false;
        m_priority_inheritance_count = 0;

        /* We haven't been scheduled, and we have done no light IPC. */
        m_schedule_count      = -1;
        m_last_scheduled_tick = 0;
        m_light_ipc_data      = nullptr;

        /* We're not waiting for a lock, and we haven't disabled migration. */
        m_waiting_lock_info           = nullptr;
        m_num_core_migration_disables = 0;

        /* We have no waiters, and no closed objects. */
        m_num_kernel_waiters = 0;
        m_closed_object      = nullptr;

        /* Set our current core id. */
        m_current_core_id = phys_core;

        /* We haven't released our resource limit hint, and we've spent no time on the cpu. */
        m_resource_limit_release_hint = false;
        m_cpu_time                    = 0;

        /* Setup our kernel stack. */
        if (type != ThreadType_Main) {
            InitializeKernelStack(reinterpret_cast<uintptr_t>(kern_stack_top));
        }

        /* Clear our stack parameters. */
        std::memset(static_cast<void *>(std::addressof(this->GetStackParameters())), 0, sizeof(StackParameters));

        /* Setup the TLS, if needed. */
        if (type == ThreadType_User) {
            R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
            m_tls_heap_address = owner->GetThreadLocalRegionPointer(m_tls_address);
            std::memset(m_tls_heap_address, 0, ams::svc::ThreadLocalRegionSize);
        }

        /* Set parent, if relevant. */
        if (owner != nullptr) {
            m_parent = owner;
            m_parent->Open();
        }

        /* Initialize thread context. */
        constexpr bool IsDefault64Bit = sizeof(uintptr_t) == sizeof(u64);
        const bool is_64_bit = m_parent ? m_parent->Is64Bit() : IsDefault64Bit;
        const bool is_user   = (type == ThreadType_User);
        const bool is_main   = (type == ThreadType_Main);
        this->GetContext().Initialize(reinterpret_cast<uintptr_t>(func), reinterpret_cast<uintptr_t>(this->GetStackTop()), GetInteger(user_stack_top), arg, is_user, is_64_bit, is_main);

        /* Setup the stack parameters. */
        StackParameters &sp = this->GetStackParameters();
        if (m_parent != nullptr) {
            this->SetUnpinnedSvcPermissions();
            this->ClearUsermodeExceptionSvcPermissions();
        }
        sp.caller_save_fpu_registers = std::addressof(m_caller_save_fpu_registers);
        sp.cur_thread                = this;
        sp.disable_count             = 1;
        this->SetInExceptionHandler();

        if (m_parent != nullptr && is_64_bit) {
            this->SetFpu64Bit();
        }

        /* Set thread ID. */
        m_thread_id = g_thread_id++;

        /* We initialized! */
        m_initialized = true;

        /* Register ourselves with our parent process. */
        if (m_parent != nullptr) {
            m_parent->RegisterThread(this);
            if (m_parent->IsSuspended()) {
                this->RequestSuspend(SuspendType_Process);
            }
        }

        R_SUCCEED();
    }
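
    /* Allocate and map one page from the kernel stack region to serve as the new thread's kernel stack, then */
    /* delegate the remaining setup to Initialize(). On failure, the stack mapping is cleaned up and the page freed. */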
    Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
        /* Get stack region for the thread. */
        const auto &stack_region = KMemoryLayout::GetKernelStackRegion();
        MESOSPHERE_ABORT_UNLESS(stack_region.GetEndAddress() != 0);

        /* Allocate a page to use as the thread's kernel stack. */
        KPageBuffer *page = KPageBuffer::AllocateChecked<PageSize>();
        R_UNLESS(page != nullptr, svc::ResultOutOfResource());

        /* Map the stack page. */
        KProcessAddress stack_top = Null<KProcessAddress>;
        {
            /* If we fail to map, avoid leaking the page. */
            ON_RESULT_FAILURE { KPageBuffer::Free(page); };

            /* Perform the mapping. */
            KProcessAddress stack_bottom = Null<KProcessAddress>;
            R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
                                                        stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));

            /* Calculate top of the stack. */
            stack_top = stack_bottom + PageSize;
        }

        /* If we fail, cleanup the stack we mapped. */
        ON_RESULT_FAILURE { CleanupKernelStack(GetInteger(stack_top)); };

        /* Initialize the thread. */
        R_RETURN(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
    }

    void KThread::PostDestroy(uintptr_t arg) {
        KProcess *owner = reinterpret_cast<KProcess *>(arg & ~1ul);
        const bool resource_limit_release_hint = (arg & 1);
        const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
        if (owner != nullptr) {
            owner->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 1, hint_value);
            owner->Close();
        } else {
            Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_ThreadCountMax, 1, hint_value);
        }
    }

    void KThread::ResumeThreadsSuspendedForInit() {
        KThread::ListAccessor list_accessor;
        {
            KScopedSchedulerLock sl;

            for (auto &thread : list_accessor) {
                static_cast<KThread &>(thread).Resume(SuspendType_Init);
            }
        }
    }

    void KThread::Finalize() {
        MESOSPHERE_ASSERT_THIS();

        /* If the thread has an owner process, unregister it. */
        if (m_parent != nullptr) {
            m_parent->UnregisterThread(this);
        }

        /* If the thread has a local region, delete it. */
        if (m_tls_address != Null<KProcessAddress>) {
            MESOSPHERE_R_ABORT_UNLESS(m_parent->DeleteThreadLocalRegion(m_tls_address));
        }

        /* Release any waiters. */
        {
            MESOSPHERE_ASSERT(m_waiting_lock_info == nullptr);
            KScopedSchedulerLock sl;

            /* Check that we have no kernel waiters. */
            MESOSPHERE_ABORT_UNLESS(m_num_kernel_waiters == 0);

            auto it = m_held_lock_info_list.begin();
            while (it != m_held_lock_info_list.end()) {
                /* Get the lock info. */
                auto * const lock_info = std::addressof(*it);

                /* The lock shouldn't have a kernel waiter. */
                MESOSPHERE_ASSERT(!IsKernelAddressKey(lock_info->GetAddressKey()));

                /* Remove all waiters. */
                while (lock_info->GetWaiterCount() != 0) {
                    /* Get the front waiter. */
                    KThread * const waiter = lock_info->GetHighestPriorityWaiter();

                    /* Remove it from the lock. */
                    if (lock_info->RemoveWaiter(waiter)) {
                        MESOSPHERE_ASSERT(lock_info->GetWaiterCount() == 0);
                    }

                    /* Cancel the thread's wait. */
                    waiter->CancelWait(svc::ResultInvalidState(), true);
                }

                /* Remove the held lock from our list. */
                it = m_held_lock_info_list.erase(it);

                /* Free the lock info. */
                LockWithPriorityInheritanceInfo::Free(lock_info);
            }
        }

        /* Cleanup the kernel stack. */
        if (m_kernel_stack_top != nullptr) {
            CleanupKernelStack(reinterpret_cast<uintptr_t>(m_kernel_stack_top));
        }

        /* Perform inherited finalization. */
        KSynchronizationObject::Finalize();
    }

    bool KThread::IsSignaled() const {
        return m_signaled;
    }

    void KThread::OnTimer() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* If we're waiting, cancel the wait. */
        if (this->GetState() == ThreadState_Waiting) {
            m_wait_queue->CancelWait(this, svc::ResultTimedOut(), false);
        }
    }
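
    /* Thread termination is split in two phases: StartTermination() runs under the scheduler lock and marks the */
    /* thread terminated and signaled, while FinishTermination() (reached via DoWorkerTaskImpl) waits for the */
    /* thread to stop being current on every core before closing it. */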
    void KThread::StartTermination() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Release user exception and unpin, if relevant. */
        if (m_parent != nullptr) {
            m_parent->ReleaseUserException(this);
            if (m_parent->GetPinnedThread(GetCurrentCoreId()) == this) {
                m_parent->UnpinCurrentThread();
            }
        }

        /* Set state to terminated. */
        this->SetState(KThread::ThreadState_Terminated);

        /* Clear the thread's status as running in parent. */
        if (m_parent != nullptr) {
            m_parent->ClearRunningThread(this);
        }

        /* Signal. */
        m_signaled = true;
        KSynchronizationObject::NotifyAvailable();

        /* Call the on thread termination handler. */
        KThreadContext::OnThreadTerminating(this);

        /* Clear previous thread in KScheduler. */
        KScheduler::ClearPreviousThread(this);

        /* Register terminated dpc flag. */
        this->RegisterDpc(DpcFlag_Terminated);
    }

    void KThread::FinishTermination() {
        MESOSPHERE_ASSERT_THIS();

        /* Ensure that the thread is not executing on any core. */
        if (m_parent != nullptr) {
            /* Wait for the thread to not be current on any core. */
            for (size_t i = 0; i < cpu::NumCores; ++i) {
                KThread *core_thread;
                do {
                    core_thread = Kernel::GetScheduler(i).GetSchedulerCurrentThread();
                } while (core_thread == this);
            }

            /* Ensure that all cores are synchronized at this point. */
            cpu::SynchronizeCores(m_parent->GetPhysicalCoreMask());
        }

        /* Close the thread. */
        this->Close();
    }

    void KThread::DoWorkerTaskImpl() {
        /* Finish the termination that was begun by Exit(). */
        this->FinishTermination();
    }

    void KThread::OnEnterUsermodeException() {
        this->SetUsermodeExceptionSvcPermissions();
        this->SetInUsermodeExceptionHandler();
    }

    void KThread::OnLeaveUsermodeException() {
        this->ClearUsermodeExceptionSvcPermissions();

        /* NOTE: InUsermodeExceptionHandler will be cleared by RestoreContext. */
    }
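
    /* Pinning binds the thread to the core it is currently running on, raises its base priority above anything */
    /* its owner process can normally use, disallows thread suspension, and restricts SVC access to the pinned set. */
    /* Unpin() undoes each of these steps and wakes any threads that blocked while the thread was pinned. */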
    void KThread::Pin() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Set ourselves as pinned. */
        this->GetStackParameters().is_pinned = true;

        /* Disable core migration. */
        MESOSPHERE_ASSERT(m_num_core_migration_disables == 0);
        {
            ++m_num_core_migration_disables;

            /* Save our ideal state to restore when we're unpinned. */
            m_original_physical_ideal_core_id = m_physical_ideal_core_id;
            m_original_physical_affinity_mask = m_physical_affinity_mask;

            /* Bind ourselves to this core. */
            const s32 active_core  = this->GetActiveCore();
            const s32 current_core = GetCurrentCoreId();

            this->SetActiveCore(current_core);
            m_physical_ideal_core_id = current_core;
            m_physical_affinity_mask.SetAffinityMask(1ul << current_core);

            if (active_core != current_core || m_physical_affinity_mask.GetAffinityMask() != m_original_physical_affinity_mask.GetAffinityMask()) {
                KScheduler::OnThreadAffinityMaskChanged(this, m_original_physical_affinity_mask, active_core);
            }

            /* Set base priority-on-unpin. */
            const s32 old_base_priority = m_base_priority;
            m_base_priority_on_unpin    = old_base_priority;

            /* Set base priority to higher than any possible process priority. */
            /* (The lowest set bit of the priority mask is the highest priority the process may use; one less is strictly higher.) */
            m_base_priority = std::min<s32>(old_base_priority, __builtin_ctzll(this->GetOwnerProcess()->GetPriorityMask()) - 1);
            RestorePriority(this);
        }

        /* Disallow performing thread suspension. */
        {
            /* Update our allow flags. */
            m_suspend_allowed_flags &= ~(1 << (util::ToUnderlying(SuspendType_Thread) + util::ToUnderlying(ThreadState_SuspendShift)));

            /* Update our state. */
            this->UpdateState();
        }

        /* Update our SVC access permissions. */
        this->SetPinnedSvcPermissions();
    }

    void KThread::Unpin() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Set ourselves as unpinned. */
        this->GetStackParameters().is_pinned = false;

        /* Enable core migration. */
        MESOSPHERE_ASSERT(m_num_core_migration_disables == 1);
        {
            --m_num_core_migration_disables;

            /* Restore our original state. */
            const KAffinityMask old_mask = m_physical_affinity_mask;

            m_physical_ideal_core_id = m_original_physical_ideal_core_id;
            m_physical_affinity_mask = m_original_physical_affinity_mask;

            if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                const s32 active_core = this->GetActiveCore();

                if (!m_physical_affinity_mask.GetAffinity(active_core)) {
                    if (m_physical_ideal_core_id >= 0) {
                        this->SetActiveCore(m_physical_ideal_core_id);
                    } else {
                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask()));
                    }
                }
                KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
            }

            m_base_priority = m_base_priority_on_unpin;
            RestorePriority(this);
        }

        /* Allow performing thread suspension (if termination hasn't been requested). */
        if (!this->IsTerminationRequested()) {
            /* Update our allow flags. */
            m_suspend_allowed_flags |= (1 << (util::ToUnderlying(SuspendType_Thread) + util::ToUnderlying(ThreadState_SuspendShift)));

            /* Update our state. */
            this->UpdateState();

            /* Update our SVC access permissions. */
            MESOSPHERE_ASSERT(m_parent != nullptr);
            this->SetUnpinnedSvcPermissions();
        }

        /* Resume any threads that began waiting on us while we were pinned. */
        for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); it = m_pinned_waiter_list.erase(it)) {
            it->EndWait(ResultSuccess());
        }
    }

    void KThread::DisableCoreMigration() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());

        KScopedSchedulerLock sl;
        MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);
        if ((m_num_core_migration_disables++) == 0) {
            /* Save our ideal state to restore when we can migrate again. */
            m_original_physical_ideal_core_id = m_physical_ideal_core_id;
            m_original_physical_affinity_mask = m_physical_affinity_mask;

            /* Bind ourselves to this core. */
            const s32 active_core = this->GetActiveCore();
            m_physical_ideal_core_id = active_core;
            m_physical_affinity_mask.SetAffinityMask(1ul << active_core);

            if (m_physical_affinity_mask.GetAffinityMask() != m_original_physical_affinity_mask.GetAffinityMask()) {
                KScheduler::OnThreadAffinityMaskChanged(this, m_original_physical_affinity_mask, active_core);
            }
        }
    }

    void KThread::EnableCoreMigration() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());

        KScopedSchedulerLock sl;
        MESOSPHERE_ASSERT(m_num_core_migration_disables > 0);
        if ((--m_num_core_migration_disables) == 0) {
            const KAffinityMask old_mask = m_physical_affinity_mask;

            /* Restore our ideals. */
            m_physical_ideal_core_id = m_original_physical_ideal_core_id;
            m_physical_affinity_mask = m_original_physical_affinity_mask;

            if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                const s32 active_core = this->GetActiveCore();

                if (!m_physical_affinity_mask.GetAffinity(active_core)) {
                    if (m_physical_ideal_core_id >= 0) {
                        this->SetActiveCore(m_physical_ideal_core_id);
                    } else {
                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask()));
                    }
                }
                KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
            }
        }
    }

    Result KThread::GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
        MESOSPHERE_ASSERT_THIS();
        {
            KScopedSchedulerLock sl;

            /* Get the virtual mask. */
            *out_ideal_core    = m_virtual_ideal_core_id;
            *out_affinity_mask = m_virtual_affinity_mask;
        }

        R_SUCCEED();
    }

    Result KThread::GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
        MESOSPHERE_ASSERT_THIS();
        {
            KScopedSchedulerLock sl;
            MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);

            /* Select between core mask and original core mask. */
            if (m_num_core_migration_disables == 0) {
                *out_ideal_core    = m_physical_ideal_core_id;
                *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
            } else {
                *out_ideal_core    = m_original_physical_ideal_core_id;
                *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
            }
        }

        R_SUCCEED();
    }
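
    /* Core masks are specified in terms of virtual cores. The virtual ideal core and affinity mask are stored as */
    /* given, then translated to physical cores for scheduling. If the thread is currently running on a core that */
    /* the new mask excludes, the caller waits for it to be unpinned (if pinned) or retries until it migrates. */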
    Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(m_parent != nullptr);
        MESOSPHERE_ASSERT(v_affinity_mask != 0);
        KScopedLightLock lk(m_activity_pause_lock);

        /* Set the core mask. */
        u64 p_affinity_mask = 0;
        {
            KScopedSchedulerLock sl;
            MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);

            /* If we're updating, set our ideal virtual core. */
            if (core_id != ams::svc::IdealCoreNoUpdate) {
                m_virtual_ideal_core_id = core_id;
            } else {
                /* Preserve our ideal core id. */
                core_id = m_virtual_ideal_core_id;
                R_UNLESS(((1ul << core_id) & v_affinity_mask) != 0, svc::ResultInvalidCombination());
            }

            /* Set our affinity mask. */
            m_virtual_affinity_mask = v_affinity_mask;

            /* Translate the virtual core to a physical core. */
            if (core_id >= 0) {
                core_id = cpu::VirtualToPhysicalCoreMap[core_id];
            }

            /* Translate the virtual affinity mask to a physical one. */
            p_affinity_mask = cpu::ConvertVirtualCoreMaskToPhysical(v_affinity_mask);

            /* If we haven't disabled migration, perform an affinity change. */
            if (m_num_core_migration_disables == 0) {
                const KAffinityMask old_mask = m_physical_affinity_mask;

                /* Set our new ideals. */
                m_physical_ideal_core_id = core_id;
                m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);

                if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                    const s32 active_core = this->GetActiveCore();

                    if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
                        const s32 new_core = m_physical_ideal_core_id >= 0 ? m_physical_ideal_core_id : BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask());
                        this->SetActiveCore(new_core);
                    }
                    KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
                }
            } else {
                /* Otherwise, we edit the original affinity for restoration later. */
                m_original_physical_ideal_core_id = core_id;
                m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
            }
        }

        /* Update the pinned waiter list. */
        ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));
        {
            bool retry_update;
            do {
                /* Lock the scheduler. */
                KScopedSchedulerLock sl;

                /* Don't do any further management if our termination has been requested. */
                R_SUCCEED_IF(this->IsTerminationRequested());

                /* By default, we won't need to retry. */
                retry_update = false;

                /* Check if the thread is currently running. */
                bool thread_is_current = false;
                s32 thread_core;
                for (thread_core = 0; thread_core < static_cast<s32>(cpu::NumCores); ++thread_core) {
                    if (Kernel::GetScheduler(thread_core).GetSchedulerCurrentThread() == this) {
                        thread_is_current = true;
                        break;
                    }
                }

                /* If the thread is currently running, check whether it's no longer allowed under the new mask. */
                if (thread_is_current && ((1ul << thread_core) & p_affinity_mask) == 0) {
                    /* If the thread is pinned, we want to wait until it's not pinned. */
                    if (this->GetStackParameters().is_pinned) {
                        /* Verify that the current thread isn't terminating. */
                        R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());

                        /* Wait until the thread isn't pinned any more. */
                        m_pinned_waiter_list.push_back(GetCurrentThread());
                        GetCurrentThread().BeginWait(std::addressof(wait_queue));
                    } else {
                        /* If the thread isn't pinned, release the scheduler lock and retry until it's not current. */
                        retry_update = true;
                    }
                }
            } while (retry_update);
        }

        R_SUCCEED();
    }

    void KThread::SetBasePriority(s32 priority) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority);

        KScopedSchedulerLock sl;

        /* Determine the priority value to use. */
        const s32 target_priority = m_termination_requested.Load() && priority >= TerminatingThreadPriority ? TerminatingThreadPriority : priority;

        /* Change our base priority. */
        if (this->GetStackParameters().is_pinned) {
            m_base_priority_on_unpin = target_priority;
        } else {
            m_base_priority = target_priority;
        }

        /* Perform a priority restoration. */
        RestorePriority(this);
    }

    void KThread::IncreaseBasePriority(s32 priority) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority);
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
        MESOSPHERE_ASSERT(!this->GetStackParameters().is_pinned);

        /* Set our base priority. */
        if (m_base_priority > priority) {
            m_base_priority = priority;

            /* Perform a priority restoration. */
            RestorePriority(this);
        }
    }

    Result KThread::SetPriorityToIdle() {
        MESOSPHERE_ASSERT_THIS();

        KScopedSchedulerLock sl;

        /* Change both our priorities to the idle thread priority. */
        const s32 old_priority = m_priority;
        m_priority      = IdleThreadPriority;
        m_base_priority = IdleThreadPriority;
        KScheduler::OnThreadPriorityChanged(this, old_priority);

        R_SUCCEED();
    }

    void KThread::RequestSuspend(SuspendType type) {
        MESOSPHERE_ASSERT_THIS();

        KScopedSchedulerLock lk;

        /* Note the request in our flags. */
        m_suspend_request_flags |= (1u << (util::ToUnderlying(ThreadState_SuspendShift) + util::ToUnderlying(type)));

        /* Try to perform the suspend. */
        this->TrySuspend();
    }

    void KThread::Resume(SuspendType type) {
        MESOSPHERE_ASSERT_THIS();

        KScopedSchedulerLock sl;

        /* Clear the request in our flags. */
        m_suspend_request_flags &= ~(1u << (util::ToUnderlying(ThreadState_SuspendShift) + util::ToUnderlying(type)));

        /* Update our state. */
        this->UpdateState();
    }

    void KThread::WaitCancel() {
        MESOSPHERE_ASSERT_THIS();

        KScopedSchedulerLock sl;

        /* Check if we're waiting and cancellable. */
        if (this->GetState() == ThreadState_Waiting && m_cancellable) {
            m_wait_cancelled = false;
            m_wait_queue->CancelWait(this, svc::ResultCancelled(), true);
        } else {
            /* Otherwise, note that we cancelled a wait. */
            m_wait_cancelled = true;
        }
    }

    void KThread::TrySuspend() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
        MESOSPHERE_ASSERT(this->IsSuspendRequested());

        /* Ensure that we have no waiters. */
        if (this->GetNumKernelWaiters() > 0) {
            return;
        }
        MESOSPHERE_ABORT_UNLESS(this->GetNumKernelWaiters() == 0);

        /* Perform the suspend. */
        this->UpdateState();
    }

    void KThread::UpdateState() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Set our suspend flags in state. */
        const auto old_state = m_thread_state;
        const auto new_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
        m_thread_state = new_state;

        /* Note the state change in scheduler. */
        if (new_state != old_state) {
            KScheduler::OnThreadStateChanged(this, old_state);
        }
    }

    void KThread::Continue() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Clear our suspend flags in state. */
        const auto old_state = m_thread_state;
        m_thread_state = static_cast<ThreadState>(old_state & ThreadState_Mask);

        /* Note the state change in scheduler. */
        KScheduler::OnThreadStateChanged(this, old_state);
    }

    size_t KThread::GetKernelStackUsage() const {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(m_kernel_stack_top != nullptr);

        #if defined(MESOSPHERE_ENABLE_KERNEL_STACK_USAGE)
        const u8 *stack = static_cast<const u8 *>(m_kernel_stack_top) - PageSize;

        size_t i;
        for (i = 0; i < PageSize; ++i) {
            if (stack[i] != 0xCC) {
                break;
            }
        }

        return PageSize - i;
        #else
        return 0;
        #endif
    }
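
    /* SetActivity toggles the SuspendType_Thread suspend reason. When pausing, the caller additionally waits */
    /* (via the pinned waiter list) until the thread is neither pinned nor currently running on any core. */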
    Result KThread::SetActivity(ams::svc::ThreadActivity activity) {
        /* Lock ourselves. */
        KScopedLightLock lk(m_activity_pause_lock);

        /* Set the activity. */
        {
            /* Lock the scheduler. */
            KScopedSchedulerLock sl;

            /* Verify our state. */
            const auto cur_state = this->GetState();
            R_UNLESS((cur_state == ThreadState_Waiting || cur_state == ThreadState_Runnable), svc::ResultInvalidState());

            /* Either pause or resume. */
            if (activity == ams::svc::ThreadActivity_Paused) {
                /* Verify that we're not suspended. */
                R_UNLESS(!this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState());

                /* Suspend. */
                this->RequestSuspend(SuspendType_Thread);
            } else {
                MESOSPHERE_ASSERT(activity == ams::svc::ThreadActivity_Runnable);

                /* Verify that we're suspended. */
                R_UNLESS(this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState());

                /* Resume. */
                this->Resume(SuspendType_Thread);
            }
        }

        /* If the thread is now paused, update the pinned waiter list. */
        if (activity == ams::svc::ThreadActivity_Paused) {
            ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));

            bool thread_is_current;
            do {
                /* Lock the scheduler. */
                KScopedSchedulerLock sl;

                /* Don't do any further management if our termination has been requested. */
                R_SUCCEED_IF(this->IsTerminationRequested());

                /* By default, treat the thread as not current. */
                thread_is_current = false;

                /* Check whether the thread is pinned. */
                if (this->GetStackParameters().is_pinned) {
                    /* Verify that the current thread isn't terminating. */
                    R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());

                    /* Wait until the thread isn't pinned any more. */
                    m_pinned_waiter_list.push_back(GetCurrentThread());
                    GetCurrentThread().BeginWait(std::addressof(wait_queue));
                } else {
                    /* Check if the thread is currently running. */
                    /* If it is, we'll need to retry. */
                    for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
                        if (Kernel::GetScheduler(i).GetSchedulerCurrentThread() == this) {
                            thread_is_current = true;
                            break;
                        }
                    }
                }
            } while (thread_is_current);
        }

        R_SUCCEED();
    }

    Result KThread::GetThreadContext3(ams::svc::ThreadContext *out) {
        /* Lock ourselves. */
        KScopedLightLock lk(m_activity_pause_lock);

        /* Get the context. */
        {
            /* Lock the scheduler. */
            KScopedSchedulerLock sl;

            /* Verify that we're suspended. */
            R_UNLESS(this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState());

            /* If we're not terminating, get the thread's user context. */
            if (!this->IsTerminationRequested()) {
                GetUserContext(out, this);
            }
        }

        R_SUCCEED();
    }

    void KThread::AddHeldLock(LockWithPriorityInheritanceInfo *lock_info) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Set ourselves as the lock's owner. */
        lock_info->SetOwner(this);

        /* Add the lock to our held list. */
        m_held_lock_info_list.push_front(*lock_info);
    }

    KThread::LockWithPriorityInheritanceInfo *KThread::FindHeldLock(KProcessAddress address_key) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Try to find an existing held lock. */
        for (auto &held_lock : m_held_lock_info_list) {
            if (held_lock.GetAddressKey() == address_key) {
                return std::addressof(held_lock);
            }
        }

        return nullptr;
    }
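
    /* Each contended address key is tracked by a LockWithPriorityInheritanceInfo owned by the lock holder. */
    /* AddWaiterImpl finds (or creates) the record for the waiter's address key and adds the waiter to it; */
    /* waiters on kernel address keys are counted separately, as they force a scheduler update. */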
    void KThread::AddWaiterImpl(KThread *thread) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
        MESOSPHERE_ASSERT(thread->GetConditionVariableTree() == nullptr);

        /* Get the thread's address key. */
        const auto address_key = thread->GetAddressKey();

        /* Keep track of how many kernel waiters we have. */
        if (IsKernelAddressKey(address_key)) {
            MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters++) >= 0);
            KScheduler::SetSchedulerUpdateNeeded();
        }

        /* Get the relevant lock info. */
        auto *lock_info = this->FindHeldLock(address_key);
        if (lock_info == nullptr) {
            /* Create a new lock for the address key. */
            lock_info = LockWithPriorityInheritanceInfo::Create(address_key);

            /* Add the new lock to our list. */
            this->AddHeldLock(lock_info);
        }

        /* Add the thread as waiter to the lock info. */
        lock_info->AddWaiter(thread);
    }

    void KThread::RemoveWaiterImpl(KThread *thread) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Keep track of how many kernel waiters we have. */
        if (IsKernelAddressKey(thread->GetAddressKey())) {
            MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters--) > 0);
            KScheduler::SetSchedulerUpdateNeeded();
        }

        /* Get the info for the lock the thread is waiting on. */
        auto *lock_info = thread->GetWaitingLockInfo();
        MESOSPHERE_ASSERT(lock_info->GetOwner() == this);

        /* Remove the waiter. */
        if (lock_info->RemoveWaiter(thread)) {
            m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
            LockWithPriorityInheritanceInfo::Free(lock_info);
        }
    }
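
    /* RestorePriority recomputes a thread's effective priority as the minimum of its base priority and the */
    /* priorities of the highest-priority waiters on every lock it holds, then walks up the chain of lock */
    /* owners so that priority inheritance propagates transitively. */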
void KThread::RestorePriority(KThread *thread) {
|
|
|
|
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
|
|
|
|
2023-02-21 21:15:01 +01:00
|
|
|
while (thread != nullptr) {
|
2020-02-15 00:27:34 +01:00
|
|
|
/* We want to inherit priority where possible. */
|
|
|
|
s32 new_priority = thread->GetBasePriority();
|
2023-02-21 21:15:01 +01:00
|
|
|
for (const auto &held_lock : thread->m_held_lock_info_list) {
|
|
|
|
new_priority = std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
|
2020-02-15 00:27:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If the priority we would inherit is not different from ours, don't do anything. */
|
|
|
|
if (new_priority == thread->GetPriority()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-02-21 21:15:01 +01:00
|
|
|
/* Get the owner of whatever lock this thread is waiting on. */
|
|
|
|
KThread * const lock_owner = thread->GetLockOwner();
|
|
|
|
|
|
|
|
/* If the thread is waiting on some lock, remove it as a waiter to prevent violating red black tree invariants. */
|
|
|
|
if (lock_owner != nullptr) {
|
|
|
|
lock_owner->RemoveWaiterImpl(thread);
|
|
|
|
}
|
|
|
|
|
2020-02-15 00:27:34 +01:00
|
|
|
/* Ensure we don't violate condition variable red black tree invariants. */
|
2020-07-15 18:15:49 +02:00
|
|
|
if (auto *cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
|
|
|
|
BeforeUpdatePriority(cv_tree, thread);
|
2020-02-15 00:27:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Change the priority. */
|
|
|
|
const s32 old_priority = thread->GetPriority();
|
|
|
|
thread->SetPriority(new_priority);
|
|
|
|
|
|
|
|
/* Restore the condition variable, if relevant. */
|
2020-07-15 18:15:49 +02:00
|
|
|
if (auto *cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
|
|
|
|
AfterUpdatePriority(cv_tree, thread);
|
2020-02-15 00:27:34 +01:00
|
|
|
}
|
|
|
|
|
2023-02-21 21:15:01 +01:00
|
|
|
/* If we removed the thread from some lock's waiting list, add it back. */
|
|
|
|
if (lock_owner != nullptr) {
|
|
|
|
lock_owner->AddWaiterImpl(thread);
|
|
|
|
}
|
|
|
|
|
2020-02-15 00:27:34 +01:00
|
|
|
/* Update the scheduler. */
|
|
|
|
KScheduler::OnThreadPriorityChanged(thread, old_priority);
|
|
|
|
|
2023-02-21 21:15:01 +01:00
|
|
|
/* Continue inheriting priority. */
|
2020-02-15 00:27:34 +01:00
|
|
|
thread = lock_owner;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void KThread::AddWaiter(KThread *thread) {
|
|
|
|
MESOSPHERE_ASSERT_THIS();
|
2023-02-21 21:15:01 +01:00
|
|
|
|
2020-02-15 00:27:34 +01:00
|
|
|
this->AddWaiterImpl(thread);
|
2023-02-21 21:15:01 +01:00
|
|
|
|
|
|
|
/* If the thread has a higher priority than us, we should inherit. */
|
|
|
|
if (thread->GetPriority() < this->GetPriority()) {
|
|
|
|
RestorePriority(this);
|
|
|
|
}
|
2020-02-15 00:27:34 +01:00
|
|
|
}

    void KThread::RemoveWaiter(KThread *thread) {
        MESOSPHERE_ASSERT_THIS();

        this->RemoveWaiterImpl(thread);

        /* If our priority is the same as the thread's (and we've inherited), we may need to restore to lower priority. */
        if (this->GetPriority() == thread->GetPriority() && this->GetPriority() < this->GetBasePriority()) {
            RestorePriority(this);
        }
    }
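
    /* How the two functions above pair up (a sketch for exposition; the real call sites live in the
     * arbiter and condition variable paths, and always run under the scheduler lock):
     *
     *     owner->AddWaiter(cur_thread);     // may boost owner via RestorePriority(owner)
     *     ...
     *     owner->RemoveWaiter(cur_thread);  // may undo the boost once the waiter is gone
     */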

    KThread *KThread::RemoveWaiterByKey(bool *out_has_waiters, KProcessAddress key) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Get the relevant lock info. */
        auto *lock_info = this->FindHeldLock(key);
        if (lock_info == nullptr) {
            *out_has_waiters = false;
            return nullptr;
        }

        /* Remove the lock info from our held list. */
        m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));

        /* Keep track of how many kernel waiters we have. */
        if (IsKernelAddressKey(lock_info->GetAddressKey())) {
            m_num_kernel_waiters -= lock_info->GetWaiterCount();
            MESOSPHERE_ABORT_UNLESS(m_num_kernel_waiters >= 0);
            KScheduler::SetSchedulerUpdateNeeded();
        }

        MESOSPHERE_ASSERT(lock_info->GetWaiterCount() > 0);

        /* Remove the highest priority waiter from the lock to be the next owner. */
        KThread *next_lock_owner = lock_info->GetHighestPriorityWaiter();
        if (lock_info->RemoveWaiter(next_lock_owner)) {
            /* The new owner was the only waiter. */
            *out_has_waiters = false;

            /* Free the lock info, since it has no waiters. */
            LockWithPriorityInheritanceInfo::Free(lock_info);
        } else {
            /* There are additional waiters on the lock. */
            *out_has_waiters = true;

            /* Add the lock to the new owner's held list. */
            next_lock_owner->AddHeldLock(lock_info);

            /* Keep track of any kernel waiters for the new owner. */
            if (IsKernelAddressKey(lock_info->GetAddressKey())) {
                next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount();
                MESOSPHERE_ABORT_UNLESS(next_lock_owner->m_num_kernel_waiters > 0);

                /* NOTE: No need to set scheduler update needed, because we will have already done so when removing earlier. */
            }
        }

        /* If our priority is the same as the next owner's (and we've inherited), we may need to restore to lower priority. */
        if (this->GetPriority() == next_lock_owner->GetPriority() && this->GetPriority() < this->GetBasePriority()) {
            RestorePriority(this);
            /* NOTE: No need to restore priority on the next lock owner, because it was already the highest priority waiter on the lock. */
        }

        /* Return the next lock owner. */
        return next_lock_owner;
    }
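
    /* Illustrative use from a lock-release path (a sketch; identifiers other than RemoveWaiterByKey
     * and EndWait are hypothetical):
     *
     *     bool has_waiters;
     *     if (KThread *next_owner = owner->RemoveWaiterByKey(std::addressof(has_waiters), addr); next_owner != nullptr) {
     *         next_owner->EndWait(ResultSuccess());
     *     }
     *
     * The out parameter tells the caller whether the new owner still has waiters behind it, i.e.
     * whether the lock word should keep its "has waiters" flag set.
     */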

    Result KThread::Run() {
        MESOSPHERE_ASSERT_THIS();

        /* If the kernel hasn't finished initializing, then we should suspend. */
        if (Kernel::GetState() != Kernel::State::Initialized) {
            this->RequestSuspend(SuspendType_Init);
        }

        while (true) {
            KScopedSchedulerLock lk;

            /* If either this thread or the current thread are requesting termination, note it. */
            R_UNLESS(!this->IsTerminationRequested(),              svc::ResultTerminationRequested());
            R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());

            /* Ensure our thread state is correct. */
            R_UNLESS(this->GetState() == ThreadState_Initialized,  svc::ResultInvalidState());

            /* If the current thread has been asked to suspend, suspend it and retry. */
            if (GetCurrentThread().IsSuspended()) {
                GetCurrentThread().UpdateState();
                continue;
            }

            /* If we're not a kernel thread and we've been asked to suspend, suspend ourselves. */
            if (KProcess *parent = this->GetOwnerProcess(); parent != nullptr) {
                if (this->IsSuspended()) {
                    this->UpdateState();
                }
                parent->IncrementRunningThreadCount();
            }

            /* Open a reference, now that we're running. */
            this->Open();

            /* Set our state and finish. */
            this->SetState(KThread::ThreadState_Runnable);

            R_SUCCEED();
        }
    }
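
    /* Note: the loop above exists so that, when the calling thread has itself been asked to suspend,
     * it suspends first and then re-checks the termination and state conditions before making the new
     * thread runnable.
     */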

    void KThread::Exit() {
        MESOSPHERE_ASSERT_THIS();

        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());

        /* Call the debug callback. */
        KDebug::OnExitThread(this);

        /* Release the thread resource hint, running thread count from parent. */
        if (m_parent != nullptr) {
            m_parent->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 0, 1);
            m_resource_limit_release_hint = true;
            m_parent->DecrementRunningThreadCount();
        }

        /* Destroy any dependent objects. */
        this->DestroyClosedObjects();

        /* Perform termination. */
        {
            KScopedSchedulerLock sl;

            /* Disallow all suspension. */
            m_suspend_allowed_flags = 0;
            this->UpdateState();

            /* Start termination. */
            this->StartTermination();

            /* Register the thread as a work task. */
            KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
        }

        MESOSPHERE_PANIC("KThread::Exit() would return");
    }

    Result KThread::Terminate() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());

        /* Request the thread terminate if it hasn't already. */
        if (const auto new_state = this->RequestTerminate(); new_state != ThreadState_Terminated) {
            /* If the thread isn't terminated, wait for it to terminate. */
            s32 index;
            KSynchronizationObject *objects[] = { this };
            R_TRY(KSynchronizationObject::Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite));
        }

        R_SUCCEED();
    }
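
    /* Note: Terminate() may block. A KThread is a KSynchronizationObject that becomes signaled when
     * it terminates, so the Wait() above returns once the target thread has finished exiting (or the
     * waiting thread is itself interrupted).
     */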

    KThread::ThreadState KThread::RequestTerminate() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());

        KScopedSchedulerLock sl;

        /* Determine if this is the first termination request. */
        const bool first_request = [&]() ALWAYS_INLINE_LAMBDA -> bool {
            /* Perform an atomic compare-and-swap from false to true. */
            bool expected = false;
            return m_termination_requested.CompareExchangeStrong(expected, true);
        }();

        /* If this is the first request, start termination procedure. */
        if (first_request) {
            /* If the thread is in initialized state, just change state to terminated. */
            if (this->GetState() == ThreadState_Initialized) {
                m_thread_state = ThreadState_Terminated;
                return ThreadState_Terminated;
            }

            /* Register the terminating dpc. */
            this->RegisterDpc(DpcFlag_Terminating);

            /* If the thread is pinned, unpin it. */
            if (this->GetStackParameters().is_pinned) {
                this->GetOwnerProcess()->UnpinThread(this);
            }

            /* If the thread is suspended, continue it. */
            if (this->IsSuspended()) {
                m_suspend_allowed_flags = 0;
                this->UpdateState();
            }

            /* Change the thread's priority to be higher than any system thread's. */
            this->IncreaseBasePriority(TerminatingThreadPriority);

            /* If the thread is runnable, send a termination interrupt to other cores. */
            if (this->GetState() == ThreadState_Runnable) {
                if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
                    cpu::DataSynchronizationBarrierInnerShareable();
                    Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
                }
            }

            /* Wake up the thread. */
            if (this->GetState() == ThreadState_Waiting) {
                m_wait_queue->CancelWait(this, svc::ResultTerminationRequested(), true);
            }
        }

        return this->GetState();
    }

    Result KThread::Sleep(s64 timeout) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread());
        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
        MESOSPHERE_ASSERT(timeout > 0);

        ThreadQueueImplForKThreadSleep wait_queue;
        KHardwareTimer *timer;
        {
            /* Setup the scheduling lock and sleep. */
            KScopedSchedulerLockAndSleep slp(std::addressof(timer), this, timeout);

            /* Check if the thread should terminate. */
            if (this->IsTerminationRequested()) {
                slp.CancelSleep();
                R_THROW(svc::ResultTerminationRequested());
            }

            /* Wait for the sleep to end. */
            wait_queue.SetHardwareTimer(timer);
            this->BeginWait(std::addressof(wait_queue));
        }

        R_SUCCEED();
    }
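
    /* Sketch of the sleep lifecycle (for exposition): BeginWait() marks the thread Waiting while the
     * scheduler lock is still held; when the KScopedSchedulerLockAndSleep goes out of scope it arms
     * the hardware timer and releases the lock, and the timeout (or a termination request) later ends
     * the wait through the thread queue. A sleeping caller is expected to invoke this as:
     *
     *     GetCurrentThread().Sleep(timeout);
     */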

    void KThread::BeginWait(KThreadQueue *queue) {
        /* Set our state as waiting. */
        this->SetState(ThreadState_Waiting);

        /* Set our wait queue. */
        m_wait_queue = queue;
    }

    void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the scheduler. */
        KScopedSchedulerLock sl;

        /* If we're waiting, notify our queue that we're available. */
        if (this->GetState() == ThreadState_Waiting) {
            m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
        }
    }

    void KThread::EndWait(Result wait_result) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the scheduler. */
        KScopedSchedulerLock sl;

        /* If we're waiting, notify our queue that our wait has ended. */
        if (this->GetState() == ThreadState_Waiting) {
            m_wait_queue->EndWait(this, wait_result);
        }
    }

    void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the scheduler. */
        KScopedSchedulerLock sl;

        /* If we're waiting, notify our queue that our wait has been cancelled. */
        if (this->GetState() == ThreadState_Waiting) {
            m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
        }
    }
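
    /* The three entry points above share one protocol (sketch for exposition): a thread parks itself
     * with BeginWait(queue), and whoever wakes it does so through that queue while holding the
     * scheduler lock:
     *
     *     thread->BeginWait(std::addressof(wait_queue));                  // state -> Waiting
     *     ...
     *     thread->EndWait(ResultSuccess());                               // normal wakeup
     *     thread->CancelWait(svc::ResultTerminationRequested(), true);    // cancellation, also cancels any timer task
     *
     * The state check in each function makes the notification a harmless no-op if the thread has
     * already stopped waiting.
     */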

    void KThread::SetState(ThreadState state) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the scheduler. */
        KScopedSchedulerLock sl;

        /* Update the masked state bits, preserving the bits outside the mask. */
        const ThreadState old_state = m_thread_state;
        m_thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));

        /* If the state changed, notify the scheduler. */
        if (m_thread_state != old_state) {
            KScheduler::OnThreadStateChanged(this, old_state);
        }
    }

    KThread *KThread::GetThreadFromId(u64 thread_id) {
        /* Lock the list. */
        KThread::ListAccessor accessor;
        const auto end = accessor.end();

        /* Find the object with the right id. */
        if (const auto it = accessor.find_key(thread_id); it != end) {
            /* Try to open the thread. */
            if (KThread *thread = static_cast<KThread *>(std::addressof(*it)); AMS_LIKELY(thread->Open())) {
                MESOSPHERE_ASSERT(thread->GetId() == thread_id);
                return thread;
            }
        }

        /* We failed to find or couldn't open the thread. */
        return nullptr;
    }
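
    /* The returned thread has had a reference opened on it, so a caller is expected to close it when
     * done (sketch for exposition):
     *
     *     if (KThread *thread = KThread::GetThreadFromId(thread_id); thread != nullptr) {
     *         ON_SCOPE_EXIT { thread->Close(); };
     *         ...
     *     }
     */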

    Result KThread::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) {
        /* Lock the list. */
        KThread::ListAccessor accessor;
        const auto end = accessor.end();

        /* Iterate over the list. */
        s32 count = 0;
        for (auto it = accessor.begin(); it != end; ++it) {
            /* If we're within array bounds, write the id. */
            if (count < max_out_count) {
                /* Get the thread id. */
                KThread *thread = static_cast<KThread *>(std::addressof(*it));
                const u64 id = thread->GetId();

                /* Copy the id to userland. */
                R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count));
            }

            /* Increment the count. */
            ++count;
        }

        /* We successfully iterated the list. */
        *out_num_threads = count;
        R_SUCCEED();
    }

}