2020-02-08 11:49:32 +01:00
|
|
|
/*
|
2021-10-04 21:59:10 +02:00
|
|
|
* Copyright (c) Atmosphère-NX
|
2020-02-08 11:49:32 +01:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms and conditions of the GNU General Public License,
|
|
|
|
* version 2, as published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
|
|
* more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
#include <mesosphere.hpp>
|
|
|
|
|
|
|
|
namespace ams::kern {
|
|
|
|
|
2020-02-19 10:22:27 +01:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
/* Kernel-spawned (initial) processes receive ids in [InitialProcessIdMin, InitialProcessIdMax]. */
constexpr u64 InitialProcessIdMin = 1;
constexpr u64 InitialProcessIdMax = 0x50;

/* All other processes receive ids in (InitialProcessIdMax, u64 max]. */
constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();

/* Monotonic counters from which the next id of each kind is allocated (post-incremented at assignment). */
constinit util::Atomic<u64> g_initial_process_id = InitialProcessIdMin;
constinit util::Atomic<u64> g_process_id = ProcessIdMin;
|
2020-02-19 10:22:27 +01:00
|
|
|
|
2021-04-07 17:17:15 +02:00
|
|
|
/* Request termination of every thread in process (except thread_to_not_terminate), then wait */
/* until each such thread has actually reached the Terminated state. Returns early (propagating */
/* the result) only if terminating a child reports svc::ResultTerminationRequested. */
Result TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) {
    /* Request that all children threads terminate. */
    {
        KScopedLightLock proc_lk(process->GetListLock());
        KScopedSchedulerLock sl;

        if (thread_to_not_terminate != nullptr && process->GetPinnedThread(GetCurrentCoreId()) == thread_to_not_terminate) {
            /* NOTE: Here Nintendo unpins the current thread instead of the thread_to_not_terminate. */
            /* This is valid because the only caller which uses non-nullptr as argument uses GetCurrentThreadPointer(), */
            /* but it's still notable because it seems incorrect at first glance. */
            process->UnpinCurrentThread();
        }

        auto &thread_list = process->GetThreadList();
        for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
            if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
                if (thread->GetState() != KThread::ThreadState_Terminated) {
                    thread->RequestTerminate();
                }
            }
        }
    }

    /* Wait for all children threads to terminate. */
    while (true) {
        /* Get the next child. */
        /* NOTE: the list lock is dropped before actually terminating the child, so we re-scan */
        /* the list from the beginning on each iteration rather than holding an iterator across waits. */
        KThread *cur_child = nullptr;
        {
            KScopedLightLock proc_lk(process->GetListLock());

            auto &thread_list = process->GetThreadList();
            for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
                if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
                    if (thread->GetState() != KThread::ThreadState_Terminated) {
                        /* Open a reference so the thread can't be destroyed while we operate on it. */
                        if (AMS_LIKELY(thread->Open())) {
                            cur_child = thread;
                            break;
                        }
                    }
                }
            }
        }

        /* If we didn't find any non-terminated children, we're done. */
        if (cur_child == nullptr) {
            break;
        }

        /* Terminate and close the thread. */
        ON_SCOPE_EXIT { cur_child->Close(); };

        /* Propagate only a termination-requested result; other results continue the loop. */
        if (const Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
            R_THROW(terminate_result);
        }
    }

    R_SUCCEED();
}
|
|
|
|
|
2021-09-19 19:11:56 +02:00
|
|
|
/* Thread queue used while a thread waits to become the process's exception thread. */
/* On a normal wakeup, the waking thread is recorded into the provided slot (presumably */
/* KProcess's m_exception_thread -- confirm against the EnterUserException caller) before */
/* the standard end-of-wait processing runs. */
class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
    private:
        KThread **m_exception_thread; /* Out-slot written with the thread whose wait ends. */
    public:
        constexpr ThreadQueueImplForKProcessEnterUserException(KThread **t) : KThreadQueue(), m_exception_thread(t) { /* ... */ }

        virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
            /* Set the exception thread. */
            *m_exception_thread = waiting_thread;

            /* Invoke the base end wait handler. */
            KThreadQueue::EndWait(waiting_thread, wait_result);
        }

        virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
            /* Remove the thread as a waiter on its mutex owner. */
            /* NOTE(review): assumes GetLockOwner() is non-null for a cancelled waiter on this queue. */
            waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

            /* Invoke the base cancel wait handler. */
            KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
        }
};
|
|
|
|
|
2020-02-19 10:22:27 +01:00
|
|
|
}
|
|
|
|
|
2020-02-18 14:04:49 +01:00
|
|
|
/* Tear down all resources owned by the process: the process local region, the page table, */
/* the system resource, shared-memory/io-region bookkeeping, and the resource-limit reservation. */
/* Presumably runs only after the process can no longer execute -- confirm with termination path. */
void KProcess::Finalize() {
    /* Delete the process local region. */
    this->DeleteThreadLocalRegion(m_plr_address);

    /* Get the used memory size. */
    /* Captured before the page table is finalized, so it reflects the memory actually in use. */
    const size_t used_memory_size = this->GetUsedNonSystemUserPhysicalMemorySize();

    /* Finalize the page table. */
    m_page_table.Finalize();

    /* Finish using our system resource. */
    {
        if (m_system_resource->IsSecureResource()) {
            /* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */
            Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
        }

        m_system_resource->Close();
    }

    /* Free all shared memory infos. */
    {
        auto it = m_shared_memory_list.begin();
        while (it != m_shared_memory_list.end()) {
            KSharedMemoryInfo *info = std::addressof(*it);
            KSharedMemory *shmem = info->GetSharedMemory();

            /* Each reference held on the info corresponds to one reference on the memory, */
            /* so close the memory once per info reference, plus once for the final close. */
            while (!info->Close()) {
                shmem->Close();
            }
            shmem->Close();

            it = m_shared_memory_list.erase(it);
            KSharedMemoryInfo::Free(info);
        }
    }

    /* Close all references to our io regions. */
    {
        auto it = m_io_region_list.begin();
        while (it != m_io_region_list.end()) {
            KIoRegion *io_region = std::addressof(*it);
            /* Erase before closing, since Close() may destroy the object. */
            it = m_io_region_list.erase(it);

            io_region->Close();
        }
    }

    /* Our thread local page list must be empty at this point. */
    MESOSPHERE_ABORT_UNLESS(m_partially_used_tlp_tree.empty());
    MESOSPHERE_ABORT_UNLESS(m_fully_used_tlp_tree.empty());

    /* Release memory to the resource limit. */
    /* m_memory_release_hint was already released by FinishTermination(); release the remainder here. */
    if (m_resource_limit != nullptr) {
        MESOSPHERE_ABORT_UNLESS(used_memory_size >= m_memory_release_hint);
        m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - m_memory_release_hint);
        m_resource_limit->Close();
    }

    /* Log that we finalized for debug. */
    MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", m_process_id, m_name);

    /* Perform inherited finalization. */
    KSynchronizationObject::Finalize();
}
|
|
|
|
|
2020-02-19 10:22:27 +01:00
|
|
|
/* Common initialization shared by both process-creation paths: validates the capability- */
/* declared kernel version, creates/zeroes the process local region, copies the name, and */
/* initializes all remaining member state. Callers are expected to have set up the page */
/* table and capabilities beforehand (both overloads below do). */
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
    /* Validate that the intended kernel version is high enough for us to support. */
    R_UNLESS(m_capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination());

    /* Validate that the intended kernel version isn't too high for us to support. */
    R_UNLESS(m_capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination());

    /* Create and clear the process local region. */
    R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
    m_plr_heap_address = this->GetThreadLocalRegionPointer(m_plr_address);
    std::memset(m_plr_heap_address, 0, ams::svc::ThreadLocalRegionSize);

    /* Copy in the name from parameters. */
    /* The static_assert guarantees room for the null terminator written below. */
    static_assert(sizeof(params.name) < sizeof(m_name));
    std::memcpy(m_name, params.name, sizeof(params.name));
    m_name[sizeof(params.name)] = 0;

    /* Set misc fields. */
    m_state = State_Created;
    m_main_thread_stack_size = 0;
    m_used_kernel_memory_size = 0;
    m_ideal_core_id = 0;
    m_flags = params.flags;
    m_version = params.version;
    m_program_id = params.program_id;
    m_code_address = params.code_address;
    m_code_size = params.code_num_pages * PageSize;
    m_is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
    m_is_jit_debug = false;

    #if defined(MESOSPHERE_ENABLE_PROCESS_CREATION_TIME)
    m_creation_time = KHardwareTimer::GetTick();
    #endif

    /* Set thread fields. */
    for (size_t i = 0; i < cpu::NumCores; i++) {
        m_running_threads[i] = nullptr;
        m_pinned_threads[i] = nullptr;
        m_running_thread_idle_counts[i] = 0;
        m_running_thread_switch_counts[i] = 0;
    }

    /* Set max memory based on address space type. */
    switch ((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
        case ams::svc::CreateProcessFlag_AddressSpace32Bit:
        case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
        case ams::svc::CreateProcessFlag_AddressSpace64Bit:
            m_max_process_memory = m_page_table.GetHeapRegionSize();
            break;
        case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
            /* With no alias region, its size is folded into the usable process memory. */
            m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
            break;
        MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
    }

    /* Generate random entropy. */
    KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy));

    /* Clear remaining fields. */
    m_num_running_threads = 0;
    m_num_process_switches = 0;
    m_num_thread_switches = 0;
    m_num_fpu_switches = 0;
    m_num_supervisor_calls = 0;
    m_num_ipc_messages = 0;

    m_is_signaled = false;
    m_attached_object = nullptr;
    m_exception_thread = nullptr;
    m_is_suspended = false;
    m_memory_release_hint = 0;
    m_schedule_count = 0;
    m_is_handle_table_initialized = false;

    /* We're initialized! */
    m_is_initialized = true;

    R_SUCCEED();
}
|
|
|
|
|
2021-04-07 17:23:21 +02:00
|
|
|
/* Initialization path for initial (kernel-created) processes: code pages are supplied as a */
/* KPageGroup and capabilities as a kernel-space array. Assigns an id from the initial-process */
/* id range. On any failure, intermediate state (system resource, page table) is rolled back */
/* by the ON_RESULT_FAILURE handlers in reverse order of setup. */
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal) {
    MESOSPHERE_ASSERT_THIS();
    MESOSPHERE_ASSERT(res_limit != nullptr);
    /* Guard against overflow of code_num_pages * PageSize. */
    MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages));

    /* Set members. */
    m_memory_pool = pool;
    m_resource_limit = res_limit;
    m_is_default_application_system_resource = false;
    m_is_immortal = immortal;

    /* Setup our system resource. */
    if (const size_t system_resource_num_pages = params.system_resource_num_pages; system_resource_num_pages != 0) {
        /* Create a secure system resource. */
        KSecureSystemResource *secure_resource = KSecureSystemResource::Create();
        R_UNLESS(secure_resource != nullptr, svc::ResultOutOfResource());

        ON_RESULT_FAILURE { secure_resource->Close(); };

        /* Initialize the secure resource. */
        R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, m_resource_limit, m_memory_pool));

        /* Set our system resource. */
        m_system_resource = secure_resource;
    } else {
        /* Use the system-wide system resource. */
        const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
        m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());

        m_is_default_application_system_resource = is_app;

        /* Open reference to the system resource. */
        m_system_resource->Open();
    }

    /* Ensure we clean up our secure resource, if we fail. */
    ON_RESULT_FAILURE { m_system_resource->Close(); };

    /* Setup page table. */
    {
        /* Without ASLR, mappings are allocated from the back of the address space. */
        const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
        R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
    }
    ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };

    /* Ensure we can insert the code region. */
    R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());

    /* Map the code region. */
    R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead));

    /* Initialize capabilities. */
    R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table)));

    /* Initialize the process id. */
    m_process_id = g_initial_process_id++;
    MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id);
    MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax);

    /* Initialize the rest of the process. */
    R_TRY(this->Initialize(params));

    /* Open a reference to the resource limit. */
    m_resource_limit->Open();

    /* We succeeded! */
    R_SUCCEED();
}
|
|
|
|
|
2020-07-22 07:13:16 +02:00
|
|
|
/* Initialization path for user-created processes (svc::CreateProcess): capabilities arrive */
/* via a user pointer and code memory is reserved against the resource limit and mapped fresh. */
/* Assigns an id from the regular process id range; the reservation is committed only on success. */
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) {
    MESOSPHERE_ASSERT_THIS();
    MESOSPHERE_ASSERT(res_limit != nullptr);

    /* Set pool and resource limit. */
    m_memory_pool = pool;
    m_resource_limit = res_limit;
    m_is_default_application_system_resource = false;
    m_is_immortal = false;

    /* Get the memory sizes. */
    const size_t code_num_pages = params.code_num_pages;
    const size_t system_resource_num_pages = params.system_resource_num_pages;
    const size_t code_size = code_num_pages * PageSize;
    const size_t system_resource_size = system_resource_num_pages * PageSize;

    /* Reserve memory for our code resource. */
    /* The reservation is released automatically unless Commit() is reached at the end. */
    KScopedResourceReservation memory_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, code_size);
    R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

    /* Setup our system resource. */
    if (system_resource_num_pages != 0) {
        /* Create a secure system resource. */
        KSecureSystemResource *secure_resource = KSecureSystemResource::Create();
        R_UNLESS(secure_resource != nullptr, svc::ResultOutOfResource());

        ON_RESULT_FAILURE { secure_resource->Close(); };

        /* Initialize the secure resource. */
        R_TRY(secure_resource->Initialize(system_resource_size, m_resource_limit, m_memory_pool));

        /* Set our system resource. */
        m_system_resource = secure_resource;

    } else {
        /* Use the system-wide system resource. */
        const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
        m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());

        m_is_default_application_system_resource = is_app;

        /* Open reference to the system resource. */
        m_system_resource->Open();
    }

    /* Ensure we clean up our secure resource, if we fail. */
    ON_RESULT_FAILURE { m_system_resource->Close(); };

    /* Setup page table. */
    {
        /* Without ASLR, mappings are allocated from the back of the address space. */
        const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
        R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
    }
    ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };

    /* Ensure we can insert the code region. */
    R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());

    /* Map the code region. */
    R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped)));

    /* Initialize capabilities. */
    R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table)));

    /* Initialize the process id. */
    m_process_id = g_process_id++;
    MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id);
    MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax);

    /* If we should optimize memory allocations, do so. */
    if (m_system_resource->IsSecureResource() && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) {
        R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(m_process_id, pool));
    }

    /* Initialize the rest of the process. */
    R_TRY(this->Initialize(params));

    /* Open a reference to the resource limit. */
    m_resource_limit->Open();

    /* We succeeded, so commit our memory reservation. */
    memory_reservation.Commit();
    R_SUCCEED();
}
|
|
|
|
|
2021-10-25 05:41:38 +02:00
|
|
|
/* Worker-thread continuation of process exit: terminates remaining child threads, */
/* finalizes the handle table (unless the process is immortal), notifies the debugger, */
/* and completes termination. Scheduled via KWorkerTaskManager by Exit()/Terminate(). */
void KProcess::DoWorkerTaskImpl() {
    /* Terminate child threads. */
    TerminateChildren(this, nullptr);

    /* Finalize the handle table, if we're not immortal. */
    if (!m_is_immortal && m_is_handle_table_initialized) {
        this->FinalizeHandleTable();
    }

    /* Call the debug callback. */
    KDebug::OnExitProcess(this);

    /* Finish termination. */
    this->FinishTermination();
}
|
|
|
|
|
2021-04-07 17:17:15 +02:00
|
|
|
/* Begin terminating this process from the context of one of its own threads: terminates */
/* every child thread except the caller, then finalizes the handle table (unless immortal). */
/* The handle table is finalized even when TerminateChildren fails, via the scope-exit guard. */
Result KProcess::StartTermination() {
    /* Finalize the handle table when we're done, if the process isn't immortal. */
    ON_SCOPE_EXIT {
        if (!m_is_immortal) {
            this->FinalizeHandleTable();
        }
    };

    /* Terminate child threads other than the current one. */
    R_RETURN(TerminateChildren(this, GetCurrentThreadPointer()));
}
|
|
|
|
|
|
|
|
/* Complete process termination: releases a resource-limit hint for the memory still held, */
/* transitions to State_Terminated under the scheduler lock, and drops a reference to the */
/* process. No-op for immortal processes. */
void KProcess::FinishTermination() {
    /* Only allow termination to occur if the process isn't immortal. */
    if (!m_is_immortal) {
        /* Release resource limit hint. */
        /* The hint is remembered so Finalize() can release only the remainder later. */
        if (m_resource_limit != nullptr) {
            m_memory_release_hint = this->GetUsedNonSystemUserPhysicalMemorySize();
            m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, m_memory_release_hint);
        }

        /* Change state. */
        {
            KScopedSchedulerLock sl;
            this->ChangeState(State_Terminated);
        }

        /* Close. */
        this->Close();
    }
}
|
|
|
|
|
2020-07-10 05:11:41 +02:00
|
|
|
/* Exit the process from one of its own threads. Transitions running states to */
/* State_Terminating, queues the remaining teardown onto a worker thread, and then */
/* exits the calling thread. This function does not return. */
void KProcess::Exit() {
    MESOSPHERE_ASSERT_THIS();

    /* Determine whether we need to start terminating. */
    bool needs_terminate = false;
    {
        KScopedLightLock lk(m_state_lock);
        KScopedSchedulerLock sl;

        /* Exit is invalid from pre-run, crashed, or already-terminated states. */
        MESOSPHERE_ASSERT(m_state != State_Created);
        MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
        MESOSPHERE_ASSERT(m_state != State_Crashed);
        MESOSPHERE_ASSERT(m_state != State_Terminated);
        if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_DebugBreak) {
            this->ChangeState(State_Terminating);
            needs_terminate = true;
        }
    }

    /* If we need to start termination, do so. */
    if (needs_terminate) {
        this->StartTermination();

        /* Note for debug that we're exiting the process. */
        MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name);

        /* Register the process as a work task. */
        KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
    }

    /* Exit the current thread. */
    GetCurrentThread().Exit();
    MESOSPHERE_PANIC("Thread survived call to exit");
}
|
|
|
|
|
2020-07-22 20:15:45 +02:00
|
|
|
/* Terminate the process from outside (e.g. another process via svc::TerminateProcess). */
/* Unlike Exit(), the caller's thread survives; if synchronous termination fails, the */
/* remaining teardown is deferred to a worker task instead. */
Result KProcess::Terminate() {
    MESOSPHERE_ASSERT_THIS();

    /* Determine whether we need to start terminating */
    bool needs_terminate = false;
    {
        KScopedLightLock lk(m_state_lock);

        /* Check whether we're allowed to terminate. */
        R_UNLESS(m_state != State_Created, svc::ResultInvalidState());
        R_UNLESS(m_state != State_CreatedAttached, svc::ResultInvalidState());

        KScopedSchedulerLock sl;

        if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
            this->ChangeState(State_Terminating);
            needs_terminate = true;
        }
    }

    /* If we need to terminate, do so. */
    if (needs_terminate) {
        /* Start termination. */
        if (R_SUCCEEDED(this->StartTermination())) {
            /* Note for debug that we're terminating the process. */
            MESOSPHERE_LOG("KProcess::Terminate() OK pid=%ld name=%-12s\n", m_process_id, m_name);

            /* Call the debug callback. */
            KDebug::OnTerminateProcess(this);

            /* Finish termination. */
            this->FinishTermination();
        } else {
            /* Note for debug that we're terminating the process. */
            MESOSPHERE_LOG("KProcess::Terminate() FAIL pid=%ld name=%-12s\n", m_process_id, m_name);

            /* Register the process as a work task. */
            /* The worker will re-run child termination and finish the teardown. */
            KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
        }
    }

    R_SUCCEED();
}
|
|
|
|
|
2020-07-22 11:50:19 +02:00
|
|
|
Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
    /* Guard against concurrent modification of our shared memory bookkeeping. */
    KScopedLightLock lk(m_state_lock);

    /* The mapping address and size play no role in this bookkeeping. */
    MESOSPHERE_UNUSED(address, size);

    /* Look for an entry already tracking this shared memory. */
    KSharedMemoryInfo *tracking = nullptr;
    for (auto &entry : m_shared_memory_list) {
        if (entry.GetSharedMemory() == shmem) {
            tracking = std::addressof(entry);
            break;
        }
    }

    /* No entry yet; allocate and register a fresh one. */
    if (tracking == nullptr) {
        /* Allocate a new info. */
        tracking = KSharedMemoryInfo::Allocate();
        R_UNLESS(tracking != nullptr, svc::ResultOutOfResource());

        /* Initialize the info and add it to our list. */
        tracking->Initialize(shmem);
        m_shared_memory_list.push_back(*tracking);
    }

    /* Take a reference on both the shared memory and its tracking entry. */
    shmem->Open();
    tracking->Open();

    R_SUCCEED();
}
|
|
|
|
|
|
|
|
void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
    /* Guard against concurrent modification of our shared memory bookkeeping. */
    KScopedLightLock lk(m_state_lock);

    /* The mapping address and size play no role in this bookkeeping. */
    MESOSPHERE_UNUSED(address, size);

    /* Locate the tracking entry for this shared memory; it must exist. */
    KSharedMemoryInfo *tracking = nullptr;
    auto pos = m_shared_memory_list.begin();
    while (pos != m_shared_memory_list.end()) {
        if (pos->GetSharedMemory() == shmem) {
            tracking = std::addressof(*pos);
            break;
        }
        ++pos;
    }
    MESOSPHERE_ABORT_UNLESS(tracking != nullptr);

    /* Drop one reference on the entry; if it was the last, unlink and free it. */
    if (tracking->Close()) {
        m_shared_memory_list.erase(pos);
        KSharedMemoryInfo::Free(tracking);
    }

    /* Drop the reference on the shared memory itself. */
    shmem->Close();
}
|
|
|
|
|
2021-09-18 22:26:21 +02:00
|
|
|
void KProcess::AddIoRegion(KIoRegion *io_region) {
    /* Guard against concurrent modification of our io region list. */
    KScopedLightLock list_guard(m_state_lock);

    /* Take a reference on the region before tracking it. */
    io_region->Open();

    /* Track the region in our list. */
    m_io_region_list.push_back(*io_region);
}
|
|
|
|
|
|
|
|
void KProcess::RemoveIoRegion(KIoRegion *io_region) {
    /* Unlink the region from our list under lock. */
    {
        /* Guard against concurrent modification of our io region list. */
        KScopedLightLock list_guard(m_state_lock);

        /* Erase the list node corresponding to this region. */
        auto node = m_io_region_list.iterator_to(*io_region);
        m_io_region_list.erase(node);
    }

    /* Drop the reference we took when the region was added. */
    io_region->Close();
}
|
|
|
|
|
2020-02-19 15:46:59 +01:00
|
|
|
/* Reserve a thread local region for this process, reusing a slot from a partially used */
/* thread-local page when one exists, and allocating/mapping a new page otherwise. */
/* On success, *out holds the address of the reserved region. */
Result KProcess::CreateThreadLocalRegion(KProcessAddress *out) {
    KThreadLocalPage *tlp = nullptr;
    KProcessAddress tlr = Null<KProcessAddress>;

    /* See if we can get a region from a partially used TLP. */
    {
        KScopedSchedulerLock sl;

        if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
            tlr = it->Reserve();
            MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);

            /* If that reservation filled the page, move it to the fully used tree. */
            if (it->IsAllUsed()) {
                tlp = std::addressof(*it);
                m_partially_used_tlp_tree.erase(it);
                m_fully_used_tlp_tree.insert(*tlp);
            }

            *out = tlr;
            R_SUCCEED();
        }
    }

    /* Allocate a new page. */
    tlp = KThreadLocalPage::Allocate();
    R_UNLESS(tlp != nullptr, svc::ResultOutOfMemory());
    ON_RESULT_FAILURE { KThreadLocalPage::Free(tlp); };

    /* Initialize the new page. */
    R_TRY(tlp->Initialize(this));

    /* Reserve a TLR. */
    tlr = tlp->Reserve();
    MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);

    /* Insert into our tree. */
    {
        KScopedSchedulerLock sl;
        if (tlp->IsAllUsed()) {
            m_fully_used_tlp_tree.insert(*tlp);
        } else {
            m_partially_used_tlp_tree.insert(*tlp);
        }
    }

    /* We succeeded! */
    *out = tlr;
    R_SUCCEED();
}
|
|
|
|
|
2020-07-21 13:58:54 +02:00
|
|
|
/* Release the thread local region at addr, moving its containing page between the */
/* partially/fully-used trees as needed, and freeing the page once every region in it */
/* is free. Fails with svc::ResultInvalidAddress if addr's page is not tracked. */
Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
    KThreadLocalPage *page_to_free = nullptr;

    /* Release the region. */
    {
        KScopedSchedulerLock sl;

        /* Try to find the page in the partially used list. */
        /* Pages are keyed by their page-aligned base address. */
        auto it = m_partially_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize));
        if (it == m_partially_used_tlp_tree.end()) {
            /* If we don't find it, it has to be in the fully used list. */
            it = m_fully_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize));
            R_UNLESS(it != m_fully_used_tlp_tree.end(), svc::ResultInvalidAddress());

            /* Release the region. */
            it->Release(addr);

            /* Move the page out of the fully used list. */
            KThreadLocalPage *tlp = std::addressof(*it);
            m_fully_used_tlp_tree.erase(it);
            if (tlp->IsAllFree()) {
                page_to_free = tlp;
            } else {
                m_partially_used_tlp_tree.insert(*tlp);
            }
        } else {
            /* Release the region. */
            it->Release(addr);

            /* Handle the all-free case. */
            KThreadLocalPage *tlp = std::addressof(*it);
            if (tlp->IsAllFree()) {
                m_partially_used_tlp_tree.erase(it);
                page_to_free = tlp;
            }
        }
    }

    /* If we should free the page it was in, do so. */
    /* Finalize/Free happen outside the scheduler lock. */
    if (page_to_free != nullptr) {
        page_to_free->Finalize();

        KThreadLocalPage::Free(page_to_free);
    }

    R_SUCCEED();
}
|
|
|
|
|
2020-02-19 15:46:59 +01:00
|
|
|
    /* Translates a thread-local-region process address into a kernel-accessible pointer.   */
    /* Returns nullptr if addr's page is in neither TLP tree.                               */
    /* NOTE(review): the returned pointer is computed from tlp after the scheduler lock is  */
    /* dropped — presumably safe because the caller guarantees the region stays allocated.  */
    void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) {
        KThreadLocalPage *tlp = nullptr;
        {
            KScopedSchedulerLock sl;
            /* Look up the owning page by its page-aligned key, in either tree. */
            if (auto it = m_partially_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize)); it != m_partially_used_tlp_tree.end()) {
                tlp = std::addressof(*it);
            } else if (auto it = m_fully_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize)); it != m_fully_used_tlp_tree.end()) {
                tlp = std::addressof(*it);
            } else {
                return nullptr;
            }
        }
        /* Add the sub-page offset of addr to the page's kernel pointer. */
        return static_cast<u8 *>(tlp->GetPointer()) + (GetInteger(addr) & (PageSize - 1));
    }
|
|
|
|
|
2020-02-20 05:42:21 +01:00
|
|
|
bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value) {
|
|
|
|
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
|
|
|
|
return rl->Reserve(which, value);
|
|
|
|
} else {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout) {
|
|
|
|
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
|
|
|
|
return rl->Reserve(which, value, timeout);
|
|
|
|
} else {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value) {
|
|
|
|
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
|
|
|
|
rl->Release(which, value);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint) {
|
|
|
|
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
|
|
|
|
rl->Release(which, value, hint);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-07 17:17:15 +02:00
|
|
|
    /* Atomically increments the count of running threads in this process. */
    void KProcess::IncrementRunningThreadCount() {
        /* The count must never be negative. */
        MESOSPHERE_ASSERT(m_num_running_threads.Load() >= 0);

        ++m_num_running_threads;
    }
|
|
|
|
|
2021-04-07 17:17:15 +02:00
|
|
|
    /* Atomically decrements the count of running threads; when the last running */
    /* thread exits (count transitions 1 -> 0), terminates the process.          */
    void KProcess::DecrementRunningThreadCount() {
        /* We must have at least one running thread to decrement. */
        MESOSPHERE_ASSERT(m_num_running_threads.Load() > 0);

        /* Post-decrement returns the previous value; prev == 1 means we were the last runner. */
        if (const auto prev = m_num_running_threads--; prev == 1) {
            this->Terminate();
        }
    }
|
|
|
|
|
2020-07-11 03:39:53 +02:00
|
|
|
    /* Attempts to claim the process's exception-thread slot for the current thread.      */
    /* Returns true once the slot is claimed; returns false if the current thread already */
    /* holds the slot, or if its wait/claim was aborted by termination.                   */
    bool KProcess::EnterUserException() {
        /* Get the current thread. */
        KThread *cur_thread = GetCurrentThreadPointer();
        MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess());

        /* Check that we haven't already claimed the exception thread. */
        if (m_exception_thread == cur_thread) {
            return false;
        }

        /* Create the wait queue we'll be using. */
        ThreadQueueImplForKProcessEnterUserException wait_queue(std::addressof(m_exception_thread));

        /* Claim the exception thread. */
        {
            /* Lock the scheduler. */
            KScopedSchedulerLock sl;

            /* Check that we're not terminating. */
            if (cur_thread->IsTerminationRequested()) {
                return false;
            }

            /* If we don't have an exception thread, we can just claim it directly. */
            if (m_exception_thread == nullptr) {
                m_exception_thread = cur_thread;
                KScheduler::SetSchedulerUpdateNeeded();
                return true;
            }

            /* Otherwise, we need to wait until we don't have an exception thread. */

            /* Add the current thread as a waiter on the current exception thread. */
            /* The low bit tags the address key; ReleaseUserException removes waiters by the same key. */
            cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
            m_exception_thread->AddWaiter(cur_thread);

            /* Wait to claim the exception thread. */
            cur_thread->BeginWait(std::addressof(wait_queue));
        }

        /* If our wait didn't end due to thread termination, we succeeded. */
        return !svc::ResultTerminationRequested::Includes(cur_thread->GetWaitResult());
    }
|
|
|
|
|
|
|
|
    /* Releases the exception-thread slot on behalf of the current thread. */
    /* Thin wrapper around ReleaseUserException.                           */
    bool KProcess::LeaveUserException() {
        return this->ReleaseUserException(GetCurrentThreadPointer());
    }
|
|
|
|
|
|
|
|
    /* Releases the exception-thread slot if `thread` currently holds it, waking one  */
    /* waiter (if any) that was queued in EnterUserException. Returns whether the     */
    /* slot was actually released.                                                    */
    bool KProcess::ReleaseUserException(KThread *thread) {
        KScopedSchedulerLock sl;

        if (m_exception_thread == thread) {
            m_exception_thread = nullptr;

            /* Remove waiter thread. */
            /* The key matches the tagged address set by EnterUserException. */
            bool has_waiters;
            if (KThread *next = thread->RemoveWaiterByKey(std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1); next != nullptr) {
                /* Wake the next claimant; its EnterUserException wait ends successfully. */
                next->EndWait(ResultSuccess());
            }

            KScheduler::SetSchedulerUpdateNeeded();

            return true;
        } else {
            return false;
        }
    }
|
|
|
|
|
2020-02-20 04:38:20 +01:00
|
|
|
    /* Adds a thread to this process's thread list, under the list lock. */
    void KProcess::RegisterThread(KThread *thread) {
        KScopedLightLock lk(m_list_lock);

        m_thread_list.push_back(*thread);
    }
|
|
|
|
|
|
|
|
    /* Removes a thread from this process's thread list, under the list lock. */
    /* The thread must currently be registered (iterator_to requires membership). */
    void KProcess::UnregisterThread(KThread *thread) {
        KScopedLightLock lk(m_list_lock);

        m_thread_list.erase(m_thread_list.iterator_to(*thread));
    }
|
|
|
|
|
2020-07-13 21:17:28 +02:00
|
|
|
size_t KProcess::GetUsedUserPhysicalMemorySize() const {
|
2020-12-18 02:18:47 +01:00
|
|
|
const size_t norm_size = m_page_table.GetNormalMemorySize();
|
|
|
|
const size_t other_size = m_code_size + m_main_thread_stack_size;
|
2023-10-11 18:13:59 +02:00
|
|
|
const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
|
2020-05-29 09:57:25 +02:00
|
|
|
|
|
|
|
return norm_size + other_size + sec_size;
|
|
|
|
}
|
|
|
|
|
2020-07-13 21:17:28 +02:00
|
|
|
size_t KProcess::GetTotalUserPhysicalMemorySize() const {
|
2020-05-29 09:57:25 +02:00
|
|
|
/* Get the amount of free and used size. */
|
2020-12-18 02:18:47 +01:00
|
|
|
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
|
|
|
|
const size_t max_size = m_max_process_memory;
|
2020-07-13 21:17:28 +02:00
|
|
|
|
2023-10-11 18:13:59 +02:00
|
|
|
/* Determine used size. */
|
|
|
|
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
|
|
|
|
const size_t norm_size = m_page_table.GetNormalMemorySize();
|
|
|
|
const size_t other_size = m_code_size + m_main_thread_stack_size;
|
|
|
|
const size_t sec_size = this->GetRequiredSecureMemorySize();
|
|
|
|
const size_t used_size = norm_size + other_size + sec_size;
|
|
|
|
|
|
|
|
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
|
2020-07-13 21:17:28 +02:00
|
|
|
if (used_size + free_size > max_size) {
|
|
|
|
return max_size;
|
|
|
|
} else {
|
2023-10-11 18:13:59 +02:00
|
|
|
return free_size + this->GetUsedUserPhysicalMemorySize();
|
2020-07-13 21:17:28 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
|
2020-12-18 02:18:47 +01:00
|
|
|
const size_t norm_size = m_page_table.GetNormalMemorySize();
|
|
|
|
const size_t other_size = m_code_size + m_main_thread_stack_size;
|
2020-07-13 21:17:28 +02:00
|
|
|
|
|
|
|
return norm_size + other_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
|
|
|
|
/* Get the amount of free and used size. */
|
2020-12-18 02:18:47 +01:00
|
|
|
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
|
|
|
|
const size_t max_size = m_max_process_memory;
|
2020-05-29 09:57:25 +02:00
|
|
|
|
2023-10-11 18:13:59 +02:00
|
|
|
/* Determine used size. */
|
|
|
|
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
|
|
|
|
const size_t norm_size = m_page_table.GetNormalMemorySize();
|
|
|
|
const size_t other_size = m_code_size + m_main_thread_stack_size;
|
|
|
|
const size_t sec_size = this->GetRequiredSecureMemorySize();
|
|
|
|
const size_t used_size = norm_size + other_size + sec_size;
|
|
|
|
|
|
|
|
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
|
2020-05-29 09:57:25 +02:00
|
|
|
if (used_size + free_size > max_size) {
|
2023-10-11 18:13:59 +02:00
|
|
|
return max_size - this->GetRequiredSecureMemorySizeNonDefault();
|
2020-05-29 09:57:25 +02:00
|
|
|
} else {
|
2023-10-11 18:13:59 +02:00
|
|
|
return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
|
2020-05-29 09:57:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-20 04:38:20 +01:00
|
|
|
    /* Starts the process: allocates/maps the main thread stack, sets the max heap size, */
    /* initializes the handle table, creates and runs the main thread, and transitions   */
    /* state Created(/Attached) -> Running(/Attached). The ON_RESULT_FAILURE guards       */
    /* unwind stack mapping, handle table, and state in reverse order on failure.        */
    Result KProcess::Run(s32 priority, size_t stack_size) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock ourselves, to prevent concurrent access. */
        KScopedLightLock lk(m_state_lock);

        /* Validate that we're in a state where we can initialize. */
        const auto state = m_state;
        R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState());

        /* Place a tentative reservation of a thread for this process. */
        KScopedResourceReservation thread_reservation(this, ams::svc::LimitableResource_ThreadCountMax);
        R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());

        /* Ensure that we haven't already allocated stack. */
        MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0);

        /* Ensure that we're allocating a valid stack. */
        stack_size = util::AlignUp(stack_size, PageSize);
        R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory());
        /* The second check guards against unsigned overflow of stack_size + m_code_size. */
        R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory());

        /* Place a tentative reservation of memory for our new stack. */
        KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, stack_size);
        R_UNLESS(mem_reservation.Succeeded(), svc::ResultLimitReached());

        /* Allocate and map our stack. */
        KProcessAddress stack_top = Null<KProcessAddress>;
        if (stack_size) {
            KProcessAddress stack_bottom;
            R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));

            stack_top = stack_bottom + stack_size;
            m_main_thread_stack_size = stack_size;
        }

        /* Ensure our stack is safe to clean up on exit. */
        ON_RESULT_FAILURE {
            if (m_main_thread_stack_size) {
                MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack));
                m_main_thread_stack_size = 0;
            }
        };

        /* Set our maximum heap size. */
        R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory - (m_main_thread_stack_size + m_code_size)));

        /* Initialize our handle table. */
        R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
        ON_RESULT_FAILURE_2 { this->FinalizeHandleTable(); };

        /* Create a new thread for the process. */
        KThread *main_thread = KThread::Create();
        R_UNLESS(main_thread != nullptr, svc::ResultOutOfResource());
        /* Drop our creation reference unconditionally; the handle table holds its own. */
        ON_SCOPE_EXIT { main_thread->Close(); };

        /* Initialize the thread. */
        R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, m_ideal_core_id, this));

        /* Register the thread, and commit our reservation. */
        KThread::Register(main_thread);
        thread_reservation.Commit();

        /* Add the thread to our handle table. */
        ams::svc::Handle thread_handle;
        R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));

        /* Set the thread arguments. */
        main_thread->GetContext().SetArguments(0, thread_handle);

        /* Update our state. */
        this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
        ON_RESULT_FAILURE_2 { this->ChangeState(state); };

        /* Run our thread. */
        R_TRY(main_thread->Run());

        /* Open a reference to represent that we're running. */
        this->Open();

        /* We succeeded! Commit our memory reservation. */
        mem_reservation.Commit();

        /* Note for debug that we're running a new process. */
        MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());

        R_SUCCEED();
    }
|
|
|
|
|
2020-07-14 11:45:06 +02:00
|
|
|
    /* Clears the process's signaled state (KSynchronizationObject reset semantics).     */
    /* Fails with svc::ResultInvalidState() if terminated or not currently signaled.     */
    Result KProcess::Reset() {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the process and the scheduler. */
        KScopedLightLock lk(m_state_lock);
        KScopedSchedulerLock sl;

        /* Validate that we're in a state that we can reset. */
        R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());
        R_UNLESS(m_is_signaled, svc::ResultInvalidState());

        /* Clear signaled. */
        m_is_signaled = false;
        R_SUCCEED();
    }
|
|
|
|
|
2020-07-23 12:04:43 +02:00
|
|
|
    /* Pauses or resumes the process: suspends (or resumes) every thread in the list   */
    /* with SuspendType_Process and updates m_is_suspended. Lock order (state, list,   */
    /* scheduler) matches the rest of the file.                                        */
    Result KProcess::SetActivity(ams::svc::ProcessActivity activity) {
        /* Lock ourselves and the scheduler. */
        KScopedLightLock lk(m_state_lock);
        KScopedLightLock list_lk(m_list_lock);
        KScopedSchedulerLock sl;

        /* Validate our state. */
        R_UNLESS(m_state != State_Terminating, svc::ResultInvalidState());
        R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());

        /* Either pause or resume. */
        if (activity == ams::svc::ProcessActivity_Paused) {
            /* Verify that we're not suspended. */
            R_UNLESS(!m_is_suspended, svc::ResultInvalidState());

            /* Suspend all threads. */
            auto end = this->GetThreadList().end();
            for (auto it = this->GetThreadList().begin(); it != end; ++it) {
                it->RequestSuspend(KThread::SuspendType_Process);
            }

            /* Set ourselves as suspended. */
            this->SetSuspended(true);
        } else {
            /* The svc ABI only defines Paused and Runnable activities. */
            MESOSPHERE_ASSERT(activity == ams::svc::ProcessActivity_Runnable);

            /* Verify that we're suspended. */
            R_UNLESS(m_is_suspended, svc::ResultInvalidState());

            /* Resume all threads. */
            auto end = this->GetThreadList().end();
            for (auto it = this->GetThreadList().begin(); it != end; ++it) {
                it->Resume(KThread::SuspendType_Process);
            }

            /* Set ourselves as resumed. */
            this->SetSuspended(false);
        }

        R_SUCCEED();
    }
|
|
|
|
|
2020-08-03 08:06:29 +02:00
|
|
|
    /* Pins the current thread to its current core, unless it is terminating. */
    /* Must be called with the scheduler lock held.                           */
    void KProcess::PinCurrentThread() {
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Get the current thread. */
        const s32 core_id = GetCurrentCoreId();
        KThread *cur_thread = GetCurrentThreadPointer();

        /* If the thread isn't terminated, pin it. */
        if (!cur_thread->IsTerminationRequested()) {
            /* Pin it. */
            /* Record the pin in our per-core tracking, then mark the thread pinned. */
            this->PinThread(core_id, cur_thread);
            cur_thread->Pin();

            /* An update is needed. */
            KScheduler::SetSchedulerUpdateNeeded();
        }
    }
|
|
|
|
|
|
|
|
void KProcess::UnpinCurrentThread() {
|
|
|
|
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
|
|
|
|
|
|
|
/* Get the current thread. */
|
|
|
|
const s32 core_id = GetCurrentCoreId();
|
|
|
|
KThread *cur_thread = GetCurrentThreadPointer();
|
|
|
|
|
|
|
|
/* Unpin it. */
|
|
|
|
cur_thread->Unpin();
|
|
|
|
this->UnpinThread(core_id, cur_thread);
|
|
|
|
|
|
|
|
/* An update is needed. */
|
|
|
|
KScheduler::SetSchedulerUpdateNeeded();
|
|
|
|
}
|
|
|
|
|
2021-04-08 00:30:13 +02:00
|
|
|
void KProcess::UnpinThread(KThread *thread) {
|
|
|
|
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
|
|
|
|
|
|
|
/* Get the thread's core id. */
|
|
|
|
const auto core_id = thread->GetActiveCore();
|
|
|
|
|
|
|
|
/* Unpin it. */
|
|
|
|
this->UnpinThread(core_id, thread);
|
|
|
|
thread->Unpin();
|
|
|
|
|
|
|
|
/* An update is needed. */
|
|
|
|
KScheduler::SetSchedulerUpdateNeeded();
|
|
|
|
}
|
|
|
|
|
2020-07-31 01:52:11 +02:00
|
|
|
Result KProcess::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) {
|
|
|
|
/* Lock the list. */
|
2020-12-18 02:18:47 +01:00
|
|
|
KScopedLightLock lk(m_list_lock);
|
2020-07-31 01:52:11 +02:00
|
|
|
|
|
|
|
/* Iterate over the list. */
|
|
|
|
s32 count = 0;
|
|
|
|
auto end = this->GetThreadList().end();
|
|
|
|
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
|
|
|
|
/* If we're within array bounds, write the id. */
|
|
|
|
if (count < max_out_count) {
|
|
|
|
/* Get the thread id. */
|
|
|
|
KThread *thread = std::addressof(*it);
|
|
|
|
const u64 id = thread->GetId();
|
|
|
|
|
|
|
|
/* Copy the id to userland. */
|
|
|
|
R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Increment the count. */
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We successfully iterated the list. */
|
|
|
|
*out_num_threads = count;
|
2022-02-14 23:45:32 +01:00
|
|
|
R_SUCCEED();
|
2020-07-31 01:52:11 +02:00
|
|
|
}
|
|
|
|
|
2020-07-19 05:03:27 +02:00
|
|
|
    /* Attaches a debug object to this process, transitioning Created -> CreatedAttached */
    /* or Running/Crashed -> DebugBreak. Returns the pre-attach state so the debugger    */
    /* can restore it on detach (see ClearDebugObject).                                  */
    KProcess::State KProcess::SetDebugObject(void *debug_object) {
        /* Attaching should only happen to non-null objects while the scheduler is locked. */
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
        MESOSPHERE_ASSERT(debug_object != nullptr);

        /* Cache our state to return it to the debug object. */
        const auto old_state = m_state;

        /* Set the object. */
        m_attached_object = debug_object;

        /* Check that our state is valid for attach. */
        MESOSPHERE_ASSERT(m_state == State_Created || m_state == State_Running || m_state == State_Crashed);

        /* Update our state. */
        if (m_state != State_DebugBreak) {
            if (m_state == State_Created) {
                this->ChangeState(State_CreatedAttached);
            } else {
                this->ChangeState(State_DebugBreak);
            }
        }

        return old_state;
    }
|
|
|
|
|
2020-07-20 05:06:21 +02:00
|
|
|
    /* Detaches the debug object from this process and restores an appropriate state: */
    /* CreatedAttached -> Created; RunningAttached/DebugBreak -> old_state (clamped so */
    /* a running process can never transition back to Created).                        */
    void KProcess::ClearDebugObject(KProcess::State old_state) {
        /* Detaching from process should only happen while the scheduler is locked. */
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Clear the attached object. */
        m_attached_object = nullptr;

        /* Validate that the process is in an attached state. */
        MESOSPHERE_ASSERT(m_state == State_CreatedAttached || m_state == State_RunningAttached || m_state == State_DebugBreak || m_state == State_Terminating || m_state == State_Terminated);

        /* Change the state appropriately. */
        /* NOTE: Terminating/Terminated states are left unchanged. */
        if (m_state == State_CreatedAttached) {
            this->ChangeState(State_Created);
        } else if (m_state == State_RunningAttached || m_state == State_DebugBreak) {
            /* Disallow transition back to created from running. */
            if (old_state == State_Created) {
                old_state = State_Running;
            }
            this->ChangeState(old_state);
        }
    }
|
|
|
|
|
2020-07-31 09:04:43 +02:00
|
|
|
    /* Puts the current process into JIT-debug state: suspends all threads, changes     */
    /* state to Crashed, and records the debug event/exception/params for a debugger    */
    /* to retrieve via GetJitDebugInfo. Returns whether the process ended up in a       */
    /* debuggable state. Retries while the current thread is suspended.                 */
    bool KProcess::EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) {
        /* Check that we're the current process. */
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(this == GetCurrentProcessPointer());

        /* If we aren't allowed to enter jit debug, don't. */
        if ((m_flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) {
            return false;
        }

        /* We're the current process, so we should be some kind of running. */
        MESOSPHERE_ASSERT(m_state != State_Created);
        MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
        MESOSPHERE_ASSERT(m_state != State_Terminated);

        /* Try to enter JIT debug. */
        while (true) {
            /* Lock ourselves and the scheduler. */
            KScopedLightLock lk(m_state_lock);
            KScopedLightLock list_lk(m_list_lock);
            KScopedSchedulerLock sl;

            /* If we're attached to a debugger, we're necessarily in debug. */
            if (this->IsAttachedToDebugger()) {
                return true;
            }

            /* If the current thread is terminating, we can't enter debug. */
            if (GetCurrentThread().IsTerminationRequested()) {
                return false;
            }

            /* We're not attached to debugger, so check that. */
            MESOSPHERE_ASSERT(m_state != State_RunningAttached);
            MESOSPHERE_ASSERT(m_state != State_DebugBreak);

            /* If we're terminating, we can't enter debug. */
            if (m_state != State_Running && m_state != State_Crashed) {
                MESOSPHERE_ASSERT(m_state == State_Terminating);
                return false;
            }

            /* If the current thread is suspended, retry. */
            /* All locks are released on continue, letting the suspension resolve. */
            if (GetCurrentThread().IsSuspended()) {
                continue;
            }

            /* Suspend all our threads. */
            {
                auto end = this->GetThreadList().end();
                for (auto it = this->GetThreadList().begin(); it != end; ++it) {
                    it->RequestSuspend(KThread::SuspendType_Debug);
                }
            }

            /* Change our state to crashed. */
            this->ChangeState(State_Crashed);

            /* Enter jit debug. */
            m_is_jit_debug = true;
            m_jit_debug_event_type = event;
            m_jit_debug_exception_type = exception;
            m_jit_debug_params[0] = param1;
            m_jit_debug_params[1] = param2;
            m_jit_debug_params[2] = param3;
            m_jit_debug_params[3] = param4;
            m_jit_debug_thread_id = GetCurrentThread().GetId();

            /* Exit our retry loop. */
            break;
        }

        /* Check if our state indicates we're in jit debug. */
        /* Re-check under the scheduler lock, since state may have changed since the loop. */
        {
            KScopedSchedulerLock sl;

            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
                return true;
            }
        }

        return false;
    }
|
|
|
|
|
2020-07-19 05:03:27 +02:00
|
|
|
    /* Builds a debug event from the recorded JIT-debug state (set by EnterJitDebug), */
    /* or returns nullptr when no JIT-debug info is pending.                          */
    KEventInfo *KProcess::GetJitDebugInfo() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        if (m_is_jit_debug) {
            /* Params are packed as { exception type, params 1-4 }. */
            const uintptr_t params[5] = { m_jit_debug_exception_type, m_jit_debug_params[0], m_jit_debug_params[1], m_jit_debug_params[2], m_jit_debug_params[3] };
            return KDebugBase::CreateDebugEvent(m_jit_debug_event_type, m_jit_debug_thread_id, params, util::size(params));
        } else {
            return nullptr;
        }
    }
|
|
|
|
|
2020-07-31 05:49:58 +02:00
|
|
|
    /* Clears the pending JIT-debug flag; the event/params fields are left stale */
    /* and are only meaningful while m_is_jit_debug is set.                      */
    void KProcess::ClearJitDebugInfo() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        m_is_jit_debug = false;
    }
|
|
|
|
|
2020-07-14 22:22:08 +02:00
|
|
|
KProcess *KProcess::GetProcessFromId(u64 process_id) {
|
|
|
|
/* Lock the list. */
|
2020-07-14 22:36:35 +02:00
|
|
|
KProcess::ListAccessor accessor;
|
2020-07-14 22:22:08 +02:00
|
|
|
const auto end = accessor.end();
|
|
|
|
|
|
|
|
/* Iterate over the list. */
|
|
|
|
for (auto it = accessor.begin(); it != end; ++it) {
|
|
|
|
/* Get the process. */
|
|
|
|
KProcess *process = static_cast<KProcess *>(std::addressof(*it));
|
|
|
|
|
|
|
|
if (process->GetId() == process_id) {
|
|
|
|
if (AMS_LIKELY(process->Open())) {
|
|
|
|
return process;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We failed to find the process. */
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
Result KProcess::GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer<u64 *> out_process_ids, s32 max_out_count) {
|
|
|
|
/* Lock the list. */
|
2020-07-14 22:36:35 +02:00
|
|
|
KProcess::ListAccessor accessor;
|
2020-07-14 22:22:08 +02:00
|
|
|
const auto end = accessor.end();
|
|
|
|
|
|
|
|
/* Iterate over the list. */
|
|
|
|
s32 count = 0;
|
|
|
|
for (auto it = accessor.begin(); it != end; ++it) {
|
|
|
|
/* If we're within array bounds, write the id. */
|
|
|
|
if (count < max_out_count) {
|
|
|
|
/* Get the process id. */
|
|
|
|
KProcess *process = static_cast<KProcess *>(std::addressof(*it));
|
|
|
|
const u64 id = process->GetId();
|
|
|
|
|
|
|
|
/* Copy the id to userland. */
|
|
|
|
R_TRY(out_process_ids.CopyArrayElementFrom(std::addressof(id), count));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Increment the count. */
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We successfully iterated the list. */
|
|
|
|
*out_num_processes = count;
|
2022-02-14 23:45:32 +01:00
|
|
|
R_SUCCEED();
|
2020-07-14 22:22:08 +02:00
|
|
|
}
|
|
|
|
|
2020-02-08 11:49:32 +01:00
|
|
|
}
|