diff --git a/libraries/libmesosphere/source/kern_k_code_memory.cpp b/libraries/libmesosphere/source/kern_k_code_memory.cpp
new file mode 100644
index 000000000..57931326d
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_code_memory.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    Result KCodeMemory::Initialize(KProcessAddress addr, size_t size) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Set our owner to the creating process; we open a reference to it below. */
+        this->owner = GetCurrentProcessPointer();
+
+        /* Construct the page group in place, using the owner page table's block info manager. */
+        auto &page_table = this->owner->GetPageTable();
+        new (GetPointer(this->page_group)) KPageGroup(page_table.GetBlockInfoManager());
+
+        /* Ensure the page group is destructed if we fail after this point. */
+        auto pg_guard = SCOPE_GUARD { GetReference(this->page_group).~KPageGroup(); };
+
+        /* Lock the memory, populating our page group with the locked pages. */
+        R_TRY(page_table.LockForCodeMemory(GetPointer(this->page_group), addr, size));
+
+        /* Fill the locked memory with a dummy pattern. */
+        for (const auto &block : GetReference(this->page_group)) {
+            /* Set to 0xFF, then store the data cache so the pattern reaches memory. */
+            std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize());
+            cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
+        }
+
+        /* Open a reference to our owner, and set the remaining tracking members. */
+        this->owner->Open();
+        this->address = addr;
+        this->is_initialized = true;
+        this->is_owner_mapped = false;
+        this->is_mapped = false;
+
+        /* We succeeded; cancel the guard so the page group persists. */
+        pg_guard.Cancel();
+        return ResultSuccess();
+    }
+
+    void KCodeMemory::Finalize() {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* If neither we nor the owner still has the memory mapped, unlock the owner's pages. */
+        if (!this->is_mapped && !this->is_owner_mapped) {
+            const size_t size = GetReference(this->page_group).GetNumPages() * PageSize;
+            MESOSPHERE_R_ABORT_UNLESS(this->owner->GetPageTable().UnlockForCodeMemory(this->address, size, GetReference(this->page_group)));
+        }
+
+        /* Close and finalize the page group, releasing the pages it references. */
+        GetReference(this->page_group).Close();
+        GetReference(this->page_group).Finalize();
+
+        /* Close the reference to our owner opened in Initialize(). */
+        this->owner->Close();
+
+        /* Perform inherited finalization. */
+        KAutoObjectWithSlabHeapAndContainer::Finalize();
+    }
+
+    Result KCodeMemory::Map(KProcessAddress address, size_t size) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Validate that the requested size matches the locked page group. */
+        R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+
+        /* Lock ourselves, serializing changes to our map state. */
+        KScopedLightLock lk(this->lock);
+
+        /* Ensure we're not already mapped. */
+        R_UNLESS(!this->is_mapped, svc::ResultInvalidState());
+
+        /* Map into the current process as user read/write CodeOut memory. */
+        R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite));
+
+        /* Mark ourselves as mapped. */
+        this->is_mapped = true;
+
+        return ResultSuccess();
+    }
+
+    Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Validate that the requested size matches the locked page group. */
+        R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+
+        /* Lock ourselves, serializing changes to our map state. */
+        KScopedLightLock lk(this->lock);
+
+        /* Unmap the CodeOut mapping from the current process. */
+        R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut));
+
+        /* Mark ourselves as unmapped; we must have been mapped for the unmap to succeed. */
+        MESOSPHERE_ASSERT(this->is_mapped);
+        this->is_mapped = false;
+
+        return ResultSuccess();
+    }
+
+    Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Validate that the requested size matches the locked page group. */
+        R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+
+        /* Lock ourselves, serializing changes to our map state. */
+        KScopedLightLock lk(this->lock);
+
+        /* Ensure we're not already mapped into the owner. */
+        R_UNLESS(!this->is_owner_mapped, svc::ResultInvalidState());
+
+        /* Convert the svc permission; only R and RX are legal, anything else aborts. */
+        KMemoryPermission k_perm;
+        switch (perm) {
+            case ams::svc::MemoryPermission_Read: k_perm = KMemoryPermission_UserRead; break;
+            case ams::svc::MemoryPermission_ReadExecute: k_perm = KMemoryPermission_UserReadExecute; break;
+            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+        }
+
+        /* Map as GeneratedCode; NOTE(review): this uses the current process's table, not this->owner's -- confirm callers always run in the owner. */
+        R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode, k_perm));
+
+        /* Mark ourselves as mapped into the owner. */
+        this->is_owner_mapped = true;
+
+        return ResultSuccess();
+    }
+
+    Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
+        MESOSPHERE_ASSERT_THIS();
+
+        /* Validate that the requested size matches the locked page group. */
+        R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+
+        /* Lock ourselves, serializing changes to our map state. */
+        KScopedLightLock lk(this->lock);
+
+        /* Unmap the GeneratedCode mapping; NOTE(review): uses the current process's table, not this->owner's -- confirm callers always run in the owner. */
+        R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode));
+
+        /* Mark ourselves as unmapped; we must have been owner-mapped for the unmap to succeed. */
+        MESOSPHERE_ASSERT(this->is_owner_mapped);
+        this->is_owner_mapped = false;
+
+        return ResultSuccess();
+    }
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp
index 31c73b6f8..df93139db 100644
--- a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp
@@ -250,7 +250,7 @@ namespace ams::kern {
it = this->memory_block_tree.insert(*new_block);
it++;
- cur_info = it->GetMemoryInfo();
+ cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
@@ -266,7 +266,7 @@ namespace ams::kern {
/* Update block state. */
it->Update(state, perm, attr);
- cur_address += cur_info.GetSize();
+ cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
/* If we already have the right properties, just advance. */
@@ -275,7 +275,7 @@ namespace ams::kern {
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
- cur_address = cur_info.GetEndAddress();
+ cur_address = cur_info.GetEndAddress();
}
}
it++;
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index b200a42ef..dcfcf775b 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -2665,6 +2665,7 @@ namespace ams::kern {
Result KPageTableBase::SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+ MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread());
/* Check that we can theoretically map. */
const KProcessAddress region_start = this->alias_region_start;