Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2024-11-15 03:27:49 +01:00)

commit 28ea0b12a8 (parent b857153964)

kern: implement SetProcessMemoryPermission
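This commit implements the kernel side of SetProcessMemoryPermission: it adds ChangePermissions / ChangePermissionsAndRefresh operation types to the arm64 KPageTable (backed by a new SeparatePages primitive), exposes SetProcessMemoryPermission through KProcessPageTable and KPageTableBase, and uses the new API from the KIP loader to apply R-X, R--, and RW- permissions to an initial process's segments.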
@@ -207,6 +207,8 @@ namespace ams::kern::arch::arm64 {

            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);

            Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll);

            static void PteDataSynchronizationBarrier() {
                cpu::DataSynchronizationBarrierInnerShareable();
            }
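SeparatePages (with its Impl helper) splits a larger block mapping so that a permission change can begin and end on arbitrary page-aligned boundaries without disturbing the rest of the block; the new refresh_mapping parameter on ChangePermissions is covered with the implementation further below.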
@@ -31,6 +31,14 @@ namespace ams::kern::arch::arm64 {

            void Finalize() { this->page_table.Finalize(); }

            Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
                return this->page_table.SetMemoryPermission(addr, size, perm);
            }

            Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
                return this->page_table.SetProcessMemoryPermission(addr, size, perm);
            }

            Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
                return this->page_table.MapIo(phys_addr, size, perm);
            }
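KProcessPageTable is a thin wrapper over the arch-specific page table, so the two new entry points simply forward to the KPageTable (and ultimately KPageTableBase) implementations added below.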
@@ -16,6 +16,7 @@

#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_address_space_info.hpp>
#include <mesosphere/kern_select_page_table.hpp>

namespace ams::kern {

@@ -61,7 +62,7 @@ namespace ams::kern {

            constexpr u64 GetProgramId() const { return this->program_id; }
            constexpr u32 GetVersion() const { return this->version; }
            constexpr u8 GetPriority() const { return this->priority; }
            constexpr u8 GetIdealCore() const { return this->ideal_core_id; }
            constexpr u8 GetIdealCoreId() const { return this->ideal_core_id; }

            constexpr bool IsRxCompressed() const { return (this->flags & (1 << 0)); }
            constexpr bool IsRoCompressed() const { return (this->flags & (1 << 1)); }
@@ -109,7 +110,7 @@ namespace ams::kern {

            }

            constexpr u8 GetPriority() const { return this->kip_header->GetPriority(); }
            constexpr u8 GetIdealCore() const { return this->kip_header->GetIdealCore(); }
            constexpr u8 GetIdealCoreId() const { return this->kip_header->GetIdealCoreId(); }
            constexpr u32 GetAffinityMask() const { return this->kip_header->GetAffinityMask(); }
            constexpr u32 GetStackSize() const { return this->kip_header->GetStackSize(); }
@@ -128,6 +129,7 @@ namespace ams::kern {

            Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
            Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
            Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
    };

}
@@ -47,9 +47,11 @@ namespace ams::kern {

            };

            enum OperationType {
                OperationType_Map                         = 0,
                OperationType_MapGroup                    = 1,
                OperationType_Unmap                       = 2,
                OperationType_ChangePermissions           = 3,
                OperationType_ChangePermissionsAndRefresh = 4,
                /* TODO: perm/attr operations */
            };
@@ -234,6 +236,7 @@ namespace ams::kern {

            Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
            Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);

            Result MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages);
            bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages);

            NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
@@ -242,6 +245,8 @@ namespace ams::kern {

                return this->GetImpl().GetPhysicalAddress(out, virt_addr);
            }

            Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
            Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
            Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
            Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
            Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
@@ -128,6 +128,8 @@ namespace ams::kern {

            constexpr u64 GetCoreMask() const { return this->capabilities.GetCoreMask(); }
            constexpr u64 GetPriorityMask() const { return this->capabilities.GetPriorityMask(); }

            constexpr void SetIdealCoreId(s32 core_id) { this->ideal_core_id = core_id; }

            constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; }

            KThread *GetPreemptionStatePinnedThread(s32 core_id) const {
@@ -175,6 +175,10 @@ namespace ams::kern::arch::arm64 {

            switch (operation) {
                case OperationType_Map:
                    return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
                case OperationType_ChangePermissions:
                    return this->ChangePermissions(virt_addr, num_pages, entry_template, false, page_list, reuse_ll);
                case OperationType_ChangePermissionsAndRefresh:
                    return this->ChangePermissions(virt_addr, num_pages, entry_template, true, page_list, reuse_ll);
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }
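Both new operation types funnel into ChangePermissions; the only difference is the refresh_mapping argument, which is true for OperationType_ChangePermissionsAndRefresh.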
@@ -418,6 +422,7 @@ namespace ams::kern::arch::arm64 {

                    }
                }
                break;
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }

        /* Close the blocks. */
@@ -762,6 +767,139 @@ namespace ams::kern::arch::arm64 {

        return ResultSuccess();
    }

    Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* Separate pages before we change permissions. */
        const size_t size = num_pages * PageSize;
        R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
        if (num_pages > 1) {
            const auto end_page  = virt_addr + size;
            const auto last_page = end_page - PageSize;

            auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
            R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
            merge_guard.Cancel();
        }
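The block size passed to SeparatePages is std::min(GetInteger(addr) & -GetInteger(addr), size): addr & -addr isolates the lowest set bit of the address, i.e. the largest power-of-two alignment it satisfies, which is the coarsest block that could still be mapped at that boundary. A standalone sketch of the arithmetic (not kernel code; all values hypothetical):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        /* Hypothetical values: a 2 MiB-aligned address and a 64 KiB region. */
        const uint64_t virt_addr = 0x0000004000600000;
        const uint64_t size      = 0x10000;

        /* Lowest set bit == largest power-of-two alignment of the address. */
        const uint64_t alignment  = virt_addr & (0 - virt_addr);
        const uint64_t block_size = std::min(alignment, size);

        /* Prints: alignment=0x200000 block_size=0x10000 */
        std::printf("alignment=0x%llx block_size=0x%llx\n",
                    (unsigned long long)alignment, (unsigned long long)block_size);
        return 0;
    }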
        /* Cache initial addresses for use on cleanup. */
        const KProcessAddress orig_virt_addr = virt_addr;
        size_t remaining_pages = num_pages;

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry   next_entry;
        MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr));

        /* Continue changing properties until we've changed them for all pages. */
        while (remaining_pages > 0) {
            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
            MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);

            L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
            switch (next_entry.block_size) {
                case L1BlockSize:
                    {
                        /* Clear the entry, if we should. */
                        if (refresh_mapping) {
                            *l1_entry = InvalidL1PageTableEntry;
                            this->NoteUpdated();
                            if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
                                cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), L1BlockSize);
                            }
                        }

                        /* Write the updated entry. */
                        *l1_entry = L1PageTableEntry(next_entry.phys_addr, entry_template, false);
                    }
                    break;
                case L2ContiguousBlockSize:
                case L2BlockSize:
                    {
                        /* Get the number of L2 blocks. */
                        const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;

                        /* Get the L2 entry. */
                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);

                        /* Clear the entry, if we should. */
                        if (refresh_mapping) {
                            for (size_t i = 0; i < num_l2_blocks; i++) {
                                *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
                            }
                            this->NoteUpdated();
                            if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
                                cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
                            }
                        }

                        /* Write the updated entry. */
                        const bool contig = next_entry.block_size == L2ContiguousBlockSize;
                        for (size_t i = 0; i < num_l2_blocks; i++) {
                            *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = L2PageTableEntry(next_entry.phys_addr + L2BlockSize * i, entry_template, contig);
                        }
                    }
                    break;
                case L3ContiguousBlockSize:
                case L3BlockSize:
                    {
                        /* Get the number of L3 blocks. */
                        const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;

                        /* Get the L2 entry. */
                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
                        L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr);

                        /* Get the L3 entry. */
                        KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
                        MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
                        const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);

                        /* Clear the entry, if we should. */
                        if (refresh_mapping) {
                            for (size_t i = 0; i < num_l3_blocks; i++) {
                                *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
                            }
                            this->NoteUpdated();
                            if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
                                cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
                            }
                        }

                        /* Write the updated entry. */
                        const bool contig = next_entry.block_size == L3ContiguousBlockSize;
                        for (size_t i = 0; i < num_l3_blocks; i++) {
                            *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = L3PageTableEntry(next_entry.phys_addr + L3BlockSize * i, entry_template, contig);
                        }
                    }
                    break;
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }

            /* Advance. */
            virt_addr       += next_entry.block_size;
            remaining_pages -= next_entry.block_size / PageSize;
            if (remaining_pages == 0) {
                break;
            }
            MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
        }

        /* We've succeeded, now perform what coalescing we can. */
        this->MergePages(orig_virt_addr, page_list);
        if (num_pages > 1) {
            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
        }

        return ResultSuccess();
    }
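When refresh_mapping is true, each case follows a break-before-make sequence: the descriptor is first replaced with an invalid entry, NoteUpdated() is called so the stale translation can be invalidated from the TLBs, the data cache is flushed for heap-backed physical pages, and only then is the new descriptor written from the template. This matches the ARMv8 requirement that a translation not be changed in place when doing so could let the CPU observe two different valid mappings at once.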

    void KPageTable::FinalizeUpdate(PageLinkedList *page_list) {
        while (page_list->Peek()) {
            KVirtualAddress page = KVirtualAddress(page_list->Pop());
@@ -90,11 +90,14 @@ namespace ams::kern {

            }

            /* Set the process's memory permissions. */
            MESOSPHERE_TODO("Set process's memory permissions");
            MESOSPHERE_R_ABORT_UNLESS(reader.SetMemoryPermissions(new_process->GetPageTable(), params));

            /* Register the process. */
            KProcess::Register(new_process);

            /* Set the ideal core id. */
            new_process->SetIdealCoreId(reader.GetIdealCoreId());

            /* Save the process info. */
            infos[i].process    = new_process;
            infos[i].stack_size = reader.GetStackSize();
@@ -188,4 +188,32 @@ namespace ams::kern {

        return ResultSuccess();
    }

    Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const {
        const size_t rx_size  = this->kip_header->GetRxSize();
        const size_t ro_size  = this->kip_header->GetRoSize();
        const size_t rw_size  = this->kip_header->GetRwSize();
        const size_t bss_size = this->kip_header->GetBssSize();

        /* Set R-X pages. */
        if (rx_size) {
            const uintptr_t start = this->kip_header->GetRxAddress() + params.code_address;
            R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(rx_size, PageSize), ams::svc::MemoryPermission_ReadExecute));
        }

        /* Set R-- pages. */
        if (ro_size) {
            const uintptr_t start = this->kip_header->GetRoAddress() + params.code_address;
            R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(ro_size, PageSize), ams::svc::MemoryPermission_Read));
        }

        /* Set RW- pages. */
        if (rw_size || bss_size) {
            const uintptr_t start = (rw_size ? this->kip_header->GetRwAddress() : this->kip_header->GetBssAddress()) + params.code_address;
            const uintptr_t end   = (bss_size ? this->kip_header->GetBssAddress() + bss_size : this->kip_header->GetRwAddress() + rw_size) + params.code_address;
            R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite));
        }

        return ResultSuccess();
    }

}
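The RW- case covers .rwdata and .bss with a single permission change: start is the beginning of whichever segment exists first and end is the end of whichever exists last, so adjacent segments are handled in one call. A standalone sketch of the arithmetic with made-up segment offsets:

    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t PageSize = 0x1000;

    constexpr uint64_t AlignUp(uint64_t value, uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    int main() {
        /* Hypothetical KIP layout: segment offsets relative to code_address. */
        const uint64_t code_address = 0x0000000008000000;
        const uint64_t rw_address   = 0x3000, rw_size  = 0x1200;
        const uint64_t bss_address  = 0x4200, bss_size = 0x0800;

        /* Both segments present: span from start of .rwdata to end of .bss. */
        const uint64_t start = rw_address + code_address;
        const uint64_t end   = bss_address + bss_size + code_address;

        /* Prints: RW- region at 0x8003000, size 0x2000 */
        std::printf("RW- region at 0x%llx, size 0x%llx\n",
                    (unsigned long long)start,
                    (unsigned long long)AlignUp(end - start, PageSize));
        return 0;
    }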
@@ -544,7 +544,63 @@ namespace ams::kern {

        return ResultSuccess();
    }

    Result KPageTableBase::MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        const size_t size = num_pages * PageSize;

        /* We're making a new group, not adding to an existing one. */
        R_UNLESS(pg.empty(), svc::ResultInvalidCurrentMemory());

        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry   next_entry;
        R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr), svc::ResultInvalidCurrentMemory());

        /* Prepare tracking variables. */
        KPhysicalAddress cur_addr = next_entry.phys_addr;
        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
        size_t tot_size = cur_size;

        /* Iterate, adding to group as we go. */
        while (tot_size < size) {
            R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)), svc::ResultInvalidCurrentMemory());

            if (next_entry.phys_addr != (cur_addr + cur_size)) {
                const size_t cur_pages = cur_size / PageSize;

                R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
                R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages));

                cur_addr = next_entry.phys_addr;
                cur_size = next_entry.block_size;
            } else {
                cur_size += next_entry.block_size;
            }

            tot_size += next_entry.block_size;
        }

        /* Ensure we add the right amount for the last block. */
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        /* Add the last block. */
        const size_t cur_pages = cur_size / PageSize;
        R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
        R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages));

        return ResultSuccess();
    }

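MakePageGroup turns a virtual range into a list of physically contiguous runs: as long as each traversal entry starts where the previous run ends, the run is extended; otherwise the run is flushed into the group and a new one begins. A standalone sketch of the coalescing loop over made-up traversal output:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
        /* Hypothetical (phys_addr, block_size) pairs from a page-table walk. */
        const std::vector<std::pair<uint64_t, uint64_t>> entries = {
            {0x80000000, 0x1000},   /* start of first run */
            {0x80001000, 0x1000},   /* contiguous: extends the run */
            {0x90000000, 0x2000},   /* discontinuity: flush, start new run */
        };

        uint64_t cur_addr = entries[0].first;
        uint64_t cur_size = entries[0].second;
        for (size_t i = 1; i < entries.size(); ++i) {
            if (entries[i].first == cur_addr + cur_size) {
                cur_size += entries[i].second;
            } else {
                std::printf("block: addr=0x%llx size=0x%llx\n",
                            (unsigned long long)cur_addr, (unsigned long long)cur_size);
                cur_addr = entries[i].first;
                cur_size = entries[i].second;
            }
        }
        /* Prints the final run: addr=0x90000000 size=0x2000 */
        std::printf("block: addr=0x%llx size=0x%llx\n",
                    (unsigned long long)cur_addr, (unsigned long long)cur_size);
        return 0;
    }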
    bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        const size_t size = num_pages * PageSize;

        /* Empty groups are necessarily invalid. */
        if (pg.empty()) {
            return false;
@@ -582,7 +638,7 @@ namespace ams::kern {

        size_t tot_size = cur_size;

        /* Iterate, comparing expected to actual. */
        while (tot_size < num_pages * PageSize) {
        while (tot_size < size) {
            if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
                return false;
            }
@@ -614,8 +670,8 @@ namespace ams::kern {

        }

        /* Ensure we compare the right amount for the last block. */
        if (tot_size > num_pages * PageSize) {
            cur_size -= (tot_size - num_pages * PageSize);
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        if (!IsHeapPhysicalAddress(cur_addr)) {
@@ -629,6 +685,70 @@ namespace ams::kern {

        return cur_block_address == GetHeapVirtualAddress(cur_addr) && cur_block_pages == (cur_size / PageSize);
    }

    Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
        MESOSPHERE_TODO_IMPLEMENT();
    }

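Note that only SetProcessMemoryPermission is implemented by this commit; KPageTableBase::SetMemoryPermission (presumably the backend for svc::SetMemoryPermission) is left as a stub.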
    Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
        const size_t num_pages = size / PageSize;

        /* Lock the table. */
        KScopedLightLock lk(this->general_lock);

        /* Verify we can change the memory permission. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, addr, size, KMemoryState_FlagCode, KMemoryState_FlagCode, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Make a new page group for the region. */
        KPageGroup pg(this->block_info_manager);

        /* Determine new perm/state. */
        const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
        KMemoryState new_state = old_state;
        const bool is_w = (new_perm & KMemoryPermission_UserWrite)   == KMemoryPermission_UserWrite;
        const bool is_x = (new_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute;
        MESOSPHERE_ASSERT(!(is_w && is_x));

        if (is_w) {
            switch (old_state) {
                case KMemoryState_Code:      new_state = KMemoryState_CodeData;      break;
                case KMemoryState_AliasCode: new_state = KMemoryState_AliasCodeData; break;
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }

        /* Create a page group, if we're setting execute permissions. */
        if (is_x) {
            R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
        }

        /* Create an update allocator. */
        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
        R_TRY(allocator.GetResult());

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { new_perm, false, false, false };
        const auto operation = is_x ? OperationType_ChangePermissionsAndRefresh : OperationType_ChangePermissions;
        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, operation, false));

        /* Update the blocks. */
        this->memory_block_manager.Update(&allocator, addr, num_pages, new_state, new_perm, KMemoryAttribute_None);

        /* Ensure cache coherency, if we're setting pages as executable. */
        if (is_x) {
            for (const auto &block : pg) {
                cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
            }
            cpu::InvalidateEntireInstructionCache();
        }

        return ResultSuccess();
    }
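The assert encodes a W^X policy for code regions: a permission may be writable or executable, never both. Making code writable reclassifies it (Code becomes CodeData, AliasCode becomes AliasCodeData); making it executable takes the ChangePermissionsAndRefresh path and then stores the data cache for every block in the page group and invalidates the entire instruction cache, so the CPU cannot fetch stale instructions from the newly executable pages.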

    Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));