Repository: https://github.com/Atmosphere-NX/Atmosphere.git
kern: implement Svc(Un)MapInsecureMemory
parent: 5a918f3bc9
commit: e7a1e0fee2
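This commit adds the kernel side of two new supervisor calls, svcMapInsecureMemory and svcUnmapInsecureMemory: a new KMemoryState_Insecure state, per-process accounting in m_mapped_insecure_memory, KPageTableBase::MapInsecureMemory/UnmapInsecureMemory, KSystemControlBase hooks that choose the resource limit and memory pool backing insecure memory, and the SVC handler file. As a rough orientation before the diff, the sketch below shows how a caller might drive the pair; the user-mode wrapper names (ams::svc::MapInsecureMemory / ams::svc::UnmapInsecureMemory) and the surrounding scaffolding are assumptions for illustration and are not part of this commit.

/* Illustrative usage sketch only. Assumes the Atmosphere userland headers and that */
/* the SVC definition tables generate ams::svc::(Un)MapInsecureMemory wrappers      */
/* matching the 64-bit handlers added below; neither is guaranteed by this diff.    */
#include <stratosphere.hpp>

namespace ams {

    namespace {

        constexpr size_t PageSize = 0x1000;

        Result UseInsecureMemory(uintptr_t address, size_t size) {
            /* The kernel rejects unaligned, zero-sized, or overflowing ranges, and the   */
            /* range must fall where KMemoryState_Insecure can live (alias-code region).  */
            AMS_ASSERT(util::IsAligned(address, PageSize));
            AMS_ASSERT(util::IsAligned(size, PageSize) && size > 0);

            /* Map the insecure memory; on success it is UserReadWrite and heap-fill initialized. */
            R_TRY(svc::MapInsecureMemory(address, size));

            /* ... use the mapping ... */

            /* Unmap it again; this releases the charge against the insecure resource limit. */
            R_RETURN(svc::UnmapInsecureMemory(address, size));
        }

    }

}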
@@ -110,6 +110,14 @@ namespace ams::kern::arch::arm64 {
                R_RETURN(m_page_table.MapRegion(region_type, perm));
            }

            Result MapInsecureMemory(KProcessAddress address, size_t size) {
                R_RETURN(m_page_table.MapInsecureMemory(address, size));
            }

            Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
                R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
            }

            Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
                R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
            }

@@ -110,6 +110,10 @@ namespace ams::kern {
        KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,

        KMemoryState_Coverage = ams::svc::MemoryState_Coverage | KMemoryState_FlagMapped,

        KMemoryState_Insecure = ams::svc::MemoryState_Insecure | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped | KMemoryState_FlagCanChangeAttribute
                                                                | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap
                                                                | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
    };

    #if 1

@@ -136,6 +140,7 @@ namespace ams::kern {
    static_assert(KMemoryState_GeneratedCode == 0x04402214);
    static_assert(KMemoryState_CodeOut == 0x04402015);
    static_assert(KMemoryState_Coverage == 0x00002016);
    static_assert(KMemoryState_Insecure == 0x05583817);
    #endif

    enum KMemoryPermission : u8 {

@@ -165,6 +165,7 @@ namespace ams::kern {
            size_t m_max_heap_size;
            size_t m_mapped_physical_memory_size;
            size_t m_mapped_unsafe_physical_memory;
            size_t m_mapped_insecure_memory;
            size_t m_mapped_ipc_server_memory;
            mutable KLightLock m_general_lock;
            mutable KLightLock m_map_physical_memory_lock;

@@ -191,8 +192,8 @@ namespace ams::kern {
                  m_alias_region_end(Null<KProcessAddress>), m_stack_region_start(Null<KProcessAddress>), m_stack_region_end(Null<KProcessAddress>),
                  m_kernel_map_region_start(Null<KProcessAddress>), m_kernel_map_region_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
                  m_alias_code_region_end(Null<KProcessAddress>), m_code_region_start(Null<KProcessAddress>), m_code_region_end(Null<KProcessAddress>),
                  m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_ipc_server_memory(), m_general_lock(),
                  m_map_physical_memory_lock(), m_device_map_lock(), m_impl(util::ConstantInitialize), m_memory_block_manager(util::ConstantInitialize),
                  m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(),
                  m_general_lock(), m_map_physical_memory_lock(), m_device_map_lock(), m_impl(util::ConstantInitialize), m_memory_block_manager(util::ConstantInitialize),
                  m_allocate_option(), m_address_space_width(), m_is_kernel(), m_enable_aslr(), m_enable_device_address_space_merge(),
                  m_memory_block_slab_manager(), m_block_info_manager(), m_resource_limit(), m_cached_physical_linear_region(), m_cached_physical_heap_region(),
                  m_heap_fill_value(), m_ipc_fill_value(), m_stack_fill_value()

@@ -365,6 +366,8 @@ namespace ams::kern {
            Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size);
            Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
            Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
            Result MapInsecureMemory(KProcessAddress address, size_t size);
            Result UnmapInsecureMemory(KProcessAddress address, size_t size);

            Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
                R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm));

@@ -25,6 +25,8 @@ namespace ams::kern {

namespace ams::kern {

    class KResourceLimit;

    class KSystemControlBase {
        public:
            /* This can be overridden as needed. */

@@ -86,6 +88,10 @@ namespace ams::kern {
            static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
            static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
            static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);

            /* Insecure Memory. */
            static KResourceLimit *GetInsecureMemoryResourceLimit();
            static u32 GetInsecureMemoryPool();
        protected:
            template<typename F>
            static ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {

@@ -103,6 +103,7 @@ namespace ams::kern {
        m_max_heap_size = 0;
        m_mapped_physical_memory_size = 0;
        m_mapped_unsafe_physical_memory = 0;
        m_mapped_insecure_memory = 0;
        m_mapped_ipc_server_memory = 0;

        m_memory_block_slab_manager = Kernel::GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();

@@ -287,6 +288,7 @@ namespace ams::kern {
        m_max_heap_size = 0;
        m_mapped_physical_memory_size = 0;
        m_mapped_unsafe_physical_memory = 0;
        m_mapped_insecure_memory = 0;
        m_mapped_ipc_server_memory = 0;

        const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();

@@ -340,6 +342,13 @@ namespace ams::kern {
            Kernel::GetUnsafeMemory().Release(m_mapped_unsafe_physical_memory);
        }

        /* Release any insecure mapped memory. */
        if (m_mapped_insecure_memory) {
            if (auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(); insecure_resource_limit != nullptr) {
                insecure_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, m_mapped_insecure_memory);
            }
        }

        /* Release any ipc server memory. */
        if (m_mapped_ipc_server_memory) {
            m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, m_mapped_ipc_server_memory);

@@ -375,6 +384,7 @@ namespace ams::kern {
            case KMemoryState_GeneratedCode:
            case KMemoryState_CodeOut:
            case KMemoryState_Coverage:
            case KMemoryState_Insecure:
                return m_alias_code_region_start;
            case KMemoryState_Code:
            case KMemoryState_CodeData:

@@ -409,6 +419,7 @@ namespace ams::kern {
            case KMemoryState_GeneratedCode:
            case KMemoryState_CodeOut:
            case KMemoryState_Coverage:
            case KMemoryState_Insecure:
                return m_alias_code_region_end - m_alias_code_region_start;
            case KMemoryState_Code:
            case KMemoryState_CodeData:

@@ -446,6 +457,7 @@ namespace ams::kern {
            case KMemoryState_GeneratedCode:
            case KMemoryState_CodeOut:
            case KMemoryState_Coverage:
            case KMemoryState_Insecure:
                return is_in_region && !is_in_heap && !is_in_alias;
            case KMemoryState_Normal:
                MESOSPHERE_ASSERT(is_in_heap);

@@ -1038,6 +1050,97 @@ namespace ams::kern {
        R_SUCCEED();
    }

    Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
        /* Get the insecure memory resource limit and pool. */
        auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit();
        const auto insecure_pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());

        /* Reserve the insecure memory. */
        /* NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached. */
        KScopedResourceReservation memory_reservation(insecure_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, size);
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultOutOfMemory());

        /* Allocate pages for the insecure memory. */
        KPageGroup pg(m_block_info_manager);
        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));

        /* Close the opened pages when we're done with them. */
        /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
        ON_SCOPE_EXIT { pg.Close(); };

        /* Clear all the newly allocated pages. */
        for (const auto &it : pg) {
            std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
        }

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Validate that the address's state is valid. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Map the pages. */
        const size_t num_pages = size / PageSize;
        const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false));

        /* Apply the memory block update. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Insecure, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* Update our mapped insecure size. */
        m_mapped_insecure_memory += size;

        /* Commit the memory reservation. */
        memory_reservation.Commit();

        /* We succeeded. */
        R_SUCCEED();
    }

    Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Insecure, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Unmap the memory. */
        const size_t num_pages = size / PageSize;
        const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

        /* Apply the memory block update. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        /* Update our mapped insecure size. */
        m_mapped_insecure_memory -= size;

        /* Release the insecure memory from the insecure limit. */
        if (auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(); insecure_resource_limit != nullptr) {
            insecure_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size);
        }

        R_SUCCEED();
    }

    KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
        KProcessAddress address = Null<KProcessAddress>;

@@ -292,4 +292,13 @@ namespace ams::kern {
        Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
    }

    /* Insecure Memory. */
    KResourceLimit *KSystemControlBase::GetInsecureMemoryResourceLimit() {
        return std::addressof(Kernel::GetSystemResourceLimit());
    }

    u32 KSystemControlBase::GetInsecureMemoryPool() {
        return KMemoryManager::Pool_SystemNonSecure;
    }

}

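The defaults above charge insecure memory to the system resource limit and serve it from the SystemNonSecure pool. Because these hooks are plain statics on KSystemControlBase, a board-specific KSystemControl can shadow them to change the policy. A minimal sketch of that pattern follows; the board namespace and the alternate pool choice are illustrative assumptions, not taken from this commit.

/* Sketch: a hypothetical board KSystemControl shadowing the insecure-memory hooks. */
#include <mesosphere.hpp>

namespace ams::kern::board::example {

    class KSystemControl : public KSystemControlBase {
        public:
            /* Insecure Memory. */
            static KResourceLimit *GetInsecureMemoryResourceLimit() {
                /* Keep charging the system resource limit, as the base implementation does. */
                return std::addressof(Kernel::GetSystemResourceLimit());
            }

            static u32 GetInsecureMemoryPool() {
                /* But hand out the pages from the Application pool instead of SystemNonSecure. */
                return KMemoryManager::Pool_Application;
            }
    };

}
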
@@ -0,0 +1,76 @@
/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern::svc {

    /* ============================= Common ============================= */

    namespace {

        Result MapInsecureMemory(uintptr_t address, size_t size) {
            /* Validate the address/size. */
            R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
            R_UNLESS(size > 0, svc::ResultInvalidSize());
            R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
            R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());

            /* Verify that the mapping is in range. */
            auto &pt = GetCurrentProcess().GetPageTable();
            R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Insecure), svc::ResultInvalidMemoryRegion());

            /* Map the insecure memory. */
            R_RETURN(pt.MapInsecureMemory(address, size));
        }

        Result UnmapInsecureMemory(uintptr_t address, size_t size) {
            /* Validate the address/size. */
            R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
            R_UNLESS(size > 0, svc::ResultInvalidSize());
            R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
            R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());

            /* Verify that the mapping is in range. */
            auto &pt = GetCurrentProcess().GetPageTable();
            R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Insecure), svc::ResultInvalidMemoryRegion());

            /* Unmap the insecure memory. */
            R_RETURN(pt.UnmapInsecureMemory(address, size));
        }

    }

    /* ============================= 64 ABI ============================= */

    Result MapInsecureMemory64(ams::svc::Address address, ams::svc::Size size) {
        R_RETURN(MapInsecureMemory(address, size));
    }

    Result UnmapInsecureMemory64(ams::svc::Address address, ams::svc::Size size) {
        R_RETURN(UnmapInsecureMemory(address, size));
    }

    /* ============================= 64From32 ABI ============================= */

    Result MapInsecureMemory64From32(ams::svc::Address address, ams::svc::Size size) {
        R_RETURN(MapInsecureMemory(address, size));
    }

    Result UnmapInsecureMemory64From32(ams::svc::Address address, ams::svc::Size size) {
        R_RETURN(UnmapInsecureMemory(address, size));
    }

}

@@ -95,6 +95,7 @@ namespace ams::svc {
        MemoryState_GeneratedCode = 0x14,
        MemoryState_CodeOut = 0x15,
        MemoryState_Coverage = 0x16,
        MemoryState_Insecure = 0x17,
    };

    enum MemoryPermission : u32 {