os: implement SharedMemory, update AslrSpaceManager

Michael Scire 2021-10-01 00:36:18 -07:00
parent 101e3087fe
commit 82f3416799
20 changed files with 737 additions and 196 deletions

View File

@@ -34,6 +34,7 @@
#include <stratosphere/os/os_busy_mutex.hpp>
#include <stratosphere/os/os_rw_busy_mutex.hpp>
#include <stratosphere/os/os_rw_lock.hpp>
#include <stratosphere/os/os_shared_memory.hpp>
#include <stratosphere/os/os_transfer_memory.hpp>
#include <stratosphere/os/os_semaphore.hpp>
#include <stratosphere/os/os_event.hpp>

View File

@@ -0,0 +1,86 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <stratosphere/os/os_shared_memory_types.hpp>
#include <stratosphere/os/os_shared_memory_api.hpp>
namespace ams::os {
class SharedMemory {
NON_COPYABLE(SharedMemory);
NON_MOVEABLE(SharedMemory);
private:
SharedMemoryType m_shared_memory;
public:
constexpr SharedMemory() : m_shared_memory{ .state = SharedMemoryType::State_NotInitialized } {
/* ... */
}
SharedMemory(size_t size, MemoryPermission my_perm, MemoryPermission other_perm) {
R_ABORT_UNLESS(CreateSharedMemory(std::addressof(m_shared_memory), size, my_perm, other_perm));
}
SharedMemory(size_t size, Handle handle, bool managed) {
this->Attach(size, handle, managed);
}
~SharedMemory() {
if (m_shared_memory.state == SharedMemoryType::State_NotInitialized) {
return;
}
DestroySharedMemory(std::addressof(m_shared_memory));
}
void Attach(size_t size, Handle handle, bool managed) {
return AttachSharedMemory(std::addressof(m_shared_memory), size, handle, managed);
}
void *Map(MemoryPermission perm) {
return MapSharedMemory(std::addressof(m_shared_memory), perm);
}
void Unmap() {
return UnmapSharedMemory(std::addressof(m_shared_memory));
}
void *GetMappedAddress() const {
return GetSharedMemoryAddress(std::addressof(m_shared_memory));
}
size_t GetSize() const {
return GetSharedMemorySize(std::addressof(m_shared_memory));
}
Handle GetHandle() const {
return GetSharedMemoryHandle(std::addressof(m_shared_memory));
}
operator SharedMemoryType &() {
return m_shared_memory;
}
operator const SharedMemoryType &() const {
return m_shared_memory;
}
SharedMemoryType *GetBase() {
return std::addressof(m_shared_memory);
}
};
}
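
For reference, a minimal usage sketch of the new SharedMemory wrapper; the function name, size, and permission values below are illustrative assumptions rather than code from this commit.

#include <stratosphere.hpp>

namespace {

    /* Create a page of shared memory that we can read/write and the peer may read, */
    /* map it at a randomized address, use it, and unmap it again. */
    void ExampleSharedMemoryUsage() {
        /* The creating constructor aborts on failure (see R_ABORT_UNLESS above). */
        ams::os::SharedMemory shmem(ams::os::MemoryPageSize,
                                    ams::os::MemoryPermission_ReadWrite,
                                    ams::os::MemoryPermission_ReadOnly);

        /* Map into our address space; Map() returns nullptr on failure. */
        void *address = shmem.Map(ams::os::MemoryPermission_ReadWrite);
        AMS_ABORT_UNLESS(address != nullptr);

        /* Use the mapping. */
        std::memset(address, 0, shmem.GetSize());

        /* Unmap; the destructor destroys the shared memory when shmem goes out of scope. */
        shmem.Unmap();
    }

}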

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <stratosphere/os/os_memory_permission.hpp>
namespace ams::os {
struct SharedMemoryType;
Result CreateSharedMemory(SharedMemoryType *shared_memory, size_t size, MemoryPermission my_perm, MemoryPermission other_perm);
void AttachSharedMemory(SharedMemoryType *shared_memory, size_t size, Handle handle, bool managed);
void DestroySharedMemory(SharedMemoryType *shared_memory);
void *MapSharedMemory(SharedMemoryType *shared_memory, MemoryPermission perm);
void UnmapSharedMemory(SharedMemoryType *shared_memory);
void *GetSharedMemoryAddress(const SharedMemoryType *shared_memory);
size_t GetSharedMemorySize(const SharedMemoryType *shared_memory);
Handle GetSharedMemoryHandle(const SharedMemoryType *shared_memory);
}

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <stratosphere/os/impl/os_internal_critical_section.hpp>
namespace ams::os {
struct SharedMemoryType {
enum State {
State_NotInitialized = 0,
State_Initialized = 1,
State_Mapped = 2,
};
u8 state;
bool handle_managed;
bool allocated;
void *address;
size_t size;
Handle handle;
mutable impl::InternalCriticalSectionStorage cs_shared_memory;
};
static_assert(std::is_trivial<SharedMemoryType>::value);
}

View File

@@ -170,7 +170,7 @@ namespace ams::htcs::server {
/* Attach the transfer memory. */
os::TransferMemoryType tmem;
R_ABORT_UNLESS(os::AttachTransferMemory(std::addressof(tmem), static_cast<size_t>(aligned_size), mem_handle.GetValue(), true));
os::AttachTransferMemory(std::addressof(tmem), static_cast<size_t>(aligned_size), mem_handle.GetValue(), true);
ON_SCOPE_EXIT { os::DestroyTransferMemory(std::addressof(tmem)); };
/* Map the transfer memory. */
@@ -201,7 +201,7 @@ namespace ams::htcs::server {
/* Attach the transfer memory. */
os::TransferMemoryType tmem;
R_ABORT_UNLESS(os::AttachTransferMemory(std::addressof(tmem), static_cast<size_t>(aligned_size), mem_handle.GetValue(), true));
os::AttachTransferMemory(std::addressof(tmem), static_cast<size_t>(aligned_size), mem_handle.GetValue(), true);
ON_SCOPE_EXIT { os::DestroyTransferMemory(std::addressof(tmem)); };
/* Map the transfer memory. */

View File

@@ -14,73 +14,114 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "os_address_space_allocator_types.hpp"
#include "os_address_space_allocator.hpp"
#include "os_rng_manager.hpp"
namespace ams::os::impl {
AddressSpaceAllocator::AddressSpaceAllocator(uintptr_t sa, uintptr_t ea, size_t gsz) : cs() {
/* Check preconditions. */
AMS_ASSERT(sa >= gsz);
AMS_ASSERT(ea + gsz >= ea);
template<std::unsigned_integral AddressType, std::unsigned_integral SizeType>
AddressSpaceAllocatorBase<AddressType, SizeType>::AddressSpaceAllocatorBase(u64 start_address, u64 end_address, SizeType guard_size, const AddressSpaceAllocatorForbiddenRegion *forbidden_regions, size_t num_forbidden_regions) : m_critical_section() {
/* Check pre-conditions. */
AMS_ASSERT(start_address >= guard_size);
AMS_ASSERT(end_address + guard_size >= end_address);
this->guard_page_count = util::DivideUp(gsz, MemoryPageSize);
this->start_page = sa / MemoryPageSize;
this->end_page = (ea / MemoryPageSize) + this->guard_page_count;
/* Set member variables. */
m_guard_page_count = util::DivideUp(guard_size, MemoryPageSize);
m_start_page = start_address / MemoryPageSize;
m_end_page = (end_address / MemoryPageSize) + m_guard_page_count;
/* Check forbidden region count. */
AMS_ASSERT(num_forbidden_regions <= MaxForbiddenRegions);
/* Set forbidden regions. */
for (size_t i = 0; i < num_forbidden_regions; ++i) {
if (const auto &region = forbidden_regions[i]; region.size > 0) {
/* Check region is valid. */
AMS_ASSERT(util::IsAligned(region.address, MemoryPageSize));
AMS_ASSERT(util::IsAligned(region.size, MemoryPageSize));
AMS_ASSERT((region.address / MemoryPageSize) >= m_guard_page_count);
AMS_ASSERT(region.address < region.address + region.size);
/* Set region. */
const auto idx = m_forbidden_region_count++;
m_forbidden_region_start_pages[idx] = (region.address / MemoryPageSize) - m_guard_page_count;
m_forbidden_region_end_pages[idx] = ((region.address + region.size) / MemoryPageSize) + m_guard_page_count;
}
}
}
bool AddressSpaceAllocator::GetNextNonOverlappedNodeUnsafe(uintptr_t page, size_t page_num) {
AMS_ASSERT(page < (page + page_num));
return CheckFreeSpace(page * MemoryPageSize, page_num * MemoryPageSize);
template<std::unsigned_integral AddressType, std::unsigned_integral SizeType>
bool AddressSpaceAllocatorBase<AddressType, SizeType>::CheckGuardSpace(AddressType address, SizeType size, SizeType guard_size) {
return this->CheckFreeSpace(address - guard_size, guard_size) && this->CheckFreeSpace(address + size, guard_size);
}
void *AddressSpaceAllocator::AllocateSpace(size_t size, size_t align) {
template<std::unsigned_integral AddressType, std::unsigned_integral SizeType>
bool AddressSpaceAllocatorBase<AddressType, SizeType>::GetNextNonOverlappedNodeUnsafe(AddressType page, SizeType page_count) {
/* Check pre-conditions. */
AMS_ASSERT(page < page + page_count);
return this->CheckFreeSpace(page * MemoryPageSize, page_count * MemoryPageSize);
}
template<std::unsigned_integral AddressType, std::unsigned_integral SizeType>
AddressType AddressSpaceAllocatorBase<AddressType, SizeType>::AllocateSpace(SizeType size, SizeType align, SizeType align_offset) {
/* Check pre-conditions. */
AMS_ASSERT(align > 0);
AMS_ASSERT((align_offset & ~(align - 1)) == 0);
AMS_ASSERT(util::IsAligned(align_offset, MemoryPageSize));
AMS_ASSERT(util::IsAligned(align, MemoryPageSize));
/* Determine the page count. */
const size_t page_count = util::DivideUp(size, MemoryPageSize);
const SizeType page_count = util::DivideUp(size, MemoryPageSize);
/* Determine the alignment pages. */
AMS_ASSERT(util::IsAligned(align, MemoryPageSize));
const size_t align_page_count = align / MemoryPageSize;
/* Determine alignment page counts. */
const SizeType align_offset_page_count = align_offset / MemoryPageSize;
const SizeType align_page_count = align / MemoryPageSize;
/* Check the page count. */
if (page_count > this->end_page - this->guard_page_count) {
return nullptr;
/* Check page counts. */
if (page_count + align_offset_page_count > m_end_page - m_guard_page_count) {
return 0;
}
/* Determine the range to look in. */
const uintptr_t rand_start = util::DivideUp(this->start_page + this->guard_page_count, align_page_count);
const uintptr_t rand_end = (this->end_page - page_count - this->guard_page_count) / align_page_count;
const AddressType rand_start = (align_offset_page_count <= m_start_page + m_guard_page_count) ? util::DivideUp(m_start_page + m_guard_page_count - align_offset_page_count, align_page_count) : 0;
const AddressType rand_end = (m_end_page - page_count - align_offset_page_count - m_guard_page_count) / align_page_count;
/* Check that we can find a space. */
if (rand_start > rand_end) {
return nullptr;
return 0;
}
/* Try to find space up to 512 times. */
/* Try to find a space up to 512 times. */
for (size_t i = 0; i < 512; ++i) {
/* Acquire exclusive access before doing calculations. */
std::scoped_lock lk(this->cs);
std::scoped_lock lk(m_critical_section);
/* Determine a random page. */
const uintptr_t target = ((GetRngManager().GenerateRandomU64() % (rand_end - rand_start + 1)) + rand_start) * align_page_count;
AMS_ASSERT(this->start_page <= target - this->guard_page_count && target + page_count + this->guard_page_count <= this->end_page);
const AddressType target = (((GetRngManager().GenerateRandomU64() % (rand_end - rand_start + 1)) + rand_start) * align_page_count) + align_offset_page_count;
AMS_ASSERT(m_start_page <= target - m_guard_page_count && target + page_count + m_guard_page_count <= m_end_page);
/* Check that the page is not forbidden. */
bool forbidden = false;
for (size_t j = 0; j < m_forbidden_region_count; ++j) {
if (m_forbidden_region_start_pages[j] < target + page_count && target < m_forbidden_region_end_pages[j]) {
forbidden = true;
break;
}
}
/* If the page is valid, use it. */
if (GetNextNonOverlappedNodeUnsafe(target - this->guard_page_count, page_count + 2 * this->guard_page_count)) {
return reinterpret_cast<void *>(target * MemoryPageSize);
if (!forbidden && this->GetNextNonOverlappedNodeUnsafe(target - m_guard_page_count, page_count + 2 * m_guard_page_count)) {
return target * MemoryPageSize;
}
}
/* We failed to find space. */
return nullptr;
return 0;
}
bool AddressSpaceAllocator::CheckGuardSpace(uintptr_t address, size_t size, size_t guard_size) {
return CheckFreeSpace(address - guard_size, guard_size) && CheckFreeSpace(address + size, guard_size);
}
/* Instantiate template. */
/* TODO: instantiate <u64, u64> on 32-bit? */
template class AddressSpaceAllocatorBase<uintptr_t, size_t>;
}
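
As a standalone illustration of the candidate-address selection performed by AllocateSpace above, the following simplified sketch omits guard pages, forbidden regions, and the retry loop, and substitutes std::mt19937_64 for the RNG manager; the namespace and function names are hypothetical.

#include <cstddef>
#include <cstdint>
#include <random>

namespace example {

    constexpr std::size_t MemoryPageSize = 0x1000;

    /* Pick a random address so that page_count pages fit within [start_page, end_page] and */
    /* the address is congruent to the alignment offset modulo the alignment, as in AllocateSpace. */
    /* The caller must ensure the range is non-empty (rand_start <= rand_end) and that */
    /* align_offset_page_count <= start_page. */
    inline std::uintptr_t PickCandidateAddress(std::uintptr_t start_page, std::uintptr_t end_page,
                                               std::size_t page_count, std::size_t align_page_count,
                                               std::size_t align_offset_page_count, std::mt19937_64 &rng) {
        /* DivideUp(start_page - align_offset_page_count, align_page_count). */
        const std::uintptr_t rand_start = (start_page - align_offset_page_count + align_page_count - 1) / align_page_count;
        const std::uintptr_t rand_end   = (end_page - page_count - align_offset_page_count) / align_page_count;

        /* Choose a random aligned slot, then apply the alignment offset. */
        const std::uintptr_t slot   = rand_start + (rng() % (rand_end - rand_start + 1));
        const std::uintptr_t target = slot * align_page_count + align_offset_page_count;

        return target * MemoryPageSize;
    }

}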

View File

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
#include "os_address_space_allocator_forbidden_region.hpp"
namespace ams::os::impl {
template<std::unsigned_integral AddressType, std::unsigned_integral SizeType>
class AddressSpaceAllocatorBase {
NON_COPYABLE(AddressSpaceAllocatorBase);
NON_MOVEABLE(AddressSpaceAllocatorBase);
private:
static constexpr size_t MaxForbiddenRegions = 2;
private:
InternalCriticalSection m_critical_section;
AddressType m_start_page;
AddressType m_end_page;
SizeType m_guard_page_count;
AddressType m_forbidden_region_start_pages[MaxForbiddenRegions];
AddressType m_forbidden_region_end_pages[MaxForbiddenRegions];
size_t m_forbidden_region_count;
public:
AddressSpaceAllocatorBase(u64 start_address, u64 end_address, SizeType guard_size, const AddressSpaceAllocatorForbiddenRegion *forbidden_regions, size_t num_forbidden_regions);
AddressType AllocateSpace(SizeType size, SizeType align, SizeType align_offset);
bool CheckGuardSpace(AddressType address, SizeType size, SizeType guard_size);
private:
bool GetNextNonOverlappedNodeUnsafe(AddressType page, SizeType page_count);
public:
virtual bool CheckFreeSpace(AddressType address, SizeType size) = 0;
};
}
#ifdef ATMOSPHERE_OS_HORIZON
#include "os_address_space_allocator_impl.os.horizon.hpp"
#else
#error "Unknown OS for AddressSpaceAllocator"
#endif

View File

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
namespace ams::os::impl {
struct AddressSpaceAllocatorForbiddenRegion {
u64 address;
u64 size;
};
}

View File

@@ -18,7 +18,13 @@
namespace ams::os::impl {
inline bool CheckFreeSpace(uintptr_t address, size_t size) {
class AddressSpaceAllocator final : public AddressSpaceAllocatorBase<uintptr_t, size_t> {
private:
using Base = AddressSpaceAllocatorBase<uintptr_t, size_t>;
public:
using Base::Base;
public:
virtual bool CheckFreeSpace(uintptr_t address, size_t size) override {
/* Query the memory. */
svc::MemoryInfo memory_info;
svc::PageInfo page_info;
@@ -28,5 +34,6 @@ namespace ams::os::impl {
return memory_info.state == svc::MemoryState_Free && address + size <= memory_info.addr + memory_info.size;
}
};
}

View File

@@ -1,44 +0,0 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
#ifdef ATMOSPHERE_OS_HORIZON
#include "os_address_space_allocator_impl.os.horizon.hpp"
#else
#error "Unknown OS for AddressSpaceAllocatorImpl"
#endif
namespace ams::os::impl {
class AddressSpaceAllocator {
NON_COPYABLE(AddressSpaceAllocator);
NON_MOVEABLE(AddressSpaceAllocator);
private:
InternalCriticalSection cs;
uintptr_t start_page;
uintptr_t end_page;
size_t guard_page_count;
private:
bool GetNextNonOverlappedNodeUnsafe(uintptr_t page, size_t page_num);
public:
AddressSpaceAllocator(uintptr_t sa, uintptr_t ea, size_t gsz);
void *AllocateSpace(size_t size, size_t align);
bool CheckGuardSpace(uintptr_t address, size_t size, size_t guard_size);
};
}

View File

@@ -15,6 +15,7 @@
*/
#pragma once
#include <stratosphere.hpp>
#include "os_address_space_allocator_forbidden_region.hpp"
namespace ams::os::impl {
@@ -28,22 +29,39 @@ namespace ams::os::impl {
static constexpr u64 AslrSize64BitDeprecated = 0x0078000000ul;
static constexpr u64 AslrBase64Bit = 0x0008000000ul;
static constexpr u64 AslrSize64Bit = 0x7FF8000000ul;
static constexpr size_t ForbiddenRegionCount = 2;
private:
static u64 GetAslrInfo(svc::InfoType type) {
u64 value;
R_ABORT_UNLESS(svc::GetInfo(std::addressof(value), type, svc::PseudoHandle::CurrentProcess, 0));
static_assert(std::same_as<size_t, uintptr_t>);
AMS_ASSERT(value <= std::numeric_limits<size_t>::max());
return static_cast<u64>(value & std::numeric_limits<size_t>::max());
}
private:
AddressSpaceAllocatorForbiddenRegion m_forbidden_regions[ForbiddenRegionCount];
public:
constexpr AslrSpaceManagerHorizonImpl() = default;
AslrSpaceManagerHorizonImpl() {
m_forbidden_regions[0] = { .address = GetHeapSpaceBeginAddress(), .size = GetHeapSpaceSize() };
m_forbidden_regions[1] = { .address = GetAliasSpaceBeginAddress(), .size = GetAliasSpaceSize() };
}
const AddressSpaceAllocatorForbiddenRegion *GetForbiddenRegions() const {
return m_forbidden_regions;
}
static size_t GetForbiddenRegionCount() {
return ForbiddenRegionCount;
}
static u64 GetHeapSpaceBeginAddress() {
return GetAslrInfo(svc::InfoType_HeapRegionAddress);
}
static u64 GetHeapSpaceBeginSize() {
static u64 GetHeapSpaceSize() {
return GetAslrInfo(svc::InfoType_HeapRegionSize);
}
@@ -51,7 +69,7 @@
return GetAslrInfo(svc::InfoType_AliasRegionAddress);
}
static u64 GetAliasSpaceBeginSize() {
static u64 GetAliasSpaceSize() {
return GetAslrInfo(svc::InfoType_AliasRegionSize);
}

View File

@@ -15,8 +15,7 @@
*/
#pragma once
#include <stratosphere.hpp>
#include "os_address_space_allocator_types.hpp"
#include "os_address_space_allocator.hpp"
#ifdef ATMOSPHERE_OS_HORIZON
#include "os_aslr_space_manager_impl.os.horizon.hpp"
@@ -29,26 +28,67 @@ namespace ams::os::impl {
constexpr inline size_t AslrSpaceLargeAlign = 2_MB;
constexpr inline size_t AslrSpaceGuardSize = 4 * MemoryPageSize;
class AslrSpaceManager {
NON_COPYABLE(AslrSpaceManager);
NON_MOVEABLE(AslrSpaceManager);
template<typename Allocator, typename Impl>
class AslrSpaceManagerTemplate {
NON_COPYABLE(AslrSpaceManagerTemplate);
NON_MOVEABLE(AslrSpaceManagerTemplate);
private:
AddressSpaceAllocator allocator;
AslrSpaceManagerImpl impl;
Impl m_impl;
Allocator m_allocator;
public:
AslrSpaceManager() : allocator(impl.GetAslrSpaceBeginAddress(), impl.GetAslrSpaceEndAddress(), AslrSpaceGuardSize) { /* ... */ }
void *AllocateSpace(size_t size) {
void *address = this->allocator.AllocateSpace(size, AslrSpaceLargeAlign);
if (address == nullptr) {
address = this->allocator.AllocateSpace(size, MemoryPageSize);
AslrSpaceManagerTemplate() : m_impl(), m_allocator(m_impl.GetAslrSpaceBeginAddress(), m_impl.GetAslrSpaceEndAddress(), AslrSpaceGuardSize, m_impl.GetForbiddenRegions(), m_impl.GetForbiddenRegionCount()) {
/* ... */
}
return address;
uintptr_t AllocateSpace(size_t size) {
/* Try to allocate a large-aligned space, if we can. */
if (size >= AslrSpaceLargeAlign) {
if (auto large_align = m_allocator.AllocateSpace(size, AslrSpaceLargeAlign, 0); large_align != 0) {
return large_align;
}
}
/* Allocate a page-aligned space. */
return m_allocator.AllocateSpace(size, MemoryPageSize, 0);
}
bool CheckGuardSpace(uintptr_t address, size_t size) {
return this->allocator.CheckGuardSpace(address, size, AslrSpaceGuardSize);
return m_allocator.CheckGuardSpace(address, size, AslrSpaceGuardSize);
}
template<typename MapFunction, typename UnmapFunction>
Result MapAtRandomAddress(uintptr_t *out, size_t size, MapFunction map_function, UnmapFunction unmap_function) {
/* Try to map up to 64 times. */
for (int i = 0; i < 64; ++i) {
/* Reserve space to map the memory. */
const uintptr_t map_address = this->AllocateSpace(size);
if (map_address == 0) {
break;
}
/* Try to map. */
R_TRY_CATCH(map_function(map_address, size)) {
/* If we failed to map at the address, retry. */
R_CATCH(os::ResultInvalidCurrentMemoryState) { continue; }
} R_END_TRY_CATCH;
/* Check guard space. */
if (!this->CheckGuardSpace(map_address, size)) {
/* We don't have guard space, so unmap. */
unmap_function(map_address, size);
continue;
}
/* We mapped successfully. */
*out = map_address;
return ResultSuccess();
}
/* We failed to map. */
return os::ResultOutOfAddressSpace();
}
};
using AslrSpaceManager = AslrSpaceManagerTemplate<AddressSpaceAllocator, AslrSpaceManagerImpl>;
}
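
The MapAtRandomAddress helper above is the pattern reused by the io region, shared memory, and transfer memory impls later in this commit. As a compact sketch, a hypothetical caller could look like the following; MapRegion and UnmapRegion are placeholder callbacks, not real svc wrappers.

#include <stratosphere.hpp>
#include "os_aslr_space_manager.hpp"

namespace ams::os::impl {

    namespace {

        /* Placeholder callbacks; the real callers wrap svc::MapIoRegion, */
        /* svc::MapSharedMemory, and svc::MapTransferMemory. */
        Result MapRegion(uintptr_t address, size_t size) {
            static_cast<void>(address);
            static_cast<void>(size);
            return ResultSuccess();
        }

        void UnmapRegion(uintptr_t address, size_t size) {
            static_cast<void>(address);
            static_cast<void>(size);
        }

    }

    Result MapExampleRegionAtRandomAddress(uintptr_t *out, size_t size) {
        /* MapAtRandomAddress reserves a random slot, invokes the map callback, verifies guard */
        /* space, and retries (up to 64 times) on os::ResultInvalidCurrentMemoryState. */
        return GetAslrSpaceManager().MapAtRandomAddress(out, size,
            [](uintptr_t map_address, size_t map_size) -> Result {
                return MapRegion(map_address, map_size);
            },
            [](uintptr_t map_address, size_t map_size) -> void {
                return UnmapRegion(map_address, map_size);
            }
        );
    }

}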

View File

@@ -57,37 +57,28 @@ namespace ams::os::impl {
/* Convert permission. */
const auto svc_perm = ConvertToSvcMemoryPermission(perm);
/* NOTE: It is unknown what algorithm Nintendo uses for mapping, so we're using */
/* the transfer memory algorithm for now. */
/* Map at a random address. */
uintptr_t mapped_address;
R_TRY(impl::GetAslrSpaceManager().MapAtRandomAddress(std::addressof(mapped_address), size,
[handle, svc_perm](uintptr_t map_address, size_t map_size) -> Result {
R_TRY_CATCH(svc::MapIoRegion(handle, map_address, map_size, svc_perm)) {
/* TODO: What's the correct result for these? */
// R_CONVERT(svc::ResultInvalidHandle, os::ResultInvalidHandle())
// R_CONVERT(svc::ResultInvalidSize, os::Result???())
// R_CONVERT(svc::ResultInvalidState, os::Result???())
R_CONVERT(svc::ResultInvalidCurrentMemory, os::ResultInvalidCurrentMemoryState())
} R_END_TRY_CATCH_WITH_ABORT_UNLESS;
/* Try to map up to 64 times. */
for (int i = 0; i < 64; ++i) {
/* Reserve space to map the memory. */
void *map_address = impl::GetAslrSpaceManager().AllocateSpace(size);
R_UNLESS(map_address != nullptr, os::ResultOutOfAddressSpace());
/* Try to map. */
/* TODO: Result conversion/abort on unexpected result? */
R_TRY_CATCH(svc::MapIoRegion(handle, reinterpret_cast<uintptr_t>(map_address), size, svc_perm)) {
/* If we failed to map at the address, retry. */
R_CATCH(svc::ResultInvalidCurrentMemory) { continue; }
R_CATCH(svc::ResultInvalidMemoryRegion) { continue; }
} R_END_TRY_CATCH;
/* Check guard space via aslr manager. */
if (!impl::GetAslrSpaceManager().CheckGuardSpace(reinterpret_cast<uintptr_t>(map_address), size)) {
/* We don't have guard space, so unmap. */
UnmapIoRegion(handle, map_address, size);
continue;
}
/* We mapped successfully. */
*out = map_address;
return ResultSuccess();
},
[handle](uintptr_t map_address, size_t map_size) -> void {
return IoRegionImpl::UnmapIoRegion(handle, reinterpret_cast<void *>(map_address), map_size);
}
));
/* We failed to map. */
return os::ResultOutOfAddressSpace();
/* Return the address we mapped at. */
*out = reinterpret_cast<void *>(mapped_address);
return ResultSuccess();
}
void IoRegionImpl::UnmapIoRegion(Handle handle, void *address, size_t size) {

View File

@@ -25,7 +25,7 @@ namespace ams::os::impl {
class OsResourceManager {
private:
RngManager rng_manager{};
AslrSpaceManager aslr_space_manager;
AslrSpaceManager aslr_space_manager{};
/* TODO */
ThreadManager thread_manager{};
/* TODO */

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
namespace ams::os::impl {
class SharedMemoryImpl {
public:
static Result Create(Handle *out, size_t size, MemoryPermission my_perm, MemoryPermission other_perm);
static void Close(Handle handle);
static Result Map(void **out, Handle handle, size_t size, MemoryPermission perm);
static void Unmap(Handle handle, void *address, size_t size);
};
}

View File

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "os_shared_memory_impl.hpp"
#include "os_aslr_space_manager.hpp"
namespace ams::os::impl {
namespace {
svc::MemoryPermission ConvertToSvcMemoryPermission(os::MemoryPermission perm) {
switch (perm) {
case os::MemoryPermission_None: return svc::MemoryPermission_None;
case os::MemoryPermission_ReadOnly: return svc::MemoryPermission_Read;
case os::MemoryPermission_WriteOnly: return svc::MemoryPermission_Write;
case os::MemoryPermission_ReadWrite: return svc::MemoryPermission_ReadWrite;
AMS_UNREACHABLE_DEFAULT_CASE();
}
}
}
Result SharedMemoryImpl::Create(Handle *out, size_t size, MemoryPermission my_perm, MemoryPermission other_perm) {
/* Convert memory permissions. */
const auto svc_my_perm = ConvertToSvcMemoryPermission(my_perm);
const auto svc_other_perm = ConvertToSvcMemoryPermission(other_perm);
/* Create the memory. */
svc::Handle handle;
R_TRY_CATCH(svc::CreateSharedMemory(std::addressof(handle), size, svc_my_perm, svc_other_perm)) {
R_CONVERT(svc::ResultOutOfHandles, os::ResultOutOfHandles())
R_CONVERT(svc::ResultOutOfResource, os::ResultOutOfResource())
} R_END_TRY_CATCH_WITH_ABORT_UNLESS;
*out = handle;
return ResultSuccess();
}
void SharedMemoryImpl::Close(Handle handle) {
R_ABORT_UNLESS(svc::CloseHandle(handle));
}
Result SharedMemoryImpl::Map(void **out, Handle handle, size_t size, MemoryPermission perm) {
/* Convert memory permission. */
const auto svc_perm = ConvertToSvcMemoryPermission(perm);
/* Map at a random address. */
uintptr_t mapped_address;
R_TRY(impl::GetAslrSpaceManager().MapAtRandomAddress(std::addressof(mapped_address), size,
[handle, svc_perm](uintptr_t map_address, size_t map_size) -> Result {
R_TRY_CATCH(svc::MapSharedMemory(handle, map_address, map_size, svc_perm)) {
R_CONVERT(svc::ResultInvalidCurrentMemory, os::ResultInvalidCurrentMemoryState())
} R_END_TRY_CATCH_WITH_ABORT_UNLESS;
return ResultSuccess();
},
[handle](uintptr_t map_address, size_t map_size) -> void {
return SharedMemoryImpl::Unmap(handle, reinterpret_cast<void *>(map_address), map_size);
}
));
/* Return the address we mapped at. */
*out = reinterpret_cast<void *>(mapped_address);
return ResultSuccess();
}
void SharedMemoryImpl::Unmap(Handle handle, void *address, size_t size) {
R_ABORT_UNLESS(svc::UnmapSharedMemory(handle, reinterpret_cast<uintptr_t>(address), size));
}
}

View File

@@ -23,7 +23,7 @@ namespace ams::os::impl {
static Result Create(Handle *out, void *address, size_t size, MemoryPermission perm);
static void Close(Handle handle);
static Result Map(void **out, Handle handle, void *address, size_t size, MemoryPermission owner_perm);
static Result Map(void **out, Handle handle, size_t size, MemoryPermission owner_perm);
static void Unmap(Handle handle, void *address, size_t size);
};

View File

@@ -15,6 +15,7 @@
*/
#include <stratosphere.hpp>
#include "os_transfer_memory_impl.hpp"
#include "os_aslr_space_manager.hpp"
namespace ams::os::impl {
@@ -34,7 +35,7 @@ namespace ams::os::impl {
Result TransferMemoryImpl::Create(Handle *out, void *address, size_t size, MemoryPermission perm) {
/* Convert memory permission. */
auto svc_perm = ConvertToSvcMemoryPermission(perm);
const auto svc_perm = ConvertToSvcMemoryPermission(perm);
/* Create the memory. */
svc::Handle handle;
@@ -51,22 +52,30 @@ namespace ams::os::impl {
R_ABORT_UNLESS(svc::CloseHandle(handle));
}
Result TransferMemoryImpl::Map(void **out, Handle handle, void *address, size_t size, MemoryPermission owner_perm) {
AMS_ASSERT(address != nullptr);
Result TransferMemoryImpl::Map(void **out, Handle handle, size_t size, MemoryPermission owner_perm) {
/* Convert memory permission. */
auto svc_owner_perm = ConvertToSvcMemoryPermission(owner_perm);
const auto svc_owner_perm = ConvertToSvcMemoryPermission(owner_perm);
/* Map the memory. */
R_TRY_CATCH(svc::MapTransferMemory(handle, reinterpret_cast<uintptr_t>(address), size, svc_owner_perm)) {
/* Map at a random address. */
uintptr_t mapped_address;
R_TRY(impl::GetAslrSpaceManager().MapAtRandomAddress(std::addressof(mapped_address), size,
[handle, svc_owner_perm](uintptr_t map_address, size_t map_size) -> Result {
R_TRY_CATCH(svc::MapTransferMemory(handle, map_address, map_size, svc_owner_perm)) {
R_CONVERT(svc::ResultInvalidHandle, os::ResultInvalidHandle())
R_CONVERT(svc::ResultInvalidSize, os::ResultInvalidTransferMemorySize())
R_CONVERT(svc::ResultInvalidState, os::ResultInvalidTransferMemoryState())
R_CONVERT(svc::ResultInvalidCurrentMemory, os::ResultInvalidCurrentMemoryState())
R_CONVERT(svc::ResultInvalidMemoryRegion, os::ResultInvalidCurrentMemoryState())
} R_END_TRY_CATCH_WITH_ABORT_UNLESS;
*out = address;
return ResultSuccess();
},
[handle](uintptr_t map_address, size_t map_size) -> void {
return TransferMemoryImpl::Unmap(handle, reinterpret_cast<void *>(map_address), map_size);
}
));
/* Return the address we mapped at. */
*out = reinterpret_cast<void *>(mapped_address);
return ResultSuccess();
}

View File

@@ -0,0 +1,157 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "impl/os_thread_manager.hpp"
#include "impl/os_shared_memory_impl.hpp"
namespace ams::os {
namespace {
void SetupSharedMemoryType(SharedMemoryType *shared_memory, size_t size, Handle handle, bool managed) {
/* Set members. */
shared_memory->handle = handle;
shared_memory->size = size;
shared_memory->address = nullptr;
shared_memory->allocated = false;
/* Set managed. */
shared_memory->handle_managed = managed;
/* Create the critical section. */
util::ConstructAt(shared_memory->cs_shared_memory);
}
}
Result CreateSharedMemory(SharedMemoryType *shared_memory, size_t size, MemoryPermission my_perm, MemoryPermission other_perm) {
/* Check pre-conditions. */
AMS_ASSERT(size > 0);
AMS_ASSERT(util::IsAligned(size, MemoryPageSize));
/* Create the memory. */
Handle handle;
R_TRY(impl::SharedMemoryImpl::Create(std::addressof(handle), size, my_perm, other_perm));
/* Setup the object. */
SetupSharedMemoryType(shared_memory, size, handle, true);
return ResultSuccess();
}
void AttachSharedMemory(SharedMemoryType *shared_memory, size_t size, Handle handle, bool managed) {
/* Check pre-conditions. */
AMS_ASSERT(size > 0);
AMS_ASSERT(util::IsAligned(size, MemoryPageSize));
AMS_ASSERT(handle != svc::InvalidHandle);
/* Setup the object. */
SetupSharedMemoryType(shared_memory, size, handle, managed);
}
void DestroySharedMemory(SharedMemoryType *shared_memory) {
/* Unmap the shared memory, if required. */
if (shared_memory->state == SharedMemoryType::State_Mapped) {
UnmapSharedMemory(shared_memory);
}
/* Check the state. */
AMS_ASSERT(shared_memory->state == SharedMemoryType::State_Initialized);
/* Set state to not initialized. */
shared_memory->state = SharedMemoryType::State_NotInitialized;
/* Close the handle, if it's managed. */
if (shared_memory->handle_managed) {
impl::SharedMemoryImpl::Close(shared_memory->handle);
}
shared_memory->handle_managed = false;
/* Clear members. */
shared_memory->address = nullptr;
shared_memory->size = 0;
shared_memory->handle = svc::InvalidHandle;
/* Destroy the critical section. */
util::DestroyAt(shared_memory->cs_shared_memory);
}
void *MapSharedMemory(SharedMemoryType *shared_memory, MemoryPermission perm) {
/* Lock the current thread, and then the shared memory. */
std::scoped_lock thread_lk(util::GetReference(impl::GetCurrentThread()->cs_thread));
std::scoped_lock lk(util::GetReference(shared_memory->cs_shared_memory));
/* Ensure we're in a mappable state. */
AMS_ASSERT(shared_memory->state == SharedMemoryType::State_Initialized);
/* Try to map. */
void *mapped_address;
if (R_FAILED(impl::SharedMemoryImpl::Map(std::addressof(mapped_address), shared_memory->handle, shared_memory->size, perm))) {
return nullptr;
}
/* Set fields now that we've mapped successfully. */
shared_memory->allocated = true;
shared_memory->address = mapped_address;
shared_memory->state = SharedMemoryType::State_Mapped;
return mapped_address;
}
void UnmapSharedMemory(SharedMemoryType *shared_memory) {
/* Lock the memory. */
std::scoped_lock lk(util::GetReference(shared_memory->cs_shared_memory));
/* If the memory isn't mapped, we can't unmap it. */
if (shared_memory->state != SharedMemoryType::State_Mapped) {
return;
}
/* Unmap the memory. */
impl::SharedMemoryImpl::Unmap(shared_memory->handle, shared_memory->address, shared_memory->size);
/* Unmapped memory is necessarily not allocated. */
if (shared_memory->allocated) {
shared_memory->allocated = false;
}
/* Clear the address. */
shared_memory->address = nullptr;
shared_memory->state = SharedMemoryType::State_Initialized;
}
void *GetSharedMemoryAddress(const SharedMemoryType *shared_memory) {
/* Check pre-conditions. */
AMS_ASSERT(shared_memory->state == SharedMemoryType::State_Initialized || shared_memory->state == SharedMemoryType::State_Mapped);
return shared_memory->address;
}
size_t GetSharedMemorySize(const SharedMemoryType *shared_memory) {
/* Check pre-conditions. */
AMS_ASSERT(shared_memory->state == SharedMemoryType::State_Initialized || shared_memory->state == SharedMemoryType::State_Mapped);
return shared_memory->size;
}
Handle GetSharedMemoryHandle(const SharedMemoryType *shared_memory) {
/* Check pre-conditions. */
AMS_ASSERT(shared_memory->state == SharedMemoryType::State_Initialized || shared_memory->state == SharedMemoryType::State_Mapped);
return shared_memory->handle;
}
}
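
To complement the create-and-map path above, a sketch of the attach path for a handle received from another process, mirroring the transfer memory attach in the htcs server change earlier in this commit; the function name, size, and permission choices here are hypothetical.

#include <stratosphere.hpp>

namespace ams::example {

    /* Attach, map, and automatically clean up a shared memory received from another process. */
    /* Passing managed = true lets DestroySharedMemory close the handle for us. */
    Result UseReceivedSharedMemory(Handle handle, size_t size) {
        /* Attach the shared memory. */
        os::SharedMemoryType shmem;
        os::AttachSharedMemory(std::addressof(shmem), size, handle, true);
        ON_SCOPE_EXIT { os::DestroySharedMemory(std::addressof(shmem)); };

        /* Map the shared memory; DestroySharedMemory will unmap it on scope exit. */
        void *address = os::MapSharedMemory(std::addressof(shmem), os::MemoryPermission_ReadOnly);
        R_UNLESS(address != nullptr, os::ResultOutOfAddressSpace());

        /* Read from the mapping... */
        return ResultSuccess();
    }

}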

View File

@@ -16,25 +16,11 @@
#include <stratosphere.hpp>
#include "impl/os_thread_manager.hpp"
#include "impl/os_transfer_memory_impl.hpp"
#include "impl/os_aslr_space_manager_types.hpp"
#include "impl/os_aslr_space_manager.hpp"
namespace ams::os {
namespace {
Result MapTransferMemoryWithAddressUnsafe(TransferMemoryType *tmem, void *address, os::MemoryPermission owner_perm) {
/* Map the transfer memory. */
void *mapped_address = nullptr;
R_TRY(impl::TransferMemoryImpl::Map(std::addressof(mapped_address), tmem->handle, address, tmem->size, owner_perm));
/* Set fields now that we've mapped. */
tmem->address = mapped_address;
tmem->state = TransferMemoryType::State_Mapped;
return ResultSuccess();
}
inline void SetupTransferMemoryType(TransferMemoryType *tmem, size_t size, Handle handle, bool managed) {
/* Set members. */
tmem->handle = handle;
@@ -127,43 +113,18 @@ namespace ams::os {
/* Ensure we're in a mappable state. */
AMS_ASSERT(tmem->state == TransferMemoryType::State_Created);
/* Try to map up to 64 times. */
for (int i = 0; i < 64; ++i) {
/* Reserve space to map the memory. */
void *map_address = impl::GetAslrSpaceManager().AllocateSpace(tmem->size);
R_UNLESS(map_address != nullptr, os::ResultOutOfAddressSpace());
/* Map. */
void *mapped_address;
R_TRY(impl::TransferMemoryImpl::Map(std::addressof(mapped_address), tmem->handle, tmem->size, owner_perm));
/* Mark allocated. */
/* Set fields now that we've mapped. */
tmem->allocated = true;
auto alloc_guard = SCOPE_GUARD { tmem->allocated = false; };
tmem->address = mapped_address;
tmem->state = TransferMemoryType::State_Mapped;
/* Try to map. */
R_TRY_CATCH(MapTransferMemoryWithAddressUnsafe(tmem, map_address, owner_perm)) {
/* If we failed to map at the address, retry. */
R_CATCH(os::ResultInvalidCurrentMemoryState) { continue; }
} R_END_TRY_CATCH;
/* Check guard space via aslr manager. */
if (!impl::GetAslrSpaceManager().CheckGuardSpace(reinterpret_cast<uintptr_t>(tmem->address), tmem->size)) {
/* NOTE: Nintendo bug here. If this case occurs, they will return ResultSuccess() without actually mapping the transfer memory. */
/* This is because they basically do if (!os::ResultInvalidCurrentMemoryState::Includes(result)) { return result; }, and */
/* ResultSuccess() is not included by ResultInvalidCurrentMemoryState. */
/* We will do better than them, and will not falsely return ResultSuccess(). */
impl::TransferMemoryImpl::Unmap(tmem->handle, tmem->address, tmem->size);
continue;
}
/* We mapped successfully. */
alloc_guard.Cancel();
*out = tmem->address;
return ResultSuccess();
}
/* We failed to map. */
return os::ResultOutOfAddressSpace();
}
void UnmapTransferMemory(TransferMemoryType *tmem) {
/* Lock the memory. */
std::scoped_lock lk(GetReference(tmem->cs_transfer_memory));