kern: allocate all TTBR0 pages during init, use procidx as asid

Michael Scire, 2024-10-09 14:04:15 -07:00 (committed by SciresM)
parent c3fa42d958
commit a72e39d657
9 changed files with 72 additions and 110 deletions
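
In short, the commit replaces the dynamic bitmap ASID allocator with a fixed table of TTBR0 values: entry 0 is the kernel's TTBR0 (ASID 0), and entry i + 1 belongs to the process with slab index i, which also becomes its ASID. Each entry packs the ASID into bits 48-63 and the physical address of the L1 table into the low bits, matching EncodeTtbr in the diff below. A minimal standalone sketch of that layout (the helper names here are illustrative, not the kernel's):

#include <cstdint>

// Illustrative helpers mirroring the (asid << 48) | table_phys layout used by
// EncodeTtbr and s_ttbr0_entries. Bits 48-63 (the ASID field) and bit 0 (CnP)
// must be clear in the table's physical address for the packing to be lossless.
constexpr std::uint64_t EncodeTtbr0Entry(std::uint64_t table_phys, std::uint8_t asid) {
    return (static_cast<std::uint64_t>(asid) << 48) | table_phys;
}

constexpr std::uint64_t DecodeTablePhys(std::uint64_t entry) {
    return entry & UINT64_C(0x0000FFFFFFFFFFFE);   // strip the ASID tag and the CnP bit
}

constexpr std::uint8_t DecodeAsid(std::uint64_t entry) {
    return static_cast<std::uint8_t>(entry >> 48);
}

static_assert(DecodeAsid(EncodeTtbr0Entry(UINT64_C(0x81234000), 5)) == 5);
static_assert(DecodeTablePhys(EncodeTtbr0Entry(UINT64_C(0x81234000), 5)) == UINT64_C(0x81234000));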

View File

@@ -93,9 +93,13 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(alignment < L1BlockSize);
return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
}
public:
/* TODO: How should this size be determined. Does the KProcess slab count need to go in a header as a define? */
static constexpr size_t NumTtbr0Entries = 81;
private:
static constinit inline const volatile u64 s_ttbr0_entries[NumTtbr0Entries] = {};
private:
KPageTableManager *m_manager;
u64 m_ttbr;
u8 m_asid;
protected:
Result OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll);
@@ -168,17 +172,28 @@ namespace ams::kern::arch::arm64 {
return entry;
}
public:
constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_ttbr(), m_asid() { /* ... */ }
constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_asid() { /* ... */ }
explicit KPageTable() { /* ... */ }
static NOINLINE void Initialize(s32 core_id);
ALWAYS_INLINE void Activate(u32 proc_id) {
cpu::SwitchProcess(m_ttbr, proc_id);
static const volatile u64 &GetTtbr0Entry(size_t index) { return s_ttbr0_entries[index]; }
static ALWAYS_INLINE u64 GetKernelTtbr0() {
return s_ttbr0_entries[0];
}
static ALWAYS_INLINE void ActivateKernel() {
/* Activate, using asid 0 and process id = 0xFFFFFFFF */
cpu::SwitchProcess(GetKernelTtbr0(), 0xFFFFFFFF);
}
static ALWAYS_INLINE void ActivateProcess(size_t proc_idx, u32 proc_id) {
cpu::SwitchProcess(s_ttbr0_entries[proc_idx + 1], proc_id);
}
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
Result Finalize();
private:
Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
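
With the header changes above, activation no longer reads a per-object m_ttbr: ActivateKernel uses entry 0 of the static table and ActivateProcess uses entry proc_idx + 1. NumTtbr0Entries = 81 is therefore one kernel entry plus 80 process slots, presumably the KProcess slab count that the TODO asks about. A small sketch of the indexing under that assumption:

#include <cstddef>

// Illustrative only: mirrors the indexing of s_ttbr0_entries seen in
// GetKernelTtbr0 and ActivateProcess, assuming 80 usable process slots.
constexpr std::size_t NumTtbr0Entries  = 81;   // 1 kernel entry + 80 processes
constexpr std::size_t KernelTtbr0Index = 0;    // ASID 0 is reserved for the kernel

constexpr std::size_t Ttbr0IndexForProcess(std::size_t proc_idx) {
    return proc_idx + 1;   // slab index i -> entry i + 1, which is also its ASID
}

static_assert(Ttbr0IndexForProcess(0)  == 1);
static_assert(Ttbr0IndexForProcess(79) == NumTtbr0Entries - 1);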

View File

@@ -23,13 +23,13 @@ namespace ams::kern::arch::arm64 {
private:
KPageTable m_page_table;
public:
void Activate(u64 id) {
void Activate(size_t process_index, u64 id) {
/* Activate the page table with the specified contextidr. */
m_page_table.Activate(id);
m_page_table.ActivateProcess(process_index, id);
}
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit));
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit, process_index));
}
void Finalize() { m_page_table.Finalize(); }
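
The wrapper above just forwards the slab index plus the process id, which goes into CONTEXTIDR per the existing comment. Since the stored TTBR0 value already carries both the ASID and the L1 table address, a switch boils down to two system-register writes; the sketch below is an architectural illustration only, not the kernel's actual cpu::SwitchProcess:

#include <cstdint>

// Hypothetical stand-in for a TTBR0/CONTEXTIDR switch on AArch64. Because the
// incoming TTBR0 value is tagged with the target ASID, no TLB invalidation is
// needed for non-global entries belonging to other ASIDs.
inline void SwitchProcessSketch(std::uint64_t ttbr0_value, std::uint32_t context_id) {
    __asm__ volatile("msr contextidr_el1, %0" :: "r"(static_cast<std::uint64_t>(context_id)) : "memory");
    __asm__ volatile("msr ttbr0_el1, %0"      :: "r"(ttbr0_value)                            : "memory");
    __asm__ volatile("isb");   // ensure subsequent instructions use the new translation context
}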

View File

@@ -29,8 +29,7 @@ namespace ams::kern::arch::arm64 {
NOINLINE void Initialize(s32 core_id);
void Activate() {
/* Activate, using process id = 0xFFFFFFFF */
m_page_table.Activate(0xFFFFFFFF);
m_page_table.ActivateKernel();
}
void ActivateForInit() {

View File

@@ -374,7 +374,7 @@ namespace ams::kern {
/* Update the current page table. */
if (next_process) {
next_process->GetPageTable().Activate(next_process->GetProcessId());
next_process->GetPageTable().Activate(next_process->GetSlabIndex(), next_process->GetProcessId());
} else {
Kernel::GetKernelPageTable().Activate();
}

View File

@@ -85,77 +85,6 @@ namespace ams::kern::arch::arm64 {
return (static_cast<u64>(asid) << 48) | (static_cast<u64>(GetInteger(table)));
}
class KPageTableAsidManager {
private:
using WordType = u32;
static constexpr u8 ReservedAsids[] = { 0 };
static constexpr size_t NumReservedAsids = util::size(ReservedAsids);
static constexpr size_t BitsPerWord = BITSIZEOF(WordType);
static constexpr size_t AsidCount = 0x100;
static constexpr size_t NumWords = AsidCount / BitsPerWord;
static constexpr WordType FullWord = ~WordType(0u);
private:
WordType m_state[NumWords];
KLightLock m_lock;
u8 m_hint;
private:
constexpr bool TestImpl(u8 asid) const {
return m_state[asid / BitsPerWord] & (1u << (asid % BitsPerWord));
}
constexpr void ReserveImpl(u8 asid) {
MESOSPHERE_ASSERT(!this->TestImpl(asid));
m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
}
constexpr void ReleaseImpl(u8 asid) {
MESOSPHERE_ASSERT(this->TestImpl(asid));
m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
}
constexpr u8 FindAvailable() const {
for (size_t i = 0; i < util::size(m_state); i++) {
if (m_state[i] == FullWord) {
continue;
}
const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]);
return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit);
}
if (m_state[util::size(m_state)-1] == FullWord) {
MESOSPHERE_PANIC("Unable to reserve ASID");
}
__builtin_unreachable();
}
static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) {
return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType));
}
public:
constexpr KPageTableAsidManager() : m_state(), m_lock(), m_hint() {
for (size_t i = 0; i < NumReservedAsids; i++) {
this->ReserveImpl(ReservedAsids[i]);
}
}
u8 Reserve() {
KScopedLightLock lk(m_lock);
if (this->TestImpl(m_hint)) {
m_hint = this->FindAvailable();
}
this->ReserveImpl(m_hint);
return m_hint++;
}
void Release(u8 asid) {
KScopedLightLock lk(m_lock);
this->ReleaseImpl(asid);
}
};
KPageTableAsidManager g_asid_manager;
}
ALWAYS_INLINE void KPageTable::NoteUpdated() const {
@@ -184,6 +113,7 @@ namespace ams::kern::arch::arm64 {
this->OnKernelTableSinglePageUpdated(virt_addr);
}
void KPageTable::Initialize(s32 core_id) {
/* Nothing actually needed here. */
MESOSPHERE_UNUSED(core_id);
@@ -194,38 +124,29 @@ namespace ams::kern::arch::arm64 {
m_asid = 0;
m_manager = Kernel::GetSystemSystemResource().GetPageTableManagerPointer();
/* Allocate a page for ttbr. */
/* NOTE: It is a postcondition of page table manager allocation that the page is all-zero. */
const u64 asid_tag = (static_cast<u64>(m_asid) << 48ul);
const KVirtualAddress page = m_manager->Allocate();
MESOSPHERE_ASSERT(page != Null<KVirtualAddress>);
m_ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag;
/* Initialize the base page table. */
MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
R_SUCCEED();
}
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* Get an ASID */
m_asid = g_asid_manager.Reserve();
ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
/* Determine our ASID */
m_asid = process_index + 1;
MESOSPHERE_ABORT_UNLESS(0 < m_asid && m_asid < util::size(s_ttbr0_entries));
/* Set our manager. */
m_manager = system_resource->GetPageTableManagerPointer();
/* Allocate a new table, and set our ttbr value. */
const KVirtualAddress new_table = m_manager->Allocate();
R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };
/* Get the virtual address of our L1 table. */
const KPhysicalAddress ttbr0_phys = KPhysicalAddress(s_ttbr0_entries[m_asid] & UINT64_C(0xFFFFFFFFFFFE));
const KVirtualAddress ttbr0_virt = KMemoryLayout::GetLinearVirtualAddress(ttbr0_phys);
/* Initialize our base table. */
const size_t as_width = GetAddressSpaceWidth(flags);
const KProcessAddress as_start = 0;
const KProcessAddress as_end = (1ul << as_width);
R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(ttbr0_virt), as_start, as_end, code_address, code_size, system_resource, resource_limit));
/* Note that we've updated the table (since we created it). */
this->NoteUpdated();
@@ -329,20 +250,16 @@ namespace ams::kern::arch::arm64 {
}
}
/* Free the L1 table. */
/* Clear the L1 table. */
{
const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
ClearPageTable(l1_table);
this->GetPageTableManager().Free(l1_table);
}
/* Perform inherited finalization. */
KPageTableBase::Finalize();
}
/* Release our asid. */
g_asid_manager.Release(m_asid);
R_SUCCEED();
}
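
With ASIDs now fixed by the process slab index, the bitmap allocator removed above is unnecessary. For reference, its FindAvailable located a free ASID with a carry/XOR trick: m_state[i] + 1 flips the bits up to and including the lowest clear bit, so XOR-ing with the original isolates exactly that prefix, and the index of the prefix's highest set bit is the free slot. A standalone sketch of the same trick (not the kernel's code):

#include <cstdint>

// Lowest-clear-bit search as used by the removed FindAvailable:
// (w + 1) ^ w sets bits 0..k, where k is the index of the lowest clear bit of w.
// Callers must skip all-ones words first, exactly as the removed code did.
constexpr unsigned LowestClearBit(std::uint32_t w) {
    const std::uint32_t prefix = (w + 1u) ^ w;                    // bits 0..k set
    return 31u - static_cast<unsigned>(__builtin_clz(prefix));    // index of bit k
}

static_assert(LowestClearBit(0b0000u) == 0);
static_assert(LowestClearBit(0b0111u) == 3);
static_assert(LowestClearBit(0b1011u) == 2);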

View File

@@ -31,7 +31,6 @@ namespace ams::kern::board::nintendo::nx {
/* Struct representing registers saved on wake/sleep. */
class SavedSystemRegisters {
private:
u64 ttbr0_el1;
u64 elr_el1;
u64 sp_el0;
u64 spsr_el1;
@@ -90,7 +89,6 @@ namespace ams::kern::board::nintendo::nx {
void SavedSystemRegisters::Save() {
/* Save system registers. */
this->ttbr0_el1 = cpu::GetTtbr0El1();
this->tpidr_el0 = cpu::GetTpidrEl0();
this->elr_el1 = cpu::GetElrEl1();
this->sp_el0 = cpu::GetSpEl0();
@@ -405,7 +403,7 @@ namespace ams::kern::board::nintendo::nx {
cpu::EnsureInstructionConsistency();
/* Restore system registers. */
cpu::SetTtbr0El1 (this->ttbr0_el1);
cpu::SetTtbr0El1 (KPageTable::GetKernelTtbr0());
cpu::SetTpidrEl0 (this->tpidr_el0);
cpu::SetElrEl1 (this->elr_el1);
cpu::SetSpEl0 (this->sp_el0);

View File

@@ -299,7 +299,7 @@ namespace ams::kern {
/* Setup page table. */
{
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetSlabIndex()));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@@ -378,7 +378,7 @@ namespace ams::kern {
/* Setup page table. */
{
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit, this->GetSlabIndex()));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
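
Both creation paths now thread the process's slab index into page-table initialization, where it becomes the ASID (offset by one). A slab index suits this because it is a small, stable integer, unique among live processes and bounded by the slab capacity. A generic illustration of that relationship (not the kernel's KSlabHeap accessors):

#include <cstddef>

// Generic sketch: objects carved out of a fixed slab of N slots map to a
// stable index in [0, N) derived from their position in the backing storage.
template<typename T>
constexpr std::size_t SlabIndexOf(const T *obj, const T *slab_base) {
    return static_cast<std::size_t>(obj - slab_base);
}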

View File

@@ -125,6 +125,8 @@ SECTIONS
.gnu.version_r : { *(.gnu.version_r) } :rodata
.note.gnu.build-id : { *(.note.gnu.build-id) } :rodata
__rodata_end = .;
/* =========== DATA section =========== */
. = ALIGN(0x1000);
__data_start = . ;

View File

@@ -15,6 +15,9 @@
*/
#include <mesosphere.hpp>
extern "C" void __rodata_start();
extern "C" void __rodata_end();
extern "C" void __bin_start__();
extern "C" void __bin_end__();
@@ -220,6 +223,31 @@ namespace ams::kern::init {
};
static_assert(kern::arch::arm64::init::IsInitialPageAllocator<KInitialPageAllocatorForFinalizeIdentityMapping>);
void SetupAllTtbr0Entries(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) {
/* Validate that the ttbr0 array is in rodata. */
const uintptr_t rodata_start = reinterpret_cast<uintptr_t>(__rodata_start);
const uintptr_t rodata_end = reinterpret_cast<uintptr_t>(__rodata_end);
MESOSPHERE_INIT_ABORT_UNLESS(rodata_start < rodata_end);
MESOSPHERE_INIT_ABORT_UNLESS(rodata_start <= reinterpret_cast<uintptr_t>(std::addressof(KPageTable::GetTtbr0Entry(0))));
MESOSPHERE_INIT_ABORT_UNLESS(reinterpret_cast<uintptr_t>(std::addressof(KPageTable::GetTtbr0Entry(KPageTable::NumTtbr0Entries))) < rodata_end);
/* Allocate pages for all ttbr0 entries. */
for (size_t i = 0; i < KPageTable::NumTtbr0Entries; ++i) {
/* Allocate a page. */
KPhysicalAddress page = allocator.Allocate(PageSize);
MESOSPHERE_INIT_ABORT_UNLESS(page != Null<KPhysicalAddress>);
/* Check that the page is allowed to be a ttbr0 entry. */
MESOSPHERE_INIT_ABORT_UNLESS((GetInteger(page) & UINT64_C(0xFFFF000000000001)) == 0);
/* Get the physical address of the ttbr0 entry. */
const auto ttbr0_phys_ptr = init_pt.GetPhysicalAddress(KVirtualAddress(std::addressof(KPageTable::GetTtbr0Entry(i))));
/* Set the entry to the newly allocated page. */
*reinterpret_cast<volatile u64 *>(GetInteger(ttbr0_phys_ptr)) = (static_cast<u64>(i) << 48) | GetInteger(page);
}
}
void FinalizeIdentityMapping(KInitialPageTable &init_pt, KInitialPageAllocator &allocator, u64 phys_to_virt_offset) {
/* Create an allocator for identity mapping finalization. */
KInitialPageAllocatorForFinalizeIdentityMapping finalize_allocator(allocator, phys_to_virt_offset);
@@ -591,6 +619,9 @@ namespace ams::kern::init {
/* Create page table object for use during remaining initialization. */
KInitialPageTable init_pt;
/* Setup all ttbr0 pages. */
SetupAllTtbr0Entries(init_pt, g_initial_page_allocator);
/* Unmap the identity mapping. */
FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff);
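
SetupAllTtbr0Entries runs while the identity mapping is still live (it is called just before FinalizeIdentityMapping). Because s_ttbr0_entries is const volatile and sits in rodata, whose kernel mapping is not necessarily writable, each entry's physical address is resolved through init_pt and the value is written through that physical alias. Every allocated page must fit entirely within bits 1..47, which is what the 0xFFFF000000000001 mask enforces, and the stored value is (index << 48) | page, so the table index doubles as the ASID. A compact sketch of the per-entry check and write, with the address lookup reduced to an assumed linear offset instead of the real init_pt.GetPhysicalAddress walk:

#include <cstdint>

// Illustrative per-entry logic; the names and the offset-based address
// translation are assumptions, not the kernel's init page-table API.
constexpr bool IsValidTtbr0Page(std::uint64_t page_phys) {
    // The page must not intrude on the ASID field (bits 48-63) or bit 0 (CnP).
    return (page_phys & UINT64_C(0xFFFF000000000001)) == 0;
}

inline void WriteTtbr0Entry(const volatile std::uint64_t *entry_virt,  // &s_ttbr0_entries[index]
                            std::uint64_t virt_to_phys_offset,         // assumed linear offset
                            std::uint64_t index,                       // table slot, reused as the ASID
                            std::uint64_t page_phys) {                 // freshly allocated, zeroed L1 table
    // The entry's own mapping may be read-only, so write through its physical alias.
    const std::uint64_t entry_phys = reinterpret_cast<std::uint64_t>(entry_virt) + virt_to_phys_offset;
    *reinterpret_cast<volatile std::uint64_t *>(entry_phys) = (index << 48) | page_phys;
}

static_assert(IsValidTtbr0Page(UINT64_C(0x80000000)));
static_assert(!IsValidTtbr0Page(UINT64_C(0x80000001)));   // bit 0 (CnP) set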