Mirror of https://github.com/Atmosphere-NX/Atmosphere.git, synced 2025-01-31 12:13:47 +01:00

kern: implement more of KMemoryManager through KPageBuffer slab init
This commit is contained in:
parent f7d3d50f33
commit d9e6771e63

@@ -40,6 +40,7 @@ namespace ams::kern::init {
     const KSlabResourceCounts &GetSlabResourceCounts();

     size_t CalculateTotalSlabHeapSize();
+    NOINLINE void InitializeKPageBufferSlabHeap();
     NOINLINE void InitializeSlabHeaps();

 }

@@ -467,6 +467,10 @@ namespace ams::kern {
             return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualDramMetadataPool);
         }

+        static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) {
+            return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
+        }
+
         static NOINLINE auto GetCarveoutRegionExtents() {
             return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_CarveoutProtected);
         }

@@ -59,8 +59,28 @@ namespace ams::kern {

                     size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress metadata_region, KVirtualAddress metadata_region_end);

-                    constexpr ALWAYS_INLINE void SetNext(Impl *n) { this->next = n; }
-                    constexpr ALWAYS_INLINE void SetPrev(Impl *n) { this->prev = n; }
+                    KVirtualAddress AllocateBlock(s32 index) { return this->heap.AllocateBlock(index); }
+                    void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); }
+
+                    void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages);
+
+                    constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); }
+
+                    constexpr void SetNext(Impl *n) { this->next = n; }
+                    constexpr void SetPrev(Impl *n) { this->prev = n; }
+                    constexpr Impl *GetNext() const { return this->next; }
+                    constexpr Impl *GetPrev() const { return this->prev; }
+
+                    void Open(KLightLock *pool_locks, KVirtualAddress address, size_t num_pages) {
+                        KScopedLightLock lk(pool_locks[this->pool]);
+
+                        size_t index = this->heap.GetPageOffset(address);
+                        const size_t end = index + num_pages;
+                        while (index < end) {
+                            const RefCount ref_count = (++this->page_reference_counts[index++]);
+                            MESOSPHERE_ABORT_UNLESS(ref_count > 0);
+                        }
+                    }
                 public:
                     static size_t CalculateMetadataOverheadSize(size_t region_size);
             };

@@ -72,6 +92,10 @@ namespace ams::kern {
             size_t num_managers;
             u64 optimized_process_ids[Pool_Count];
             bool has_optimized_process[Pool_Count];
+        private:
+            Impl &GetManager(KVirtualAddress address) {
+                return this->managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+            }
         public:
             constexpr KMemoryManager()
                 : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()

@@ -80,6 +104,19 @@ namespace ams::kern {
             }

             void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
+
+            KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
+
+            void Open(KVirtualAddress address, size_t num_pages) {
+                /* Repeatedly open references until we've done so for all pages. */
+                while (num_pages) {
+                    auto &manager = this->GetManager(address);
+                    const size_t cur_pages = std::min(num_pages, (manager.GetEndAddress() - address) / PageSize);
+                    manager.Open(this->pool_locks, address, cur_pages);
+                    num_pages -= cur_pages;
+                    address += cur_pages * PageSize;
+                }
+            }
         public:
             static size_t CalculateMetadataOverheadSize(size_t region_size) {
                 return Impl::CalculateMetadataOverheadSize(region_size);

@@ -22,6 +22,33 @@ namespace ams::kern {
         private:
             static constexpr inline size_t MemoryBlockPageShifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E };
             static constexpr size_t NumMemoryBlockPageShifts = util::size(MemoryBlockPageShifts);
+        public:
+            static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
+                const size_t target_pages = std::max(num_pages, align_pages);
+                for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+                    if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                        return static_cast<s32>(i);
+                    }
+                }
+                return -1;
+            }
+
+            static constexpr s32 GetBlockIndex(size_t num_pages) {
+                for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+                    if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                        return static_cast<s32>(i);
+                    }
+                }
+                return -1;
+            }
+
+            static constexpr size_t GetBlockSize(size_t index) {
+                return size_t(1) << MemoryBlockPageShifts[index];
+            }
+
+            static constexpr size_t GetBlockNumPages(size_t index) {
+                return GetBlockSize(index) / PageSize;
+            }
         private:
             class Block {
                 private:

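For reference, the shift table above corresponds to block sizes of 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB and 1 GiB, and GetAlignedBlockIndex picks the smallest block that covers both the requested page count and the requested alignment. A minimal standalone sketch of that selection, assuming the usual 4 KiB PageSize and with the kernel-specific types replaced by plain C++ for illustration:

```cpp
#include <algorithm>
#include <cstddef>

// Assumed value for illustration; in the kernel PageSize comes from the arch headers.
constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t MemoryBlockPageShifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E };
constexpr std::size_t NumMemoryBlockPageShifts = sizeof(MemoryBlockPageShifts) / sizeof(MemoryBlockPageShifts[0]);

// Same selection logic as the GetAlignedBlockIndex added in this hunk.
constexpr int GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target_pages = std::max(num_pages, align_pages);
    for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
        if (target_pages <= (std::size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

static_assert(GetAlignedBlockIndex(1, 1) == 0);     // one page fits the 4 KiB block
static_assert(GetAlignedBlockIndex(5, 1) == 1);     // five pages need the 64 KiB block (16 pages)
static_assert(GetAlignedBlockIndex(768, 1) == 3);   // 3 MiB worth of pages needs the 4 MiB block
```
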
@@ -31,27 +58,143 @@ namespace ams::kern {
                 private:
                     u64 *bit_storages[MaxDepth];
                     size_t num_bits;
-                    size_t depth;
+                    size_t used_depths;
                 public:
-                    constexpr Bitmap() : bit_storages(), num_bits(), depth() { /* ... */ }
+                    constexpr Bitmap() : bit_storages(), num_bits(), used_depths() { /* ... */ }

+                    constexpr size_t GetNumBits() const { return this->num_bits; }
+                    constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(this->used_depths) - 1; }
+
                     u64 *Initialize(u64 *storage, size_t size) {
                         /* Initially, everything is un-set. */
                         this->num_bits = 0;

                         /* Calculate the needed bitmap depth. */
-                        this->depth = static_cast<size_t>(GetRequiredDepth(size));
-                        MESOSPHERE_ASSERT(this->depth <= MaxDepth);
+                        this->used_depths = static_cast<size_t>(GetRequiredDepth(size));
+                        MESOSPHERE_ASSERT(this->used_depths <= MaxDepth);

                         /* Set the bitmap pointers. */
-                        for (s32 d = static_cast<s32>(this->depth) - 1; d >= 0; d--) {
-                            this->bit_storages[d] = storage;
+                        for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
+                            this->bit_storages[depth] = storage;
                             size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
                             storage += size;
                         }

                         return storage;
                     }
+
+                    ssize_t FindFreeBlock() const {
+                        uintptr_t offset = 0;
+                        s32 depth = 0;
+
+                        do {
+                            const u64 v = this->bit_storages[depth][offset];
+                            if (v == 0) {
+                                /* If depth is bigger than zero, then a previous level indicated a block was free. */
+                                MESOSPHERE_ASSERT(depth == 0);
+                                return -1;
+                            }
+                            offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v);
+                            ++depth;
+                        } while (depth < static_cast<s32>(this->used_depths));
+
+                        return static_cast<ssize_t>(offset);
+                    }
+
+                    void SetBit(size_t offset) {
+                        this->SetBit(this->GetHighestDepthIndex(), offset);
+                        this->num_bits++;
+                    }
+
+                    void ClearBit(size_t offset) {
+                        this->ClearBit(this->GetHighestDepthIndex(), offset);
+                        this->num_bits--;
+                    }
+
+                    bool ClearRange(size_t offset, size_t count) {
+                        s32 depth = this->GetHighestDepthIndex();
+                        u64 *bits = this->bit_storages[depth];
+                        size_t bit_ind = offset / BITSIZEOF(u64);
+                        if (AMS_LIKELY(count < BITSIZEOF(u64))) {
+                            const size_t shift = offset % BITSIZEOF(u64);
+                            MESOSPHERE_ASSERT(shift + count <= BITSIZEOF(u64));
+                            /* Check that all the bits are set. */
+                            const u64 mask = ((u64(1) << count) - 1) << shift;
+                            u64 v = bits[bit_ind];
+                            if ((v & mask) != mask) {
+                                return false;
+                            }
+
+                            /* Clear the bits. */
+                            v &= ~mask;
+                            bits[bit_ind] = v;
+                            if (v == 0) {
+                                this->ClearBit(depth - 1, bit_ind);
+                            }
+                        } else {
+                            MESOSPHERE_ASSERT(offset % BITSIZEOF(u64) == 0);
+                            MESOSPHERE_ASSERT(count % BITSIZEOF(u64) == 0);
+                            /* Check that all the bits are set. */
+                            size_t remaining = count;
+                            size_t i = 0;
+                            do {
+                                if (bits[bit_ind + i++] != ~u64(0)) {
+                                    return false;
+                                }
+                                remaining -= BITSIZEOF(u64);
+                            } while (remaining > 0);
+
+                            /* Clear the bits. */
+                            remaining = count;
+                            i = 0;
+                            do {
+                                bits[bit_ind + i] = 0;
+                                this->ClearBit(depth - 1, bit_ind + i);
+                                i++;
+                                remaining -= BITSIZEOF(u64);
+                            } while (remaining > 0);
+                        }
+
+                        this->num_bits -= count;
+                        return true;
+                    }
+                private:
+                    void SetBit(s32 depth, size_t offset) {
+                        while (depth >= 0) {
+                            size_t ind = offset / BITSIZEOF(u64);
+                            size_t which = offset % BITSIZEOF(u64);
+                            const u64 mask = u64(1) << which;
+
+                            u64 *bit = std::addressof(this->bit_storages[depth][ind]);
+                            u64 v = *bit;
+                            MESOSPHERE_ASSERT((v & mask) == 0);
+                            *bit = v | mask;
+                            if (v) {
+                                break;
+                            }
+                            offset = ind;
+                            depth--;
+                        }
+                    }
+
+                    void ClearBit(s32 depth, size_t offset) {
+                        while (depth >= 0) {
+                            size_t ind = offset / BITSIZEOF(u64);
+                            size_t which = offset % BITSIZEOF(u64);
+                            const u64 mask = u64(1) << which;
+
+                            u64 *bit = std::addressof(this->bit_storages[depth][ind]);
+                            u64 v = *bit;
+                            MESOSPHERE_ASSERT((v & mask) != 0);
+                            v &= ~mask;
+                            *bit = v;
+                            if (v) {
+                                break;
+                            }
+                            offset = ind;
+                            depth--;
+                        }
+                    }
                 private:
                     static constexpr s32 GetRequiredDepth(size_t region_size) {
                         s32 depth = 0;

@@ -82,6 +225,13 @@ namespace ams::kern {
                 public:
                     constexpr Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ }

+                    constexpr size_t GetShift() const { return this->block_shift; }
+                    constexpr size_t GetNextShift() const { return this->next_block_shift; }
+                    constexpr size_t GetSize() const { return u64(1) << this->GetShift(); }
+                    constexpr size_t GetNumPages() const { return this->GetSize() / PageSize; }
+                    constexpr size_t GetNumFreeBlocks() const { return this->bitmap.GetNumBits(); }
+                    constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }
+
                     u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
                         /* Set shifts. */
                         this->block_shift = bs;

@@ -89,18 +239,49 @@ namespace ams::kern {

                         /* Align up the address. */
                         KVirtualAddress end = addr + size;
-                        const size_t align = (this->next_block_shift != 0) ? (1ul << this->next_block_shift) : (this->block_shift);
+                        const size_t align = (this->next_block_shift != 0) ? (u64(1) << this->next_block_shift) : (this->block_shift);
                         addr = util::AlignDown(GetInteger(addr), align);
                         end = util::AlignUp(GetInteger(end), align);

                         this->heap_address = addr;
-                        this->end_offset = (end - addr) / (1ul << this->block_shift);
+                        this->end_offset = (end - addr) / (u64(1) << this->block_shift);
                         return this->bitmap.Initialize(bit_storage, this->end_offset);
                     }
+
+                    KVirtualAddress PushBlock(KVirtualAddress address) {
+                        /* Set the bit for the free block. */
+                        size_t offset = (address - this->heap_address) >> this->GetShift();
+                        this->bitmap.SetBit(offset);
+
+                        /* If we have a next shift, try to clear the blocks below this one and return the new address. */
+                        if (this->GetNextShift()) {
+                            const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
+                            offset = util::AlignDown(offset, diff);
+                            if (this->bitmap.ClearRange(offset, diff)) {
+                                return this->heap_address + (offset << this->GetShift());
+                            }
+                        }
+
+                        /* We couldn't coalesce, or we're already as big as possible. */
+                        return Null<KVirtualAddress>;
+                    }
+
+                    KVirtualAddress PopBlock() {
+                        /* Find a free block. */
+                        ssize_t soffset = this->bitmap.FindFreeBlock();
+                        if (soffset < 0) {
+                            return Null<KVirtualAddress>;
+                        }
+                        const size_t offset = static_cast<size_t>(soffset);
+
+                        /* Update our tracking and return it. */
+                        this->bitmap.ClearBit(offset);
+                        return this->heap_address + (offset << this->GetShift());
+                    }
                 public:
                     static constexpr size_t CalculateMetadataOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {
-                        const size_t cur_block_size = (1ul << cur_block_shift);
-                        const size_t next_block_size = (1ul << next_block_shift);
+                        const size_t cur_block_size = (u64(1) << cur_block_shift);
+                        const size_t next_block_size = (u64(1) << next_block_shift);
                         const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
                         return Bitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
                     }

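A note on the coalescing arithmetic in PushBlock above: the freed block's offset is aligned down to the start of its would-be parent block, and ClearRange only succeeds when every child bit in that range is set, in which case the parent's address is returned so the caller can push it into the next-larger Block. A small sketch of just that arithmetic, using example shifts of 12 (4 KiB child) and 16 (64 KiB parent) rather than values taken from the commit:

```cpp
#include <cassert>
#include <cstddef>

int main() {
    // Example shifts, for illustration only.
    const std::size_t block_shift      = 12;  // 4 KiB child blocks
    const std::size_t next_block_shift = 16;  // 64 KiB parent blocks

    // Number of child blocks per parent block: 1 << (16 - 12) == 16.
    const std::size_t diff = std::size_t(1) << (next_block_shift - block_shift);

    // A freed child at offset 37 belongs to the parent covering child offsets [32, 48).
    std::size_t offset = 37;
    offset = (offset / diff) * diff;  // equivalent of util::AlignDown(offset, diff)
    assert(offset == 32);

    // If ClearRange(offset, diff) finds all 16 child bits set, it clears them and
    // PushBlock returns heap_address + (offset << block_shift): the parent's address.
    return 0;
}
```
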
@@ -113,12 +294,25 @@ namespace ams::kern {
             Block blocks[NumMemoryBlockPageShifts];
         private:
             void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts);
+            size_t GetNumFreePages() const;
+
+            void FreeBlock(KVirtualAddress block, s32 index);
         public:
             constexpr KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }

+            constexpr KVirtualAddress GetEndAddress() const { return this->heap_address + this->heap_size; }
+            constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->heap_address) / PageSize; }
+
             void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) {
                 return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
             }
+
+            void UpdateUsedSize() {
+                this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize);
+            }
+
+            KVirtualAddress AllocateBlock(s32 index);
+            void Free(KVirtualAddress addr, size_t num_pages);
         private:
             static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
         public:

@@ -135,6 +135,26 @@ namespace ams::kern::init {
         return size;
     }

+    void InitializeKPageBufferSlabHeap() {
+        const auto &counts = GetSlabResourceCounts();
+        const size_t num_pages = counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+        const size_t slab_size = num_pages * PageSize;
+
+        /* Reserve memory from the system resource limit. */
+        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, slab_size));
+
+        /* Allocate memory for the slab. */
+        constexpr auto AllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
+        const KVirtualAddress slab_address = Kernel::GetMemoryManager().AllocateContinuous(num_pages, 1, AllocateOption);
+        MESOSPHERE_ABORT_UNLESS(slab_address != Null<KVirtualAddress>);
+
+        /* Open references to the slab. */
+        Kernel::GetMemoryManager().Open(slab_address, num_pages);
+
+        /* Initialize the slabheap. */
+        KPageBuffer::InitializeSlabHeap(GetVoidPointer(slab_address), slab_size);
+    }
+
     void InitializeSlabHeaps() {
         /* Get the start of the slab region, since that's where we'll be working. */
         KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress();

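For a sense of scale, the page count computed above is one page per KProcess and KThread slab entry plus one eighth extra slack. A small worked example with hypothetical counts (the real values come from GetSlabResourceCounts() and are not part of this diff):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
    // Hypothetical counts, for illustration only.
    const std::size_t num_KProcess = 80;
    const std::size_t num_KThread  = 800;
    const std::size_t PageSize     = 0x1000;  // assumed 4 KiB pages

    // Same formula as InitializeKPageBufferSlabHeap in the hunk above.
    const std::size_t num_pages = num_KProcess + num_KThread + (num_KProcess + num_KThread) / 8;
    const std::size_t slab_size = num_pages * PageSize;

    std::printf("num_pages = %zu, slab_size = %zu bytes\n", num_pages, slab_size);
    // Prints: num_pages = 990, slab_size = 4055040 bytes (~3.87 MiB)
    return 0;
}
```
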
@@ -87,6 +87,57 @@ namespace ams::kern {
         }
     }

+    KVirtualAddress KMemoryManager::AllocateContinuous(size_t num_pages, size_t align_pages, u32 option) {
+        /* Early return if we're allocating no pages. */
+        if (num_pages == 0) {
+            return Null<KVirtualAddress>;
+        }
+
+        /* Lock the pool that we're allocating from. */
+        const auto [pool, dir] = DecodeOption(option);
+        KScopedLightLock lk(this->pool_locks[pool]);
+
+        /* Choose a heap based on our page size request. */
+        const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
+
+        /* Loop, trying to iterate from each block. */
+        Impl *chosen_manager = nullptr;
+        KVirtualAddress allocated_block = Null<KVirtualAddress>;
+        if (dir == Direction_FromBack) {
+            for (chosen_manager = this->pool_managers_tail[pool]; chosen_manager != nullptr; chosen_manager = chosen_manager->GetPrev()) {
+                allocated_block = chosen_manager->AllocateBlock(heap_index);
+                if (allocated_block != Null<KVirtualAddress>) {
+                    break;
+                }
+            }
+        } else {
+            for (chosen_manager = this->pool_managers_head[pool]; chosen_manager != nullptr; chosen_manager = chosen_manager->GetNext()) {
+                allocated_block = chosen_manager->AllocateBlock(heap_index);
+                if (allocated_block != Null<KVirtualAddress>) {
+                    break;
+                }
+            }
+        }
+
+        /* If we failed to allocate, quit now. */
+        if (allocated_block == Null<KVirtualAddress>) {
+            return Null<KVirtualAddress>;
+        }
+
+        /* If we allocated more than we need, free some. */
+        const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
+        if (allocated_pages > num_pages) {
+            chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+        }
+
+        /* Maintain the optimized memory bitmap, if we should. */
+        if (this->has_optimized_process[pool]) {
+            chosen_manager->TrackAllocationForOptimizedProcess(allocated_block, num_pages);
+        }
+
+        return allocated_block;
+    }
+
     size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
         /* Calculate metadata sizes. */
         const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);

@@ -107,9 +158,25 @@ namespace ams::kern {
         /* Initialize the manager's KPageHeap. */
         this->heap.Initialize(region->GetAddress(), region->GetSize(), metadata + manager_size, page_heap_size);

+        /* Free the memory to the heap. */
+        this->heap.Free(region->GetAddress(), region->GetSize() / PageSize);
+
+        /* Update the heap's used size. */
+        this->heap.UpdateUsedSize();
+
         return total_metadata_size;
     }

+    void KMemoryManager::Impl::TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages) {
+        size_t offset = this->heap.GetPageOffset(block);
+        const size_t last = offset + num_pages - 1;
+        u64 *optimize_map = GetPointer<u64>(this->metadata_region);
+        while (offset <= last) {
+            optimize_map[offset / BITSIZEOF(u64)] &= ~(u64(1) << (offset % BITSIZEOF(u64)));
+            offset++;
+        }
+    }
+
     size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) {
         const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
         const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);

@@ -41,6 +41,87 @@ namespace ams::kern {
         MESOSPHERE_ABORT_UNLESS(KVirtualAddress(cur_bitmap_storage) <= metadata_end);
     }

+    size_t KPageHeap::GetNumFreePages() const {
+        size_t num_free = 0;
+
+        for (size_t i = 0; i < this->num_blocks; i++) {
+            num_free += this->blocks[i].GetNumFreePages();
+        }
+
+        return num_free;
+    }
+
+    KVirtualAddress KPageHeap::AllocateBlock(s32 index) {
+        const size_t needed_size = this->blocks[index].GetSize();
+
+        for (s32 i = index; i < static_cast<s32>(this->num_blocks); i++) {
+            if (const KVirtualAddress addr = this->blocks[index].PopBlock(); addr != Null<KVirtualAddress>) {
+                if (const size_t allocated_size = this->blocks[index].GetSize(); allocated_size > needed_size) {
+                    this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
+                }
+                return addr;
+            }
+        }
+
+        return Null<KVirtualAddress>;
+    }
+
+    void KPageHeap::FreeBlock(KVirtualAddress block, s32 index) {
+        do {
+            block = this->blocks[index++].PushBlock(block);
+        } while (block != Null<KVirtualAddress>);
+    }
+
+    void KPageHeap::Free(KVirtualAddress addr, size_t num_pages) {
+        /* Freeing no pages is a no-op. */
+        if (num_pages == 0) {
+            return;
+        }
+
+        /* Find the largest block size that we can free, and free as many as possible. */
+        s32 big_index = static_cast<s32>(this->num_blocks) - 1;
+        const KVirtualAddress start = addr;
+        const KVirtualAddress end = addr + num_pages * PageSize;
+        KVirtualAddress before_start = start;
+        KVirtualAddress before_end = start;
+        KVirtualAddress after_start = end;
+        KVirtualAddress after_end = end;
+        while (big_index >= 0) {
+            const size_t block_size = this->blocks[big_index].GetSize();
+            const KVirtualAddress big_start = util::AlignUp(GetInteger(start), block_size);
+            const KVirtualAddress big_end = util::AlignDown(GetInteger(end), block_size);
+            if (big_start < big_end) {
+                /* Free as many big blocks as we can. */
+                for (auto block = big_start; block < big_end; block += block_size) {
+                    this->FreeBlock(block, big_index);
+                }
+                before_end = big_start;
+                after_start = big_end;
+                break;
+            }
+            big_index--;
+        }
+        MESOSPHERE_ASSERT(big_index >= 0);
+
+        /* Free space before the big blocks. */
+        for (s32 i = big_index; i >= 0; i--) {
+            const size_t block_size = this->blocks[i].GetSize();
+            while (before_start + block_size <= before_end) {
+                before_end -= block_size;
+                this->FreeBlock(before_end, i);
+            }
+        }
+
+        /* Free space after the big blocks. */
+        for (s32 i = big_index; i >= 0; i--) {
+            const size_t block_size = this->blocks[i].GetSize();
+            while (after_start + block_size <= after_end) {
+                after_start += block_size;
+                this->FreeBlock(after_start, i);
+            }
+        }
+    }
+
     size_t KPageHeap::CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
         size_t overhead_size = 0;
         for (size_t i = 0; i < num_block_shifts; i++) {

@@ -38,10 +38,11 @@ namespace ams::kern {
         /* Initialize KSystemControl. */
         KSystemControl::Initialize();

-        /* Initialize the memory manager. */
+        /* Initialize the memory manager and the KPageBuffer slabheap. */
         {
             const auto &metadata_region = KMemoryLayout::GetMetadataPoolRegion();
             Kernel::GetMemoryManager().Initialize(metadata_region.GetAddress(), metadata_region.GetSize());
+            init::InitializeKPageBufferSlabHeap();
         }

         /* Note: this is not actually done here, it's done later in main after more stuff is setup. */