kern: SvcReadDebugProcessMemory, SvcWriteDebugProcessMemory

Michael Scire 2020-07-30 03:37:40 -07:00 committed by SciresM
parent 1ffe08672d
commit f6f43300e0
7 changed files with 669 additions and 4 deletions


@@ -132,6 +132,14 @@ namespace ams::kern::arch::arm64 {
return this->page_table.InvalidateProcessDataCache(address, size);
}
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
return this->page_table.ReadDebugMemory(buffer, address, size);
}
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
return this->page_table.WriteDebugMemory(address, buffer, size);
}
Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
return this->page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
}


@@ -39,6 +39,8 @@ namespace ams::kern {
Result Attach(KProcess *process);
Result QueryMemoryInfo(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, KProcessAddress address);
Result ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size);
Result WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size);
Result GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out);
Result GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out);


@@ -324,6 +324,9 @@ namespace ams::kern {
Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size);
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size);
Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size);


@@ -597,6 +597,276 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm:
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory32Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm:
/* Check if we have any work to do. */
cmp x2, #0
b.eq 3f
/* Save variables in temporary registers. */
mov x4, x0
mov x5, x1
mov x6, x2
add x7, x5, x6
/* Save our return address. */
mov x8, x30
1: /* Set our return address so that on read failure we continue as though we read -1. */
adr x30, 4f
/* Read the word from io. */
ldtr w9, [x5]
dsb sy
nop
2: /* Restore our return address. */
mov x30, x8
/* Write the value we read. */
sttr w9, [x4]
/* Advance. */
add x4, x4, #4
add x5, x5, #4
cmp x5, x7
b.ne 1b
3: /* We're done! */
mov x0, #1
ret
4: /* We failed to read a value, so continue as though we read -1. */
mov w9, #0xFFFFFFFF
b 2b
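
Editor's note: the read routines above use unprivileged loads and stores (ldtr/sttr) because both the destination buffer and the temporary IO mapping are user-accessible, and they re-point x30 at a recovery label before each device load so that a faulting read resumes there and the element is reported as all-ones instead of aborting the copy. Below is a standalone C++ sketch of that per-element behaviour; all names (ReadIoMemory32BitSketch, try_read) are illustrative, with try_read standing in for the fault handling the real code gets from the exception vector.

    #include <cstdint>
    #include <cstddef>

    /* Illustrative sketch only (not the kernel's code): copy 32-bit elements from IO,
       substituting 0xFFFFFFFF for any element whose read fails. */
    bool ReadIoMemory32BitSketch(void *dst, const void *src, size_t size,
                                 bool (*try_read)(const volatile uint32_t *, uint32_t *)) {
        auto *out = static_cast<uint32_t *>(dst);
        auto *in  = static_cast<const volatile uint32_t *>(src);
        for (size_t i = 0; i < size / sizeof(uint32_t); ++i) {
            uint32_t value;
            if (!try_read(in + i, &value)) {
                value = 0xFFFFFFFF;   /* failed device reads are reported as all-ones */
            }
            out[i] = value;
        }
        return true;                  /* the asm routine always reports success for reads */
    }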
/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory16Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm:
/* Check if we have any work to do. */
cmp x2, #0
b.eq 3f
/* Save variables in temporary registers. */
mov x4, x0
mov x5, x1
mov x6, x2
add x7, x5, x6
/* Save our return address. */
mov x8, x30
1: /* Set our return address so that on read failure we continue as though we read -1. */
adr x30, 4f
/* Read the halfword from io. */
ldtrh w9, [x5]
dsb sy
nop
2: /* Restore our return address. */
mov x30, x8
/* Write the value we read. */
sttrh w9, [x4]
/* Advance. */
add x4, x4, #2
add x5, x5, #2
cmp x5, x7
b.ne 1b
3: /* We're done! */
mov x0, #1
ret
4: /* We failed to read a value, so continue as though we read -1. */
mov w9, #0xFFFFFFFF
b 2b
/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory8Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm:
/* Check if we have any work to do. */
cmp x2, #0
b.eq 3f
/* Save variables in temporary registers. */
mov x4, x0
mov x5, x1
mov x6, x2
add x7, x5, x6
/* Save our return address. */
mov x8, x30
1: /* Set our return address so that on read failure we continue as though we read -1. */
adr x30, 4f
/* Read the byte from io. */
ldtrb w9, [x5]
dsb sy
nop
2: /* Restore our return address. */
mov x30, x8
/* Write the value we read. */
sttrb w9, [x4]
/* Advance. */
add x4, x4, #1
add x5, x5, #1
cmp x5, x7
b.ne 1b
3: /* We're done! */
mov x0, #1
ret
4: /* We failed to read a value, so continue as though we read -1. */
mov w9, #0xFFFFFFFF
b 2b
/* ams::kern::arch::arm64::UserspaceAccess::WriteIoMemory32Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm:
/* Check if we have any work to do. */
cmp x2, #0
b.eq 3f
/* Save variables in temporary registers. */
mov x4, x0
mov x5, x1
mov x6, x2
add x7, x5, x6
/* Save our return address. */
mov x8, x30
1: /* Read the word from normal memory. */
mov x30, x8
ldtr w9, [x5]
/* Set our return address so that on write failure we continue. */
adr x30, 2f
/* Write the word to io. */
sttr w9, [x4]
dsb sy
2: /* Continue. */
nop
/* Advance. */
add x4, x4, #4
add x5, x5, #4
cmp x5, x7
b.ne 1b
3: /* We're done! */
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::WriteIoMemory16Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm:
/* Check if we have any work to do. */
cmp x2, #0
b.eq 3f
/* Save variables in temporary registers. */
mov x4, x0
mov x5, x1
mov x6, x2
add x7, x5, x6
/* Save our return address. */
mov x8, x30
1: /* Read the halfword from normal memory. */
mov x30, x8
ldtrh w9, [x5]
/* Set our return address so that on write failure we continue. */
adr x30, 2f
/* Write the halfword to io. */
sttrh w9, [x4]
dsb sy
2: /* Continue. */
nop
/* Advance. */
add x4, x4, #2
add x5, x5, #2
cmp x5, x7
b.ne 1b
3: /* We're done! */
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::WriteIoMemory8Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm:
/* Check if we have any work to do. */
cmp x2, #0
b.eq 3f
/* Save variables in temporary registers. */
mov x4, x0
mov x5, x1
mov x6, x2
add x7, x5, x6
/* Save our return address. */
mov x8, x30
1: /* Read the byte from normal memory. */
mov x30, x8
ldtrb w9, [x5]
/* Set our return address so that on write failure we continue. */
adr x30, 2f
/* Write the byte to io. */
sttrb w9, [x4]
dsb sy
2: /* Continue. */
nop
/* Advance. */
add x4, x4, #1
add x5, x5, #1
cmp x5, x7
b.ne 1b
3: /* We're done! */
mov x0, #1
ret
/* ================ All Userspace Access Functions before this line. ================ */
/* ams::kern::arch::arm64::UserspaceAccessFunctionAreaEnd() */
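
Editor's note: separate 32/16/8-bit variants exist because device memory must be accessed at a width the mapping tolerates; the KDebugBase code further below picks a variant from the combined alignment of the guest address and the transfer length. A minimal C++ sketch of that selection follows; the IoWidth enum and SelectIoAccessWidth name are illustrative, not kernel symbols.

    #include <cstdint>
    #include <cstddef>

    /* Illustrative sketch only: choose the widest IO accessor that both the address
       and the size are aligned to, mirroring the `(address | size) & 3` dispatch. */
    enum class IoWidth { Bit32, Bit16, Bit8 };

    IoWidth SelectIoAccessWidth(uintptr_t address, size_t size) {
        switch ((address | size) & 3) {
            case 0:  return IoWidth::Bit32;   /* both 4-byte aligned */
            case 2:  return IoWidth::Bit16;   /* both 2-byte aligned */
            default: return IoWidth::Bit8;    /* fall back to byte accesses */
        }
    }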


@@ -48,6 +48,192 @@ namespace ams::kern {
return ResultSuccess();
}
Result KDebugBase::ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size) {
/* Lock ourselves. */
KScopedLightLock lk(this->lock);
/* Check that we have a valid process. */
R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated());
R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated());
/* Get the page tables. */
KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable();
KProcessPageTable &target_pt = this->process->GetPageTable();
/* Verify that the regions are in range. */
R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(debugger_pt.Contains(buffer, size), svc::ResultInvalidCurrentMemory());
/* Iterate over the target process's memory blocks. */
KProcessAddress cur_address = address;
size_t remaining = size;
while (remaining > 0) {
/* Get the current memory info. */
KMemoryInfo info;
ams::svc::PageInfo pi;
R_TRY(target_pt.QueryInfo(std::addressof(info), std::addressof(pi), cur_address));
/* Check that the memory is accessible. */
R_UNLESS(info.GetState() != static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible), svc::ResultInvalidAddress());
/* Get the current size. */
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Read the memory. */
if (info.GetState() != KMemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size));
} else {
/* The memory is IO memory. */
/* Verify that the memory is readable. */
R_UNLESS((info.GetPermission() & KMemoryPermission_UserRead) == KMemoryPermission_UserRead, svc::ResultInvalidAddress());
/* Get the physical address of the memory. */
/* NOTE: Nintendo does not verify the result of this call. */
KPhysicalAddress phys_addr;
target_pt.GetPhysicalAddress(std::addressof(phys_addr), cur_address);
/* Map the address as IO in the current process. */
R_TRY(debugger_pt.MapIo(util::AlignDown(GetInteger(phys_addr), PageSize), PageSize, KMemoryPermission_UserRead));
/* Get the address of the newly mapped IO. */
KProcessAddress io_address;
Result query_result = debugger_pt.QueryIoMapping(std::addressof(io_address), util::AlignDown(GetInteger(phys_addr), PageSize), PageSize);
MESOSPHERE_R_ASSERT(query_result);
R_TRY(query_result);
/* Ensure we clean up the new mapping on scope exit. */
ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(debugger_pt.UnmapPages(util::AlignDown(GetInteger(io_address), PageSize), 1, KMemoryState_Io)); };
/* Adjust the io address for alignment. */
io_address += (GetInteger(cur_address) & (PageSize - 1));
/* Get the readable size. */
const size_t readable_size = std::min(cur_size, util::AlignDown(GetInteger(cur_address) + PageSize, PageSize) - GetInteger(cur_address));
/* Read the memory. */
switch ((GetInteger(cur_address) | readable_size) & 3) {
case 0:
{
R_UNLESS(UserspaceAccess::ReadIoMemory32Bit(GetVoidPointer(buffer), GetVoidPointer(io_address), readable_size), svc::ResultInvalidPointer());
}
break;
case 2:
{
R_UNLESS(UserspaceAccess::ReadIoMemory16Bit(GetVoidPointer(buffer), GetVoidPointer(io_address), readable_size), svc::ResultInvalidPointer());
}
break;
default:
{
R_UNLESS(UserspaceAccess::ReadIoMemory8Bit(GetVoidPointer(buffer), GetVoidPointer(io_address), readable_size), svc::ResultInvalidPointer());
}
break;
}
}
/* Advance. */
buffer += cur_size;
cur_address += cur_size;
remaining -= cur_size;
}
return ResultSuccess();
}
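
Editor's note: for IO regions, KDebugBase::ReadMemory maps the single physical page backing the current target address into the debugger's own page table, reads through that temporary mapping, and unmaps it on scope exit, so each iteration is clamped to the bytes left before the next page boundary. A small self-contained sketch of that clamp follows (assuming the 4 KiB page size used here; BytesUntilPageEnd is an illustrative name), with compile-time checks as worked examples.

    #include <cstddef>
    #include <cstdint>

    /* Illustrative sketch only: bytes accessible from `address` without crossing a page. */
    constexpr size_t PageSize = 0x1000;

    constexpr size_t BytesUntilPageEnd(uintptr_t address, size_t remaining) {
        const uintptr_t next_page = (address + PageSize) & ~(PageSize - 1);  /* AlignDown(address + PageSize, PageSize) */
        return remaining < (next_page - address) ? remaining : (next_page - address);
    }

    static_assert(BytesUntilPageEnd(0x1000, 0x5000) == 0x1000, "aligned start: a full page");
    static_assert(BytesUntilPageEnd(0x1FF0, 0x5000) == 0x10,   "16 bytes left in the page");
    static_assert(BytesUntilPageEnd(0x1FF0, 0x8)    == 0x8,    "clamped by the remaining size");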
Result KDebugBase::WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size) {
/* Lock ourselves. */
KScopedLightLock lk(this->lock);
/* Check that we have a valid process. */
R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated());
R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated());
/* Get the page tables. */
KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable();
KProcessPageTable &target_pt = this->process->GetPageTable();
/* Verify that the regions are in range. */
R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(debugger_pt.Contains(buffer, size), svc::ResultInvalidCurrentMemory());
/* Iterate over the target process's memory blocks. */
KProcessAddress cur_address = address;
size_t remaining = size;
while (remaining > 0) {
/* Get the current memory info. */
KMemoryInfo info;
ams::svc::PageInfo pi;
R_TRY(target_pt.QueryInfo(std::addressof(info), std::addressof(pi), cur_address));
/* Check that the memory is accessible. */
R_UNLESS(info.GetState() != static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible), svc::ResultInvalidAddress());
/* Get the current size. */
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Write the memory. */
if (info.GetState() != KMemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.WriteDebugMemory(cur_address, GetVoidPointer(buffer), cur_size));
} else {
/* The memory is IO memory. */
/* Verify that the memory is writable. */
R_UNLESS((info.GetPermission() & KMemoryPermission_UserReadWrite) == KMemoryPermission_UserReadWrite, svc::ResultInvalidAddress());
/* Get the physical address of the memory. */
/* NOTE: Nintendo does not verify the result of this call. */
KPhysicalAddress phys_addr;
target_pt.GetPhysicalAddress(std::addressof(phys_addr), cur_address);
/* Map the address as IO in the current process. */
R_TRY(debugger_pt.MapIo(util::AlignDown(GetInteger(phys_addr), PageSize), PageSize, KMemoryPermission_UserReadWrite));
/* Get the address of the newly mapped IO. */
KProcessAddress io_address;
Result query_result = debugger_pt.QueryIoMapping(std::addressof(io_address), util::AlignDown(GetInteger(phys_addr), PageSize), PageSize);
MESOSPHERE_R_ASSERT(query_result);
R_TRY(query_result);
/* Ensure we clean up the new mapping on scope exit. */
ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(debugger_pt.UnmapPages(util::AlignDown(GetInteger(io_address), PageSize), 1, KMemoryState_Io)); };
/* Adjust the io address for alignment. */
io_address += (GetInteger(cur_address) & (PageSize - 1));
/* Get the readable size. */
const size_t readable_size = std::min(cur_size, util::AlignDown(GetInteger(cur_address) + PageSize, PageSize) - GetInteger(cur_address));
/* Write the memory. */
switch ((GetInteger(cur_address) | readable_size) & 3) {
case 0:
{
R_UNLESS(UserspaceAccess::WriteIoMemory32Bit(GetVoidPointer(io_address), GetVoidPointer(buffer), readable_size), svc::ResultInvalidPointer());
}
break;
case 2:
{
R_UNLESS(UserspaceAccess::WriteIoMemory16Bit(GetVoidPointer(io_address), GetVoidPointer(buffer), readable_size), svc::ResultInvalidPointer());
}
break;
default:
{
R_UNLESS(UserspaceAccess::WriteIoMemory8Bit(GetVoidPointer(io_address), GetVoidPointer(buffer), readable_size), svc::ResultInvalidPointer());
}
break;
}
}
/* Advance. */
buffer += cur_size;
cur_address += cur_size;
remaining -= cur_size;
}
return ResultSuccess();
}
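
Editor's note: both ReadMemory and WriteMemory lean on ON_SCOPE_EXIT to tear down the temporary IO mapping however the block exits. The sketch below shows the general scope-guard idiom behind that macro; it is a generic illustration (the ScopeExit class and Example function are made up here), not mesosphere's actual implementation, which additionally aborts if the unmap itself fails.

    #include <utility>
    #include <cstdio>

    /* Illustrative sketch only: a minimal scope-exit guard in the spirit of ON_SCOPE_EXIT. */
    template <typename F>
    class ScopeExit {
        F f;
    public:
        explicit ScopeExit(F func) : f(std::move(func)) {}
        ~ScopeExit() { f(); }
        ScopeExit(const ScopeExit &) = delete;
        ScopeExit &operator=(const ScopeExit &) = delete;
    };

    void Example() {
        std::puts("map temporary io page");
        ScopeExit cleanup([] { std::puts("unmap temporary io page"); });
        std::puts("access the mapping");
    }   /* the guard's destructor runs here, on every exit path from the scope */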
Result KDebugBase::Attach(KProcess *target) {
/* Check that the process isn't null. */
MESOSPHERE_ASSERT(target != nullptr);


@@ -1889,6 +1889,170 @@ namespace ams::kern {
return ResultSuccess();
}
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
/* Lightly validate the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Require that the memory either be user readable or debuggable. */
const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_read) {
const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory());
}
/* Get the impl. */
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(buffer, GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size), svc::ResultInvalidPointer());
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + copy_size);
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryToUser(buffer, GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidPointer());
}
return ResultSuccess();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
return ResultSuccess();
}
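
Editor's note: KPageTableBase::ReadDebugMemory walks the target's page table one translation entry at a time and merges entries whose physical ranges are contiguous, so each PerformCopy call covers the largest possible linear-mapped span, and the final block is trimmed back to the requested size. A simplified, self-contained sketch of that coalescing loop over hypothetical (physical address, size) entries; Block, CopyCoalesced, and the copy callback are illustrative names.

    #include <cstdint>
    #include <cstddef>
    #include <vector>

    /* Illustrative sketch only: merge physically contiguous blocks before copying,
       in the spirit of the traversal loop above. `copy` stands in for PerformCopy. */
    struct Block { uint64_t phys_addr; size_t size; };

    template <typename CopyBlock>
    void CopyCoalesced(const std::vector<Block> &blocks, size_t total, CopyBlock copy) {
        if (blocks.empty() || total == 0) {
            return;
        }
        uint64_t cur_addr = blocks[0].phys_addr;
        size_t   cur_size = blocks[0].size;
        size_t   tot_size = cur_size;
        for (size_t i = 1; tot_size < total && i < blocks.size(); ++i) {
            const Block &next = blocks[i];
            if (next.phys_addr != cur_addr + cur_size) {
                copy(cur_addr, cur_size);          /* flush the span accumulated so far */
                cur_addr = next.phys_addr;
                cur_size = next.size;
            } else {
                cur_size += next.size;             /* physically contiguous: keep growing */
            }
            tot_size += next.size;
        }
        if (tot_size > total) {
            cur_size -= (tot_size - total);        /* trim the last block to the request */
        }
        copy(cur_addr, cur_size);
    }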
Result KPageTableBase::WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
/* Lightly validate the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Require that the memory either be user writable or debuggable. */
const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_write) {
const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory());
}
/* Get the impl. */
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, copy_size), svc::ResultInvalidCurrentMemory());
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + copy_size);
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, cur_size), svc::ResultInvalidCurrentMemory());
}
return ResultSuccess();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
return ResultSuccess();
}
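
Editor's note: within each span, PerformCopy first copies the largest 4-byte-aligned prefix with the Aligned32Bit helper and then falls back to a plain copy for the tail. A minimal sketch of that split follows, with std::memcpy standing in for the UserspaceAccess helpers (CopySplitAligned is an illustrative name).

    #include <cstring>
    #include <cstdint>
    #include <cstddef>

    /* Illustrative sketch only: copy the largest 32-bit-aligned prefix first, then the tail. */
    void CopySplitAligned(void *dst, const void *src, size_t size) {
        const size_t aligned = size & ~static_cast<size_t>(3);   /* util::AlignDown(size, sizeof(u32)) */
        if (aligned > 0) {
            std::memcpy(dst, src, aligned);                      /* stands in for the Aligned32Bit copy */
        }
        if (size > aligned) {
            std::memcpy(static_cast<uint8_t *>(dst) + aligned,
                        static_cast<const uint8_t *>(src) + aligned,
                        size - aligned);                         /* stands in for the unaligned tail copy */
        }
    }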
Result KPageTableBase::LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;


@@ -120,6 +120,38 @@ namespace ams::kern::svc {
return ResultSuccess();
}
Result ReadDebugProcessMemory(uintptr_t buffer, ams::svc::Handle debug_handle, uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((buffer < buffer + size), svc::ResultInvalidCurrentMemory());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Read the memory. */
R_TRY(debug->ReadMemory(buffer, address, size));
return ResultSuccess();
}
Result WriteDebugProcessMemory(ams::svc::Handle debug_handle, uintptr_t buffer, uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((buffer < buffer + size), svc::ResultInvalidCurrentMemory());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Write the memory. */
R_TRY(debug->WriteMemory(buffer, address, size));
return ResultSuccess();
}
}
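
Editor's note: from userspace, these handlers back the debug-memory SVCs a debugger issues against a handle obtained from svcDebugActiveProcess. A hedged usage sketch follows; the svcReadDebugProcessMemory / svcWriteDebugProcessMemory wrappers and their signatures are assumed to match libnx, and PatchWord is an illustrative helper, none of it taken from this commit.

    #include <switch.h>   /* libnx (assumption: a homebrew debugger built against libnx) */
    #include <cstdio>

    /* Hedged usage sketch: read 4 bytes from a debugged process, then write a new value back. */
    Result PatchWord(u64 pid, u64 address, u32 new_value) {
        Handle debug;
        Result rc = svcDebugActiveProcess(&debug, pid);   /* obtain a debug handle to the target */
        if (R_FAILED(rc)) {
            return rc;
        }
        u32 old_value = 0;
        rc = svcReadDebugProcessMemory(&old_value, debug, address, sizeof(old_value));
        if (R_SUCCEEDED(rc)) {
            std::printf("old value: 0x%08x\n", old_value);
            rc = svcWriteDebugProcessMemory(debug, &new_value, address, sizeof(new_value));
        }
        svcCloseHandle(debug);
        return rc;
    }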
/* ============================= 64 ABI ============================= */
@@ -161,11 +193,11 @@
}
Result ReadDebugProcessMemory64(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcReadDebugProcessMemory64 was called.");
return ReadDebugProcessMemory(buffer, debug_handle, address, size);
}
Result WriteDebugProcessMemory64(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcWriteDebugProcessMemory64 was called.");
return WriteDebugProcessMemory(debug_handle, buffer, address, size);
}
Result SetHardwareBreakPoint64(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
@@ -215,11 +247,11 @@
}
Result ReadDebugProcessMemory64From32(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcReadDebugProcessMemory64From32 was called.");
return ReadDebugProcessMemory(buffer, debug_handle, address, size);
}
Result WriteDebugProcessMemory64From32(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcWriteDebugProcessMemory64From32 was called.");
return WriteDebugProcessMemory(debug_handle, buffer, address, size);
}
Result SetHardwareBreakPoint64From32(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {