From 2a255b2d61a445fb2b83cc8af7632e3d720e1292 Mon Sep 17 00:00:00 2001
From: Liam <byteslice@airmail.cc>
Date: Sun, 22 Oct 2023 21:16:38 -0400
Subject: [PATCH 1/3] kernel: add KPageTableBase

Co-authored-by: Kelebek1 <eeeedddccc@hotmail.co.uk>
---
 src/common/page_table.cpp                     |   30 +-
 src/common/page_table.h                       |   17 +-
 src/core/CMakeLists.txt                       |    6 +-
 src/core/debugger/gdbstub.cpp                 |   98 +-
 .../board/nintendo/nx/k_system_control.cpp    |   13 +-
 .../board/nintendo/nx/k_system_control.h      |    7 +-
 src/core/hle/kernel/k_capabilities.cpp        |   36 +-
 src/core/hle/kernel/k_capabilities.h          |   17 +-
 .../hle/kernel/k_device_address_space.cpp     |    4 +-
 src/core/hle/kernel/k_device_address_space.h  |   10 +-
 src/core/hle/kernel/k_memory_layout.h         |    8 +
 src/core/hle/kernel/k_memory_manager.cpp      |   12 +-
 src/core/hle/kernel/k_page_table.cpp          | 3519 ----------
 src/core/hle/kernel/k_page_table.h            |  542 +-
 src/core/hle/kernel/k_page_table_base.cpp     | 5718 +++++++++++++++++
 src/core/hle/kernel/k_page_table_base.h       |  759 +++
 src/core/hle/kernel/k_process.cpp             |   18 +-
 src/core/hle/kernel/k_process.h               |   14 +-
 src/core/hle/kernel/k_process_page_table.h    |  480 ++
 src/core/hle/kernel/k_server_session.cpp      |    2 +-
 src/core/hle/kernel/k_system_resource.cpp     |    2 +-
 src/core/hle/kernel/k_thread_local_page.cpp   |    4 +-
 src/core/hle/kernel/process_capability.cpp    |  389 --
 src/core/hle/kernel/process_capability.h      |  266 -
 src/core/hle/kernel/svc/svc_memory.cpp        |    6 +-
 .../hle/kernel/svc/svc_physical_memory.cpp    |    9 +-
 .../hle/kernel/svc/svc_process_memory.cpp     |    3 +-
 src/core/hle/kernel/svc/svc_query_memory.cpp  |    8 +-
 src/core/hle/result.h                         |   31 +
 src/core/hle/service/ldr/ldr.cpp              |   45 +-
 src/core/memory.cpp                           |    6 +-
 31 files changed, 7202 insertions(+), 4877 deletions(-)
 delete mode 100644 src/core/hle/kernel/k_page_table.cpp
 create mode 100644 src/core/hle/kernel/k_page_table_base.cpp
 create mode 100644 src/core/hle/kernel/k_page_table_base.h
 create mode 100644 src/core/hle/kernel/k_process_page_table.h
 delete mode 100644 src/core/hle/kernel/process_capability.cpp
 delete mode 100644 src/core/hle/kernel/process_capability.h

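For orientation: the bulk of the old KPageTable implementation moves into a new shared KPageTableBase, and process code now goes through a thin KProcessPageTable wrapper. A rough sketch of the assumed layering (illustrative only; the real declarations live in the new k_page_table_base.h and k_process_page_table.h below, and the member lists here are assumptions, not the actual interface):

    // Illustrative sketch, not the actual declarations from this patch.
    namespace Kernel {

    // Shared mapping/query/protection logic formerly in k_page_table.cpp.
    class KPageTableBase {
    public:
        Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
                         KProcessAddress addr) const;
        // ... map/unmap/protect primitives ...
    };

    // Process-facing wrapper used by KProcess, KCapabilities, etc.
    class KProcessPageTable {
    private:
        KPageTable m_page_table; // concrete table; assumed to extend KPageTableBase
    };

    } // namespace Kernel
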
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 4b16902695..166dc3dcee 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -9,12 +9,12 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-                               u64 address) const {
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                               Common::ProcessAddress address) const {
     // Setup invalid defaults.
-    out_entry.phys_addr = 0;
-    out_entry.block_size = page_size;
-    out_context.next_page = 0;
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+    out_context->next_page = 0;
 
     // Validate that we can read the actual entry.
     const auto page = address / page_size;
@@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_
     }
 
     // Populate the results.
-    out_entry.phys_addr = phys_addr + address;
-    out_context.next_page = page + 1;
-    out_context.next_offset = address + page_size;
+    out_entry->phys_addr = phys_addr + GetInteger(address);
+    out_context->next_page = page + 1;
+    out_context->next_offset = GetInteger(address) + page_size;
 
     return true;
 }
 
-bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
     // Setup invalid defaults.
-    out_entry.phys_addr = 0;
-    out_entry.block_size = page_size;
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
 
     // Validate that we can read the actual entry.
-    const auto page = context.next_page;
+    const auto page = context->next_page;
     if (page >= backing_addr.size()) {
         return false;
     }
@@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c
     }
 
     // Populate the results.
-    out_entry.phys_addr = phys_addr + context.next_offset;
-    context.next_page = page + 1;
-    context.next_offset += page_size;
+    out_entry->phys_addr = phys_addr + context->next_offset;
+    context->next_page = page + 1;
+    context->next_offset += page_size;
 
     return true;
 }
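With the pointer-based signatures, a caller walks a contiguous range roughly as follows (usage sketch, assuming `table` is a populated Common::PageTable and `addr` a mapped Common::ProcessAddress):

    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};
    if (table.BeginTraversal(std::addressof(entry), std::addressof(context), addr)) {
        do {
            // entry.phys_addr and entry.block_size describe the current block.
        } while (table.ContinueTraversal(std::addressof(entry), std::addressof(context)));
    }
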
diff --git a/src/common/page_table.h b/src/common/page_table.h
index e653d52adc..5340f7d863 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -6,6 +6,7 @@
 #include <atomic>
 
 #include "common/common_types.h"
+#include "common/typed_address.h"
 #include "common/virtual_buffer.h"
 
 namespace Common {
@@ -100,9 +101,9 @@ struct PageTable {
     PageTable(PageTable&&) noexcept = default;
     PageTable& operator=(PageTable&&) noexcept = default;
 
-    bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-                        u64 address) const;
-    bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+    bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                        Common::ProcessAddress address) const;
+    bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
 
     /**
      * Resizes the page table to be able to accommodate enough pages within
@@ -117,6 +118,16 @@ struct PageTable {
         return current_address_space_width_in_bits;
     }
 
+    bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr,
+                            Common::ProcessAddress virt_addr) const {
+        if (virt_addr > (1ULL << this->GetAddressSpaceBits())) {
+            return false;
+        }
+
+        *out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr);
+        return true;
+    }
+
     /**
      * Vector of memory pointers backing each page. An entry can only be non-null if the
      * corresponding attribute element is of type `Memory`.
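The new GetPhysicalAddress helper gives a one-shot translation on top of the same backing array (usage sketch; `table` and `virt_addr` are assumptions):

    Common::PhysicalAddress phys_addr{};
    if (table.GetPhysicalAddress(std::addressof(phys_addr), virt_addr)) {
        // phys_addr = backing_addr[virt_addr / page_size] + virt_addr
    }
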
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index e4f499135a..8be3bdd080 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -271,8 +271,9 @@ add_library(core STATIC
     hle/kernel/k_page_heap.h
     hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
-    hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_page_table_base.cpp
+    hle/kernel/k_page_table_base.h
     hle/kernel/k_page_table_manager.h
     hle/kernel/k_page_table_slab_heap.h
     hle/kernel/k_port.cpp
@@ -280,6 +281,7 @@ add_library(core STATIC
     hle/kernel/k_priority_queue.h
     hle/kernel/k_process.cpp
     hle/kernel/k_process.h
+    hle/kernel/k_process_page_table.h
     hle/kernel/k_readable_event.cpp
     hle/kernel/k_readable_event.h
     hle/kernel/k_resource_limit.cpp
@@ -330,8 +332,6 @@ add_library(core STATIC
     hle/kernel/physical_core.cpp
     hle/kernel/physical_core.h
     hle/kernel/physical_memory.h
-    hle/kernel/process_capability.cpp
-    hle/kernel/process_capability.h
     hle/kernel/slab_helpers.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 6f5f5156ba..e9bf578951 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -727,29 +727,34 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory
     }
 }
 
-static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) {
-    Kernel::Svc::MemoryInfo mem_info;
+static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) {
+    Kernel::KMemoryInfo mem_info;
+    Kernel::Svc::MemoryInfo svc_mem_info;
+    Kernel::Svc::PageInfo page_info;
     VAddr cur_addr{base};
 
     // Expect: r-x Code (.text)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
-    if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-        mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+    if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+        svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
         return cur_addr - 1;
     }
 
     // Expect: r-- Code (.rodata)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
-    if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-        mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+    if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+        svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
         return cur_addr - 1;
     }
 
     // Expect: rw- CodeData (.data)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
     return cur_addr - 1;
 }
 
@@ -767,7 +772,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 
     if (command_str == "get fastmem") {
         if (Settings::IsFastmemEnabled()) {
-            const auto& impl = page_table.PageTableImpl();
+            const auto& impl = page_table.GetImpl();
             const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
             const auto region_bits = impl.current_address_space_width_in_bits;
             const auto region_size = 1ULL << region_bits;
@@ -785,20 +790,22 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
         reply = fmt::format("Process:     {:#x} ({})\n"
                             "Program Id:  {:#018x}\n",
                             process->GetProcessId(), process->GetName(), process->GetProgramId());
-        reply += fmt::format("Layout:\n"
-                             "  Alias: {:#012x} - {:#012x}\n"
-                             "  Heap:  {:#012x} - {:#012x}\n"
-                             "  Aslr:  {:#012x} - {:#012x}\n"
-                             "  Stack: {:#012x} - {:#012x}\n"
-                             "Modules:\n",
-                             GetInteger(page_table.GetAliasRegionStart()),
-                             GetInteger(page_table.GetAliasRegionEnd()),
-                             GetInteger(page_table.GetHeapRegionStart()),
-                             GetInteger(page_table.GetHeapRegionEnd()),
-                             GetInteger(page_table.GetAliasCodeRegionStart()),
-                             GetInteger(page_table.GetAliasCodeRegionEnd()),
-                             GetInteger(page_table.GetStackRegionStart()),
-                             GetInteger(page_table.GetStackRegionEnd()));
+        reply += fmt::format(
+            "Layout:\n"
+            "  Alias: {:#012x} - {:#012x}\n"
+            "  Heap:  {:#012x} - {:#012x}\n"
+            "  Aslr:  {:#012x} - {:#012x}\n"
+            "  Stack: {:#012x} - {:#012x}\n"
+            "Modules:\n",
+            GetInteger(page_table.GetAliasRegionStart()),
+            GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1,
+            GetInteger(page_table.GetHeapRegionStart()),
+            GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1,
+            GetInteger(page_table.GetAliasCodeRegionStart()),
+            GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() -
+                1,
+            GetInteger(page_table.GetStackRegionStart()),
+            GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1);
 
         for (const auto& [vaddr, name] : modules) {
             reply += fmt::format("  {:#012x} - {:#012x} {}\n", vaddr,
@@ -811,27 +818,34 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
         while (true) {
             using MemoryAttribute = Kernel::Svc::MemoryAttribute;
 
-            auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
+            Kernel::KMemoryInfo mem_info{};
+            Kernel::Svc::PageInfo page_info{};
+            R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+                                          cur_addr));
+            auto svc_mem_info = mem_info.GetSvcMemoryInfo();
 
-            if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
-                mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) {
-                const char* state = GetMemoryStateName(mem_info.state);
-                const char* perm = GetMemoryPermissionString(mem_info);
+            if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
+                svc_mem_info.base_address + svc_mem_info.size - 1 !=
+                    std::numeric_limits<u64>::max()) {
+                const char* state = GetMemoryStateName(svc_mem_info.state);
+                const char* perm = GetMemoryPermissionString(svc_mem_info);
 
-                const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
-                const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
-                const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
-                const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+                const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
+                const char i =
+                    True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
+                const char d =
+                    True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
+                const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
                 const char p =
-                    True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
+                    True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
 
-                reply += fmt::format("  {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
-                                     mem_info.base_address,
-                                     mem_info.base_address + mem_info.size - 1, perm, state, l, i,
-                                     d, u, p, mem_info.ipc_count, mem_info.device_count);
+                reply += fmt::format(
+                    "  {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address,
+                    svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p,
+                    svc_mem_info.ipc_count, svc_mem_info.device_count);
             }
 
-            const uintptr_t next_address = mem_info.base_address + mem_info.size;
+            const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
             if (next_address <= cur_addr) {
                 break;
             }
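The pattern above replaces the old by-value QueryInfo: the table now fills a KMemoryInfo out-parameter, and callers convert to the SVC shape on demand. Condensed (a sketch of the calls already shown above):

    Kernel::KMemoryInfo mem_info{};
    Kernel::Svc::PageInfo page_info{};
    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info),
                                  std::addressof(page_info), cur_addr));
    const auto svc_mem_info = mem_info.GetSvcMemoryInfo();

Note also that the loop terminates when next_address fails to advance past cur_addr, which catches wrap-around at the top of the address space.
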
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 59364efa1a..37fa39a734 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress*
     };
 
     // We succeeded.
-    *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
+    *out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
     R_SUCCEED();
 }
 
@@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres
     ASSERT(Common::IsAligned(size, alignment));
 
     // Close the secure region's pages.
-    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
+    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
                                  size / PageSize);
 }
 
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+    return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+    return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
 } // namespace Kernel::Board::Nintendo::Nx
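These two accessors let page-table code charge insecure-memory mappings against the system resource limit and allocate them from the non-secure pool. A hypothetical call site (an assumption, not code from this patch):

    // Hypothetical consumer inside KPageTableBase.
    KResourceLimit* limit = KSystemControl::GetInsecureMemoryResourceLimit(kernel);
    const u32 pool = KSystemControl::GetInsecureMemoryPool(); // SystemNonSecure
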
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index ff1feec700..60c5e58b73 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -8,7 +8,8 @@
 
 namespace Kernel {
 class KernelCore;
-}
+class KResourceLimit;
+} // namespace Kernel
 
 namespace Kernel::Board::Nintendo::Nx {
 
@@ -40,6 +41,10 @@ public:
                                        u32 pool);
     static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
                                  u32 pool);
+
+    // Insecure Memory.
+    static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+    static u32 GetInsecureMemoryPool();
 };
 
 } // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index e7da7a21d4..fb890f978d 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -4,14 +4,15 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/k_capabilities.h"
 #include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_version.h"
 
 namespace Kernel {
 
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+                                       KProcessPageTable* page_table) {
     // We're initializing an initial process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -41,7 +42,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
     R_RETURN(this->SetCapabilities(kern_caps, page_table));
 }
 
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+                                        KProcessPageTable* page_table) {
     // We're initializing a user process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -121,7 +123,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
     const auto range_pack = MapRange{cap};
     const auto size_pack = MapRangeSize{size_cap};
 
@@ -142,16 +144,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p
                                                         ? KMemoryPermission::UserRead
                                                         : KMemoryPermission::UserReadWrite;
     if (MapRangeSize{size_cap}.normal) {
-        // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+        R_RETURN(page_table->MapStatic(phys_addr, size, perm));
     } else {
-        // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+        R_RETURN(page_table->MapIo(phys_addr, size, perm));
     }
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
 }
 
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
     // Get/validate address/size
     const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
     const size_t num_pages = 1;
@@ -160,10 +159,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
     R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
 
     // Do the mapping.
-    // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
+    R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
 }
 
 template <typename F>
@@ -200,13 +196,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
     // Map each region into the process's page table.
     return ProcessMapRegionCapability(
-        cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
-            // R_RETURN(page_table->MapRegion(region_type, perm));
-            UNIMPLEMENTED();
-            R_SUCCEED();
+        cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+            R_RETURN(page_table->MapRegion(region_type, perm));
         });
 }
 
@@ -280,7 +274,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
 }
 
 Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
-                                    KPageTable* page_table) {
+                                    KProcessPageTable* page_table) {
     // Validate this is a capability we can act on.
     const auto type = GetCapabilityType(cap);
     R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +312,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
     }
 }
 
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
     u32 set_flags = 0, set_svc = 0;
 
     for (size_t i = 0; i < caps.size(); i++) {
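The capability handlers above now perform the real mappings (MapStatic, MapIo, MapRegion) instead of returning through UNIMPLEMENTED stubs; note that the MapRegion_ lambda now captures page_table, which the previously commented-out body never needed. Decoded, a MapIoPage capability reduces to (a sketch of the logic above, with size fixed at one page):

    const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
    R_RETURN(page_table->MapIo(phys_addr, PageSize, KMemoryPermission::UserReadWrite));
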
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index ebd4eedb1e..013d952ad4 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -15,15 +15,15 @@
 
 namespace Kernel {
 
-class KPageTable;
+class KProcessPageTable;
 class KernelCore;
 
 class KCapabilities {
 public:
     constexpr explicit KCapabilities() = default;
 
-    Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
-    Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+    Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+    Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
 
     static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
 
@@ -264,9 +264,9 @@ private:
 
     Result SetCorePriorityCapability(const u32 cap);
     Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
-    Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
-    Result MapIoPage_(const u32 cap, KPageTable* page_table);
-    Result MapRegion_(const u32 cap, KPageTable* page_table);
+    Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+    Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+    Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
     Result SetInterruptPairCapability(const u32 cap);
     Result SetProgramTypeCapability(const u32 cap);
     Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
     static Result ProcessMapRegionCapability(const u32 cap, F f);
     static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
 
-    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
-    Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+                         KProcessPageTable* page_table);
+    Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
 
 private:
     Svc::SvcAccessFlagSet m_svc_access_flags{};
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index f488967153..f0703f795b 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
                                 size_t size, u64 device_address, u32 option, bool is_aligned) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
                                   size_t size, u64 device_address) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 18556e3cc1..ff0ec8152b 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,7 +5,7 @@
 
 #include <string>
 
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
     Result Attach(Svc::DeviceName device_name);
     Result Detach(Svc::DeviceName device_name);
 
-    Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
     }
 
-    Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
     }
 
-    Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                  u64 device_address);
 
     static void Initialize();
 
 private:
-    Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                u64 device_address, u32 option, bool is_aligned);
 
 private:
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index c8122644fd..d7adb31699 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -394,6 +394,14 @@ private:
         return region.GetEndAddress();
     }
 
+public:
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+        return Find(address, layout.GetVirtualMemoryRegionTree());
+    }
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+        return Find(address, layout.GetPhysicalMemoryRegionTree());
+    }
+
 private:
     u64 m_linear_phys_to_virt_diff{};
     u64 m_linear_virt_to_phys_diff{};
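The new overloads are convenience wrappers that pick the virtual or physical region tree based on the address type. Usage sketch (assuming a `layout` reference and an integral `addr`):

    // Sketch: resolve the region containing an address.
    const KMemoryRegion* v = KMemoryLayout::Find(layout, KVirtualAddress{addr});
    const KMemoryRegion* p = KMemoryLayout::Find(layout, KPhysicalAddress{addr});
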
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cdc5572d85..0a973ec8ce 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
 }
 
 void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
@@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
 
 void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi
 
 void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                     size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica
 bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages, u8 fill_pattern) {
     auto& device_memory = kernel.System().DeviceMemory();
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
 
     // We want to return whether any pages were newly allocated.
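Each of these call sites drops the explicit MemoryLayout() argument; the helper is assumed to resolve the layout from the kernel itself. Sketch of the reduced signature (an assumption inferred from the calls above):

    // Assumed new shape: the layout is looked up via kernel internally.
    static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel,
                                                   KVirtualAddress address);
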
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
deleted file mode 100644
index 1d47bdf6b5..0000000000
--- a/src/core/hle/kernel/k_page_table.cpp
+++ /dev/null
@@ -1,3519 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/literals.h"
-#include "common/scope_exit.h"
-#include "common/settings.h"
-#include "core/core.h"
-#include "core/hle/kernel/k_address_space_info.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/k_system_resource.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-namespace {
-
-class KScopedLightLockPair {
-    YUZU_NON_COPYABLE(KScopedLightLockPair);
-    YUZU_NON_MOVEABLE(KScopedLightLockPair);
-
-private:
-    KLightLock* m_lower;
-    KLightLock* m_upper;
-
-public:
-    KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
-        // Ensure our locks are in a consistent order.
-        if (std::addressof(lhs) <= std::addressof(rhs)) {
-            m_lower = std::addressof(lhs);
-            m_upper = std::addressof(rhs);
-        } else {
-            m_lower = std::addressof(rhs);
-            m_upper = std::addressof(lhs);
-        }
-
-        // Acquire both locks.
-        m_lower->Lock();
-        if (m_lower != m_upper) {
-            m_upper->Lock();
-        }
-    }
-
-    ~KScopedLightLockPair() {
-        // Unlock the upper lock.
-        if (m_upper != nullptr && m_upper != m_lower) {
-            m_upper->Unlock();
-        }
-
-        // Unlock the lower lock.
-        if (m_lower != nullptr) {
-            m_lower->Unlock();
-        }
-    }
-
-public:
-    // Utility.
-    void TryUnlockHalf(KLightLock& lock) {
-        // Only allow unlocking if the lock is half the pair.
-        if (m_lower != m_upper) {
-            // We want to be sure the lock is one we own.
-            if (m_lower == std::addressof(lock)) {
-                lock.Unlock();
-                m_lower = nullptr;
-            } else if (m_upper == std::addressof(lock)) {
-                lock.Unlock();
-                m_upper = nullptr;
-            }
-        }
-    }
-};
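The pair lock above avoids lock-order inversion by always acquiring the lock at the lower address first, so two threads locking the same pair in opposite orders cannot deadlock. Usage sketch (GetLock() here is a hypothetical accessor, not part of this code):

    // Both orderings acquire min(&a, &b) first.
    KScopedLightLockPair lk(process_a.GetLock(), process_b.GetLock());
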
-
-using namespace Common::Literals;
-
-constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
-    switch (as_type) {
-    case Svc::CreateProcessFlag::AddressSpace32Bit:
-    case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
-        return 32;
-    case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
-        return 36;
-    case Svc::CreateProcessFlag::AddressSpace64Bit:
-        return 39;
-    default:
-        ASSERT(false);
-        return {};
-    }
-}
-
-} // namespace
-
-KPageTable::KPageTable(Core::System& system_)
-    : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
-
-KPageTable::~KPageTable() = default;
-
-Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
-                                        bool enable_das_merge, bool from_back,
-                                        KMemoryManager::Pool pool, KProcessAddress code_addr,
-                                        size_t code_size, KSystemResource* system_resource,
-                                        KResourceLimit* resource_limit,
-                                        Core::Memory::Memory& memory) {
-
-    const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
-    };
-    const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
-    };
-
-    // Set the tracking memory
-    m_memory = std::addressof(memory);
-
-    // Set our width and heap/alias sizes
-    m_address_space_width = GetAddressSpaceWidthFromType(as_type);
-    const KProcessAddress start = 0;
-    const KProcessAddress end{1ULL << m_address_space_width};
-    size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
-
-    ASSERT(code_addr < code_addr + code_size);
-    ASSERT(code_addr + code_size - 1 <= end - 1);
-
-    // Adjust heap/alias size if we don't have an alias region
-    if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
-        heap_region_size += alias_region_size;
-        alias_region_size = 0;
-    }
-
-    // Set code regions and determine remaining
-    constexpr size_t RegionAlignment{2_MiB};
-    KProcessAddress process_code_start{};
-    KProcessAddress process_code_end{};
-    size_t stack_region_size{};
-    size_t kernel_map_region_size{};
-
-    if (m_address_space_width == 39) {
-        alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
-        heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
-        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
-        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
-        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
-        m_alias_code_region_start = m_code_region_start;
-        m_alias_code_region_end = m_code_region_end;
-        process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
-        process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
-    } else {
-        stack_region_size = 0;
-        kernel_map_region_size = 0;
-        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
-        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        m_stack_region_start = m_code_region_start;
-        m_alias_code_region_start = m_code_region_start;
-        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
-                                  GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
-        m_stack_region_end = m_code_region_end;
-        m_kernel_map_region_start = m_code_region_start;
-        m_kernel_map_region_end = m_code_region_end;
-        process_code_start = m_code_region_start;
-        process_code_end = m_code_region_end;
-    }
-
-    // Set other basic fields
-    m_enable_aslr = enable_aslr;
-    m_enable_device_address_space_merge = enable_das_merge;
-    m_address_space_start = start;
-    m_address_space_end = end;
-    m_is_kernel = false;
-    m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
-    m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
-    m_resource_limit = resource_limit;
-
-    // Determine the region we can place our undetermineds in
-    KProcessAddress alloc_start{};
-    size_t alloc_size{};
-    if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
-        alloc_start = m_code_region_start;
-        alloc_size = process_code_start - m_code_region_start;
-    } else {
-        alloc_start = process_code_end;
-        alloc_size = end - process_code_end;
-    }
-    const size_t needed_size =
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
-    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
-
-    const size_t remaining_size{alloc_size - needed_size};
-
-    // Determine random placements for each region
-    size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
-    if (enable_aslr) {
-        alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                    RegionAlignment;
-        heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                   RegionAlignment;
-        stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                    RegionAlignment;
-        kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                   RegionAlignment;
-    }
-
-    // Setup heap and alias regions
-    m_alias_region_start = alloc_start + alias_rnd;
-    m_alias_region_end = m_alias_region_start + alias_region_size;
-    m_heap_region_start = alloc_start + heap_rnd;
-    m_heap_region_end = m_heap_region_start + heap_region_size;
-
-    if (alias_rnd <= heap_rnd) {
-        m_heap_region_start += alias_region_size;
-        m_heap_region_end += alias_region_size;
-    } else {
-        m_alias_region_start += heap_region_size;
-        m_alias_region_end += heap_region_size;
-    }
-
-    // Setup stack region
-    if (stack_region_size) {
-        m_stack_region_start = alloc_start + stack_rnd;
-        m_stack_region_end = m_stack_region_start + stack_region_size;
-
-        if (alias_rnd < stack_rnd) {
-            m_stack_region_start += alias_region_size;
-            m_stack_region_end += alias_region_size;
-        } else {
-            m_alias_region_start += stack_region_size;
-            m_alias_region_end += stack_region_size;
-        }
-
-        if (heap_rnd < stack_rnd) {
-            m_stack_region_start += heap_region_size;
-            m_stack_region_end += heap_region_size;
-        } else {
-            m_heap_region_start += stack_region_size;
-            m_heap_region_end += stack_region_size;
-        }
-    }
-
-    // Setup kernel map region
-    if (kernel_map_region_size) {
-        m_kernel_map_region_start = alloc_start + kmap_rnd;
-        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
-
-        if (alias_rnd < kmap_rnd) {
-            m_kernel_map_region_start += alias_region_size;
-            m_kernel_map_region_end += alias_region_size;
-        } else {
-            m_alias_region_start += kernel_map_region_size;
-            m_alias_region_end += kernel_map_region_size;
-        }
-
-        if (heap_rnd < kmap_rnd) {
-            m_kernel_map_region_start += heap_region_size;
-            m_kernel_map_region_end += heap_region_size;
-        } else {
-            m_heap_region_start += kernel_map_region_size;
-            m_heap_region_end += kernel_map_region_size;
-        }
-
-        if (stack_region_size) {
-            if (stack_rnd < kmap_rnd) {
-                m_kernel_map_region_start += stack_region_size;
-                m_kernel_map_region_end += stack_region_size;
-            } else {
-                m_stack_region_start += kernel_map_region_size;
-                m_stack_region_end += kernel_map_region_size;
-            }
-        }
-    }
-
-    // Set heap and fill members.
-    m_current_heap_end = m_heap_region_start;
-    m_max_heap_size = 0;
-    m_mapped_physical_memory_size = 0;
-    m_mapped_unsafe_physical_memory = 0;
-    m_mapped_insecure_memory = 0;
-    m_mapped_ipc_server_memory = 0;
-
-    m_heap_fill_value = 0;
-    m_ipc_fill_value = 0;
-    m_stack_fill_value = 0;
-
-    // Set allocation option.
-    m_allocate_option =
-        KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
-                                                     : KMemoryManager::Direction::FromFront);
-
-    // Ensure that our regions are inside our address space
-    auto IsInAddressSpace = [&](KProcessAddress addr) {
-        return m_address_space_start <= addr && addr <= m_address_space_end;
-    };
-    ASSERT(IsInAddressSpace(m_alias_region_start));
-    ASSERT(IsInAddressSpace(m_alias_region_end));
-    ASSERT(IsInAddressSpace(m_heap_region_start));
-    ASSERT(IsInAddressSpace(m_heap_region_end));
-    ASSERT(IsInAddressSpace(m_stack_region_start));
-    ASSERT(IsInAddressSpace(m_stack_region_end));
-    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
-    ASSERT(IsInAddressSpace(m_kernel_map_region_end));
-
-    // Ensure that we selected regions that don't overlap
-    const KProcessAddress alias_start{m_alias_region_start};
-    const KProcessAddress alias_last{m_alias_region_end - 1};
-    const KProcessAddress heap_start{m_heap_region_start};
-    const KProcessAddress heap_last{m_heap_region_end - 1};
-    const KProcessAddress stack_start{m_stack_region_start};
-    const KProcessAddress stack_last{m_stack_region_end - 1};
-    const KProcessAddress kmap_start{m_kernel_map_region_start};
-    const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
-    ASSERT(alias_last < heap_start || heap_last < alias_start);
-    ASSERT(alias_last < stack_start || stack_last < alias_start);
-    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
-    ASSERT(heap_last < stack_start || stack_last < heap_start);
-    ASSERT(heap_last < kmap_start || kmap_last < heap_start);
-
-    m_current_heap_end = m_heap_region_start;
-    m_max_heap_size = 0;
-    m_mapped_physical_memory_size = 0;
-    m_memory_pool = pool;
-
-    m_page_table_impl = std::make_unique<Common::PageTable>();
-    m_page_table_impl->Resize(m_address_space_width, PageBits);
-
-    // Initialize our memory block manager.
-    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
-                                               m_memory_block_slab_manager));
-}
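The region shuffle above keeps the randomized regions disjoint: each region is placed at alloc_start plus its random offset, then pushed up by the size of every region whose offset sorted before it. Reduced to two regions (an illustrative sketch of the displacement rule, not code from this patch):

    u64 a_start = alloc_start + rnd_a, b_start = alloc_start + rnd_b;
    if (rnd_a <= rnd_b) {
        b_start += a_size; // B is placed after A
    } else {
        a_start += b_size; // A is placed after B
    }
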
-
-void KPageTable::Finalize() {
-    auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
-        if (Settings::IsFastmemEnabled()) {
-            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
-        }
-    };
-
-    // Finalize memory blocks.
-    m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
-
-    // Release any insecure mapped memory.
-    if (m_mapped_insecure_memory) {
-        UNIMPLEMENTED();
-    }
-
-    // Release any ipc server memory.
-    if (m_mapped_ipc_server_memory) {
-        UNIMPLEMENTED();
-    }
-
-    // Close the backing page table, as the destructor is not called for guest objects.
-    m_page_table_impl.reset();
-}
-
-Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
-                                  KMemoryPermission perm) {
-    const u64 size{num_pages * PageSize};
-
-    // Validate the mapping request.
-    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Verify that the destination memory is unmapped.
-    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 KMemoryAttribute::None, KMemoryAttribute::None));
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-
-    // Allocate and open.
-    KPageGroup pg{m_kernel, m_block_info_manager};
-    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-        &pg, num_pages,
-        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
-
-    R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
-                                 size_t size) {
-    // Validate the mapping request.
-    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
-             ResultInvalidMemoryRegion);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Verify that the source memory is normal heap.
-    KMemoryState src_state{};
-    KMemoryPermission src_perm{};
-    size_t num_src_allocator_blocks{};
-    R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
-                                 src_address, size, KMemoryState::All, KMemoryState::Normal,
-                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                                 KMemoryAttribute::All, KMemoryAttribute::None));
-
-    // Verify that the destination memory is unmapped.
-    size_t num_dst_allocator_blocks{};
-    R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
-                                 KMemoryState::Free, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::None,
-                                 KMemoryAttribute::None));
-
-    // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_src_allocator_blocks);
-    R_TRY(src_allocator_result);
-
-    // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_dst_allocator_blocks);
-    R_TRY(dst_allocator_result);
-
-    // Map the code memory.
-    {
-        // Determine the number of pages being operated on.
-        const size_t num_pages = size / PageSize;
-
-        // Create page groups for the memory being mapped.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-        AddRegionToPages(src_address, num_pages, pg);
-
-        // We're going to perform an update, so create a helper.
-        KScopedPageTableUpdater updater(this);
-
-        // Reprotect the source as kernel-read/not mapped.
-        const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
-                                                             KMemoryPermission::NotMapped);
-        R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
-
-        // Ensure that we unprotect the source pages on failure.
-        auto unprot_guard = SCOPE_GUARD({
-            ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
-                       .IsSuccess());
-        });
-
-        // Map the alias pages.
-        const KPageProperties dst_properties = {new_perm, false, false,
-                                                DisableMergeAttribute::DisableHead};
-        R_TRY(
-            this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
-
-        // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
-        // failure.
-        unprot_guard.Cancel();
-
-        // Apply the memory block updates.
-        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
-                                      src_state, new_perm, KMemoryAttribute::Locked,
-                                      KMemoryBlockDisableMergeAttribute::Locked,
-                                      KMemoryBlockDisableMergeAttribute::None);
-        m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                      KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
-                                      KMemoryBlockDisableMergeAttribute::Normal,
-                                      KMemoryBlockDisableMergeAttribute::None);
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
-                                   size_t size,
-                                   ICacheInvalidationStrategy icache_invalidation_strategy) {
-    // Validate the mapping request.
-    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
-             ResultInvalidMemoryRegion);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Verify that the source memory is locked normal heap.
-    size_t num_src_allocator_blocks{};
-    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
-                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::All,
-                                 KMemoryAttribute::Locked));
-
-    // Verify that the destination memory is aliasable code.
-    size_t num_dst_allocator_blocks{};
-    R_TRY(this->CheckMemoryStateContiguous(
-        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
-        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
-
-    // Determine whether any pages being unmapped are code.
-    bool any_code_pages = false;
-    {
-        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
-        while (true) {
-            // Get the memory info.
-            const KMemoryInfo info = it->GetMemoryInfo();
-
-            // Check if the memory has code flag.
-            if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
-                any_code_pages = true;
-                break;
-            }
-
-            // Check if we're done.
-            if (dst_address + size - 1 <= info.GetLastAddress()) {
-                break;
-            }
-
-            // Advance.
-            ++it;
-        }
-    }
-
-    // Ensure that we maintain the instruction cache.
-    bool reprotected_pages = false;
-    SCOPE_EXIT({
-        if (reprotected_pages && any_code_pages) {
-            if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
-                m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
-            } else {
-                m_system.InvalidateCpuInstructionCaches();
-            }
-        }
-    });
-
-    // Unmap.
-    {
-        // Determine the number of pages being operated on.
-        const size_t num_pages = size / PageSize;
-
-        // Create an update allocator for the source.
-        Result src_allocator_result{ResultSuccess};
-        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
-                                                         m_memory_block_slab_manager,
-                                                         num_src_allocator_blocks);
-        R_TRY(src_allocator_result);
-
-        // Create an update allocator for the destination.
-        Result dst_allocator_result{ResultSuccess};
-        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
-                                                         m_memory_block_slab_manager,
-                                                         num_dst_allocator_blocks);
-        R_TRY(dst_allocator_result);
-
-        // Unmap the aliased copy of the pages.
-        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
-        // Try to set the permissions for the source pages back to what they should be.
-        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                      OperationType::ChangePermissions));
-
-        // Apply the memory block updates.
-        m_memory_block_manager.Update(
-            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
-            KMemoryPermission::None, KMemoryAttribute::None,
-            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
-        m_memory_block_manager.Update(
-            std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
-
-        // Note that we reprotected pages.
-        reprotected_pages = true;
-    }
-
-    R_SUCCEED();
-}
-
-KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
-                                         size_t num_pages, size_t alignment, size_t offset,
-                                         size_t guard_pages) {
-    KProcessAddress address = 0;
-
-    if (num_pages <= region_num_pages) {
-        if (this->IsAslrEnabled()) {
-            UNIMPLEMENTED();
-        }
-        // Find the first free area.
-        if (address == 0) {
-            address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
-                                                          alignment, offset, guard_pages);
-        }
-    }
-
-    return address;
-}
-
-Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    const size_t size = num_pages * PageSize;
-
-    // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
-
-    // Begin traversal.
-    Common::PageTable::TraversalContext context;
-    Common::PageTable::TraversalEntry next_entry;
-    R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
-             ResultInvalidCurrentMemory);
-
-    // Prepare tracking variables.
-    KPhysicalAddress cur_addr = next_entry.phys_addr;
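-    // The traversal may begin mid-block; mask off the offset within the block to get the bytes
-    // remaining in it.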
-    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
-    size_t tot_size = cur_size;
-
-    // Iterate, adding to group as we go.
-    const auto& memory_layout = m_system.Kernel().MemoryLayout();
-    while (tot_size < size) {
-        R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
-                 ResultInvalidCurrentMemory);
-
-        if (next_entry.phys_addr != (cur_addr + cur_size)) {
-            const size_t cur_pages = cur_size / PageSize;
-
-            R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
-            R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
-            cur_addr = next_entry.phys_addr;
-            cur_size = next_entry.block_size;
-        } else {
-            cur_size += next_entry.block_size;
-        }
-
-        tot_size += next_entry.block_size;
-    }
-
-    // Ensure we add the right amount for the last block.
-    if (tot_size > size) {
-        cur_size -= (tot_size - size);
-    }
-
-    // Add the last block.
-    const size_t cur_pages = cur_size / PageSize;
-    R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
-    R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
-    R_SUCCEED();
-}
-
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    const size_t size = num_pages * PageSize;
-    const auto& memory_layout = m_system.Kernel().MemoryLayout();
-
-    // Empty groups are necessarily invalid.
-    if (pg.empty()) {
-        return false;
-    }
-
-    // We're going to validate that the group we'd expect is the group we see.
-    auto cur_it = pg.begin();
-    KPhysicalAddress cur_block_address = cur_it->GetAddress();
-    size_t cur_block_pages = cur_it->GetNumPages();
-
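-    // Helper to advance to the next expected block once the current one has been consumed.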
-    auto UpdateCurrentIterator = [&]() {
-        if (cur_block_pages == 0) {
-            if ((++cur_it) == pg.end()) {
-                return false;
-            }
-
-            cur_block_address = cur_it->GetAddress();
-            cur_block_pages = cur_it->GetNumPages();
-        }
-        return true;
-    };
-
-    // Begin traversal.
-    Common::PageTable::TraversalContext context;
-    Common::PageTable::TraversalEntry next_entry;
-    if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
-        return false;
-    }
-
-    // Prepare tracking variables.
-    KPhysicalAddress cur_addr = next_entry.phys_addr;
-    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
-    size_t tot_size = cur_size;
-
-    // Iterate, comparing expected to actual.
-    while (tot_size < size) {
-        if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
-            return false;
-        }
-
-        if (next_entry.phys_addr != (cur_addr + cur_size)) {
-            const size_t cur_pages = cur_size / PageSize;
-
-            if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
-                return false;
-            }
-
-            if (!UpdateCurrentIterator()) {
-                return false;
-            }
-
-            if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
-                return false;
-            }
-
-            cur_block_address += cur_size;
-            cur_block_pages -= cur_pages;
-            cur_addr = next_entry.phys_addr;
-            cur_size = next_entry.block_size;
-        } else {
-            cur_size += next_entry.block_size;
-        }
-
-        tot_size += next_entry.block_size;
-    }
-
-    // Ensure we compare the right amount for the last block.
-    if (tot_size > size) {
-        cur_size -= (tot_size - size);
-    }
-
-    if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
-        return false;
-    }
-
-    if (!UpdateCurrentIterator()) {
-        return false;
-    }
-
-    return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
-}
-
-Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
-                                      KPageTable& src_page_table, KProcessAddress src_addr) {
-    // Acquire the table locks.
-    KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
-
-    const size_t num_pages{size / PageSize};
-
-    // Check that the memory is mapped in the destination process.
-    size_t num_allocator_blocks;
-    R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
-                           KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
-                           KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
-                           KMemoryAttribute::None));
-
-    // Check that the memory is mapped in the source process.
-    R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
-                                          KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
-                                          KMemoryPermission::None, KMemoryAttribute::All,
-                                          KMemoryAttribute::None));
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
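-    // Unmap the pages.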
-    R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
-    // Apply the memory block update.
-    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
-                                  KMemoryState::Free, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
-
-    m_system.InvalidateCpuInstructionCaches();
-
-    R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
-                                     KProcessAddress address, size_t size,
-                                     KMemoryPermission test_perm, KMemoryState dst_state) {
-    // Validate pre-conditions.
-    ASSERT(this->IsLockedByCurrentThread());
-    ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
-           test_perm == KMemoryPermission::UserRead);
-
-    // Check that the address is in range.
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Get the source permission.
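-    // For a read-write send, the client's pages become kernel-only (user not-mapped) for the
-    // duration; a read-only send leaves them user-readable.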
-    const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
-                              ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
-                              : KMemoryPermission::UserRead;
-
-    // Get aligned extents.
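-    // The aligned extents round outward to include partial pages, while the mapping extents
-    // round inward to cover only whole pages.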
-    const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
-    const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
-    const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
-    const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
-
-    const auto aligned_src_last = aligned_src_end - 1;
-    const auto mapping_src_last = mapping_src_end - 1;
-
-    // Get the test state and attribute mask.
-    KMemoryState test_state;
-    KMemoryAttribute test_attr_mask;
-    switch (dst_state) {
-    case KMemoryState::Ipc:
-        test_state = KMemoryState::FlagCanUseIpc;
-        test_attr_mask =
-            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
-        break;
-    case KMemoryState::NonSecureIpc:
-        test_state = KMemoryState::FlagCanUseNonSecureIpc;
-        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
-        break;
-    case KMemoryState::NonDeviceIpc:
-        test_state = KMemoryState::FlagCanUseNonDeviceIpc;
-        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
-        break;
-    default:
-        R_THROW(ResultInvalidCombination);
-    }
-
-    // Ensure that on failure, we roll back appropriately.
-    size_t mapped_size = 0;
-    ON_RESULT_FAILURE {
-        if (mapped_size > 0) {
-            this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
-                                                          src_perm);
-        }
-    };
-
-    size_t blocks_needed = 0;
-
-    // Iterate, mapping as needed.
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
-    while (true) {
-        const KMemoryInfo info = it->GetMemoryInfo();
-
-        // Validate the current block.
-        R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
-                                     test_attr_mask, KMemoryAttribute::None));
-
-        if (mapping_src_start < mapping_src_end && mapping_src_start < info.GetEndAddress() &&
-            info.GetAddress() < GetInteger(mapping_src_end)) {
-            const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
-                                       ? info.GetAddress()
-                                       : mapping_src_start;
-            const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
-                                                                           : mapping_src_end;
-            const size_t cur_size = cur_end - cur_start;
-
-            if (info.GetAddress() < GetInteger(mapping_src_start)) {
-                ++blocks_needed;
-            }
-            if (mapping_src_last < info.GetLastAddress()) {
-                ++blocks_needed;
-            }
-
-            // Set the permissions on the block, if we need to.
-            if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
-                R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
-                              OperationType::ChangePermissions));
-            }
-
-            // Note that we mapped this part.
-            mapped_size += cur_size;
-        }
-
-        // If the block is at the end, we're done.
-        if (aligned_src_last <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance.
-        ++it;
-        ASSERT(it != m_memory_block_manager.end());
-    }
-
-    if (out_blocks_needed != nullptr) {
-        ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
-        *out_blocks_needed = blocks_needed;
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
-                                     KProcessAddress src_addr, KMemoryPermission test_perm,
-                                     KMemoryState dst_state, KPageTable& src_page_table,
-                                     bool send) {
-    ASSERT(this->IsLockedByCurrentThread());
-    ASSERT(src_page_table.IsLockedByCurrentThread());
-
-    // Check that we can theoretically map.
-    const KProcessAddress region_start = m_alias_region_start;
-    const size_t region_size = m_alias_region_end - m_alias_region_start;
-    R_UNLESS(size < region_size, ResultOutOfAddressSpace);
-
-    // Get aligned source extents.
-    const KProcessAddress src_start = src_addr;
-    const KProcessAddress src_end = src_addr + size;
-    const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
-    const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
-    const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
-    const KProcessAddress mapping_src_end =
-        Common::AlignDown(GetInteger(src_start) + size, PageSize);
-    const size_t aligned_src_size = aligned_src_end - aligned_src_start;
-    const size_t mapping_src_size =
-        (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
-
-    // Select a random address to map at.
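-    // NOTE: ASLR is unimplemented in FindFreeArea, so this is currently a first-fit search.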
-    KProcessAddress dst_addr =
-        this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
-                           PageSize, 0, this->GetNumGuardPages());
-
-    R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
-
-    // Check that we can perform the operation we're about to perform.
-    ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Reserve space for any partial pages we allocate.
-    const size_t unmapped_size = aligned_src_size - mapping_src_size;
-    KScopedResourceReservation memory_reservation(
-        m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size);
-    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
-    // Ensure that we manage page references correctly.
-    KPhysicalAddress start_partial_page = 0;
-    KPhysicalAddress end_partial_page = 0;
-    KProcessAddress cur_mapped_addr = dst_addr;
-
-    // If the partial pages are mapped, an extra reference will have been opened; otherwise,
-    // they'll be freed on scope exit.
-    SCOPE_EXIT({
-        if (start_partial_page != 0) {
-            m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
-        }
-        if (end_partial_page != 0) {
-            m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
-        }
-    });
-
-    ON_RESULT_FAILURE {
-        if (cur_mapped_addr != dst_addr) {
-            ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
-                           KMemoryPermission::None, OperationType::Unmap)
-                       .IsSuccess());
-        }
-    };
-
-    // Allocate the start page as needed.
-    if (aligned_src_start < mapping_src_start) {
-        start_partial_page =
-            m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
-        R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
-    }
-
-    // Allocate the end page as needed.
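-    // A separate end page is only needed when the unaligned tail isn't already covered by the
-    // start partial page.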
-    if (mapping_src_end < aligned_src_end &&
-        (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
-        end_partial_page =
-            m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
-        R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
-    }
-
-    // Get the implementation.
-    auto& src_impl = src_page_table.PageTableImpl();
-
-    // Get the fill value for partial pages.
-    const auto fill_val = m_ipc_fill_value;
-
-    // Begin traversal.
-    Common::PageTable::TraversalContext context;
-    Common::PageTable::TraversalEntry next_entry;
-    bool traverse_valid =
-        src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
-    ASSERT(traverse_valid);
-
-    // Prepare tracking variables.
-    KPhysicalAddress cur_block_addr = next_entry.phys_addr;
-    size_t cur_block_size =
-        next_entry.block_size - (cur_block_addr & (next_entry.block_size - 1));
-    size_t tot_block_size = cur_block_size;
-
-    // Map the start page, if we have one.
-    if (start_partial_page != 0) {
-        // Ensure the page holds correct data.
-        const KVirtualAddress start_partial_virt =
-            GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
-        if (send) {
-            const size_t partial_offset = src_start - aligned_src_start;
-            size_t copy_size, clear_size;
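-            // If the send ends before the first whole page, copy the entire message and clear
-            // the rest of the partial page; otherwise, copy up to the first page boundary.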
-            if (src_end < mapping_src_start) {
-                copy_size = size;
-                clear_size = mapping_src_start - src_end;
-            } else {
-                copy_size = mapping_src_start - src_start;
-                clear_size = 0;
-            }
-
-            std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
-                        partial_offset);
-            std::memcpy(
-                m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
-                m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
-                                               m_system.Kernel().MemoryLayout(), cur_block_addr)) +
-                                           partial_offset),
-                copy_size);
-            if (clear_size > 0) {
-                std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
-                                                       partial_offset + copy_size),
-                            fill_val, clear_size);
-            }
-        } else {
-            std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
-                        PageSize);
-        }
-
-        // Map the page.
-        R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
-
-        // Update tracking extents.
-        cur_mapped_addr += PageSize;
-        cur_block_addr += PageSize;
-        cur_block_size -= PageSize;
-
-        // If the block's size was one page, we may need to continue traversal.
-        if (cur_block_size == 0 && aligned_src_size > PageSize) {
-            traverse_valid = src_impl.ContinueTraversal(next_entry, context);
-            ASSERT(traverse_valid);
-
-            cur_block_addr = next_entry.phys_addr;
-            cur_block_size = next_entry.block_size;
-            tot_block_size += next_entry.block_size;
-        }
-    }
-
-    // Map the remaining pages.
-    while (aligned_src_start + tot_block_size < mapping_src_end) {
-        // Continue the traversal.
-        traverse_valid = src_impl.ContinueTraversal(next_entry, context);
-        ASSERT(traverse_valid);
-
-        // Process the block.
-        if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
-            // Map the block we've been processing so far.
-            R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
-                          cur_block_addr));
-
-            // Update tracking extents.
-            cur_mapped_addr += cur_block_size;
-            cur_block_addr = next_entry.phys_addr;
-            cur_block_size = next_entry.block_size;
-        } else {
-            cur_block_size += next_entry.block_size;
-        }
-        tot_block_size += next_entry.block_size;
-    }
-
-    // Handle the last direct-mapped page.
-    if (const KProcessAddress mapped_block_end =
-            aligned_src_start + tot_block_size - cur_block_size;
-        mapped_block_end < mapping_src_end) {
-        const size_t last_block_size = mapping_src_end - mapped_block_end;
-
-        // Map the last block.
-        R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
-                      cur_block_addr));
-
-        // Update tracking extents.
-        cur_mapped_addr += last_block_size;
-        cur_block_addr += last_block_size;
-        if (mapped_block_end + cur_block_size < aligned_src_end &&
-            cur_block_size == last_block_size) {
-            traverse_valid = src_impl.ContinueTraversal(next_entry, context);
-            ASSERT(traverse_valid);
-
-            cur_block_addr = next_entry.phys_addr;
-        }
-    }
-
-    // Map the end page, if we have one.
-    if (end_partial_page != 0) {
-        // Ensure the page holds correct data.
-        const KVirtualAddress end_partial_virt =
-            GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
-        if (send) {
-            const size_t copy_size = src_end - mapping_src_end;
-            std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
-                        m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
-                            m_system.Kernel().MemoryLayout(), cur_block_addr))),
-                        copy_size);
-            std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
-                        fill_val, PageSize - copy_size);
-        } else {
-            std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
-                        PageSize);
-        }
-
-        // Map the page.
-        R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-    }
-
-    // Update memory blocks to reflect our changes.
-    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
-                                  dst_state, test_perm, KMemoryAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    // Set the output address.
-    *out_addr = dst_addr + (src_start - aligned_src_start);
-
-    // We succeeded.
-    memory_reservation.Commit();
-    R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
-                               KPageTable& src_page_table, KMemoryPermission test_perm,
-                               KMemoryState dst_state, bool send) {
-    // For convenience, alias this.
-    KPageTable& dst_page_table = *this;
-
-    // Acquire the table locks.
-    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(std::addressof(src_page_table));
-
-    // Perform client setup.
-    size_t num_allocator_blocks;
-    R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
-                                           std::addressof(num_allocator_blocks), src_addr, size,
-                                           test_perm, dst_state));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 src_page_table.m_memory_block_slab_manager,
-                                                 num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Get the mapped extents.
-    const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
-    const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
-    const size_t src_map_size = src_map_end - src_map_start;
-
-    // Ensure that we clean up appropriately if we fail after this.
-    const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
-                              ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
-                              : KMemoryPermission::UserRead;
-    ON_RESULT_FAILURE {
-        if (src_map_end > src_map_start) {
-            src_page_table.CleanupForIpcClientOnServerSetupFailure(
-                updater.GetPageList(), src_map_start, src_map_size, src_perm);
-        }
-    };
-
-    // Perform server setup.
-    R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
-                                           src_page_table, send));
-
-    // If anything was mapped, ipc-lock the pages.
-    if (src_map_start < src_map_end) {
-        // Get the source permission.
-        src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
-                                                         (src_map_end - src_map_start) / PageSize,
-                                                         &KMemoryBlock::LockForIpc, src_perm);
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
-                                       KMemoryState dst_state) {
-    // Validate the address.
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Validate the memory state.
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
-                                 KMemoryState::All, dst_state, KMemoryPermission::UserRead,
-                                 KMemoryPermission::UserRead, KMemoryAttribute::All,
-                                 KMemoryAttribute::None));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Get aligned extents.
-    const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
-    const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
-    const size_t aligned_size = aligned_end - aligned_start;
-    const size_t aligned_num_pages = aligned_size / PageSize;
-
-    // Unmap the pages.
-    R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
-
-    // Update memory blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
-                                  KMemoryState::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
-
-    // Release from the resource limit as relevant.
-    const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
-    const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
-    const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
-    m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
-                                       KMemoryState dst_state) {
-    // Validate the address.
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Get aligned source extents.
-    const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
-    const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
-    const KProcessAddress mapping_last = mapping_end - 1;
-    const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
-
-    // If nothing was mapped, we're actually done immediately.
-    R_SUCCEED_IF(mapping_size == 0);
-
-    // Get the test state and attribute mask.
-    KMemoryState test_state;
-    KMemoryAttribute test_attr_mask;
-    switch (dst_state) {
-    case KMemoryState::Ipc:
-        test_state = KMemoryState::FlagCanUseIpc;
-        test_attr_mask =
-            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
-        break;
-    case KMemoryState::NonSecureIpc:
-        test_state = KMemoryState::FlagCanUseNonSecureIpc;
-        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
-        break;
-    case KMemoryState::NonDeviceIpc:
-        test_state = KMemoryState::FlagCanUseNonDeviceIpc;
-        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
-        break;
-    default:
-        R_THROW(ResultInvalidCombination);
-    }
-
-    // Lock the table.
-    // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
-    // convention elsewhere in KPageTable.
-    KScopedLightLock lk(m_general_lock);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Ensure that on failure, we roll back appropriately.
-    size_t mapped_size = 0;
-    ON_RESULT_FAILURE {
-        if (mapped_size > 0) {
-            // Determine where the mapping ends.
-            const auto mapped_end = mapping_start + mapped_size;
-            const auto mapped_last = mapped_end - 1;
-
-            // Get current and next iterators.
-            KMemoryBlockManager::const_iterator start_it =
-                m_memory_block_manager.FindIterator(mapping_start);
-            KMemoryBlockManager::const_iterator next_it = start_it;
-            ++next_it;
-
-            // Get the current block info.
-            KMemoryInfo cur_info = start_it->GetMemoryInfo();
-
-            // Create tracking variables.
-            KProcessAddress cur_address = cur_info.GetAddress();
-            size_t cur_size = cur_info.GetSize();
-            bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
-            bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
-            bool first =
-                cur_info.GetIpcDisableMergeCount() == 1 &&
-                (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
-                    KMemoryBlockDisableMergeAttribute::None;
-
-            while ((cur_address + cur_size - 1) < mapped_last) {
-                // Check that we have a next block.
-                ASSERT(next_it != m_memory_block_manager.end());
-
-                // Get the next info.
-                const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
-                // Check if we can consolidate the next block's permission set with the current one.
-                const bool next_perm_eq =
-                    next_info.GetPermission() == next_info.GetOriginalPermission();
-                const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
-                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
-                    cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
-                    // We can consolidate the reprotection for the current and next block into a
-                    // single call.
-                    cur_size += next_info.GetSize();
-                } else {
-                    // We have to operate on the current block.
-                    if ((cur_needs_set_perm || first) && !cur_perm_eq) {
-                        ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
-                                       OperationType::ChangePermissions)
-                                   .IsSuccess());
-                    }
-
-                    // Advance.
-                    cur_address = next_info.GetAddress();
-                    cur_size = next_info.GetSize();
-                    first = false;
-                }
-
-                // Advance.
-                cur_info = next_info;
-                cur_perm_eq = next_perm_eq;
-                cur_needs_set_perm = next_needs_set_perm;
-                ++next_it;
-            }
-
-            // Process the last block.
-            if ((first || cur_needs_set_perm) && !cur_perm_eq) {
-                ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
-                               OperationType::ChangePermissions)
-                           .IsSuccess());
-            }
-        }
-    };
-
-    // Iterate, reprotecting as needed.
-    {
-        // Get current and next iterators.
-        KMemoryBlockManager::const_iterator start_it =
-            m_memory_block_manager.FindIterator(mapping_start);
-        KMemoryBlockManager::const_iterator next_it = start_it;
-        ++next_it;
-
-        // Validate the current block.
-        KMemoryInfo cur_info = start_it->GetMemoryInfo();
-        ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
-                                      KMemoryPermission::None,
-                                      test_attr_mask | KMemoryAttribute::IpcLocked,
-                                      KMemoryAttribute::IpcLocked)
-                   .IsSuccess());
-
-        // Create tracking variables.
-        KProcessAddress cur_address = cur_info.GetAddress();
-        size_t cur_size = cur_info.GetSize();
-        bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
-        bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
-        bool first =
-            cur_info.GetIpcDisableMergeCount() == 1 &&
-            (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
-                KMemoryBlockDisableMergeAttribute::None;
-
-        while ((cur_address + cur_size - 1) < mapping_last) {
-            // Check that we have a next block.
-            ASSERT(next_it != m_memory_block_manager.end());
-
-            // Get the next info.
-            const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
-            // Validate the next block.
-            ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
-                                          KMemoryPermission::None, KMemoryPermission::None,
-                                          test_attr_mask | KMemoryAttribute::IpcLocked,
-                                          KMemoryAttribute::IpcLocked)
-                       .IsSuccess());
-
-            // Check if we can consolidate the next block's permission set with the current one.
-            const bool next_perm_eq =
-                next_info.GetPermission() == next_info.GetOriginalPermission();
-            const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
-            if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
-                cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
-                // We can consolidate the reprotection for the current and next block into a single
-                // call.
-                cur_size += next_info.GetSize();
-            } else {
-                // We have to operate on the current block.
-                if ((cur_needs_set_perm || first) && !cur_perm_eq) {
-                    R_TRY(Operate(cur_address, cur_size / PageSize,
-                                  cur_needs_set_perm ? cur_info.GetOriginalPermission()
-                                                     : cur_info.GetPermission(),
-                                  OperationType::ChangePermissions));
-                }
-
-                // Mark that we mapped the block.
-                mapped_size += cur_size;
-
-                // Advance.
-                cur_address = next_info.GetAddress();
-                cur_size = next_info.GetSize();
-                first = false;
-            }
-
-            // Advance.
-            cur_info = next_info;
-            cur_perm_eq = next_perm_eq;
-            cur_needs_set_perm = next_needs_set_perm;
-            ++next_it;
-        }
-
-        // Process the last block.
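-        // Reprotect if this is the first block, it still needs its permission set, or this is
-        // the last outstanding ipc lock.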
-        const auto lock_count =
-            cur_info.GetIpcLockCount() +
-            (next_it != m_memory_block_manager.end()
-                 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
-                 : 0);
-        if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
-            R_TRY(Operate(cur_address, cur_size / PageSize,
-                          cur_needs_set_perm ? cur_info.GetOriginalPermission()
-                                             : cur_info.GetPermission(),
-                          OperationType::ChangePermissions));
-        }
-    }
-
-    // Create an update allocator.
-    // NOTE: Guaranteed zero blocks needed here.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, 0);
-    R_TRY(allocator_result);
-
-    // Unlock the pages.
-    m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
-                                      mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
-                                      KMemoryPermission::None);
-
-    R_SUCCEED();
-}
-
-void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
-                                                         KProcessAddress address, size_t size,
-                                                         KMemoryPermission prot_perm) {
-    ASSERT(this->IsLockedByCurrentThread());
-    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
-    ASSERT(Common::IsAligned(size, PageSize));
-
-    // Get the mapped extents.
-    const KProcessAddress src_map_start = address;
-    const KProcessAddress src_map_end = address + size;
-    const KProcessAddress src_map_last = src_map_end - 1;
-
-    // This function is only invoked when there's something to do.
-    ASSERT(src_map_end > src_map_start);
-
-    // Iterate over blocks, fixing permissions.
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
-    while (true) {
-        const KMemoryInfo info = it->GetMemoryInfo();
-
-        const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
-                                   ? info.GetAddress()
-                                   : GetInteger(src_map_start);
-        const auto cur_end =
-            src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
-
-        // If we can, fix the protections on the block.
-        if ((info.GetIpcLockCount() == 0 &&
-             (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
-            (info.GetIpcLockCount() != 0 &&
-             (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
-            // Check if we actually need to fix the protections on the block.
-            if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
-                (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
-                ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
-                               OperationType::ChangePermissions)
-                           .IsSuccess());
-            }
-        }
-
-        // If we're past the end of the region, we're done.
-        if (src_map_last <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance.
-        ++it;
-        ASSERT(it != m_memory_block_manager.end());
-    }
-}
-
-Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
-    // Lock the physical memory lock.
-    KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
-    // Calculate the last address for convenience.
-    const KProcessAddress last_address = address + size - 1;
-
-    // Define iteration variables.
-    KProcessAddress cur_address;
-    size_t mapped_size;
-
-    // The entire mapping process can be retried.
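-    // If the set of mapped blocks changes between the check and the allocation below, we retry.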
-    while (true) {
-        // Check if the memory is already mapped.
-        {
-            // Lock the table.
-            KScopedLightLock lk(m_general_lock);
-
-            // Iterate over the memory.
-            cur_address = address;
-            mapped_size = 0;
-
-            auto it = m_memory_block_manager.FindIterator(cur_address);
-            while (true) {
-                // Check that the iterator is valid.
-                ASSERT(it != m_memory_block_manager.end());
-
-                // Get the memory info.
-                const KMemoryInfo info = it->GetMemoryInfo();
-
-                // Check if we're done.
-                if (last_address <= info.GetLastAddress()) {
-                    if (info.GetState() != KMemoryState::Free) {
-                        mapped_size += (last_address + 1 - cur_address);
-                    }
-                    break;
-                }
-
-                // Track the memory if it's mapped.
-                if (info.GetState() != KMemoryState::Free) {
-                    mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
-                }
-
-                // Advance.
-                cur_address = info.GetEndAddress();
-                ++it;
-            }
-
-            // If the size mapped is the size requested, we've nothing to do.
-            R_SUCCEED_IF(size == mapped_size);
-        }
-
-        // Allocate and map the memory.
-        {
-            // Reserve the memory from the process resource limit.
-            KScopedResourceReservation memory_reservation(
-                m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size);
-            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
-            // Allocate pages for the new memory.
-            KPageGroup pg{m_kernel, m_block_info_manager};
-            R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
-                &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
-
-            // If we fail in the next bit (or retry), we need to clean up the pages.
-            // auto pg_guard = SCOPE_GUARD {
-            //    pg.OpenFirst();
-            //    pg.Close();
-            //};
-
-            // Map the memory.
-            {
-                // Lock the table.
-                KScopedLightLock lk(m_general_lock);
-
-                size_t num_allocator_blocks = 0;
-
-                // Verify that nobody has mapped memory since we first checked.
-                {
-                    // Iterate over the memory.
-                    size_t checked_mapped_size = 0;
-                    cur_address = address;
-
-                    auto it = m_memory_block_manager.FindIterator(cur_address);
-                    while (true) {
-                        // Check that the iterator is valid.
-                        ASSERT(it != m_memory_block_manager.end());
-
-                        // Get the memory info.
-                        const KMemoryInfo info = it->GetMemoryInfo();
-
-                        const bool is_free = info.GetState() == KMemoryState::Free;
-                        if (is_free) {
-                            if (info.GetAddress() < GetInteger(address)) {
-                                ++num_allocator_blocks;
-                            }
-                            if (last_address < info.GetLastAddress()) {
-                                ++num_allocator_blocks;
-                            }
-                        }
-
-                        // Check if we're done.
-                        if (last_address <= info.GetLastAddress()) {
-                            if (!is_free) {
-                                checked_mapped_size += (last_address + 1 - cur_address);
-                            }
-                            break;
-                        }
-
-                        // Track the memory if it's mapped.
-                        if (!is_free) {
-                            checked_mapped_size +=
-                                KProcessAddress(info.GetEndAddress()) - cur_address;
-                        }
-
-                        // Advance.
-                        cur_address = info.GetEndAddress();
-                        ++it;
-                    }
-
-                    // If the size now isn't what it was before, somebody mapped or unmapped
-                    // concurrently. If this happened, retry.
-                    if (mapped_size != checked_mapped_size) {
-                        continue;
-                    }
-                }
-
-                // Create an update allocator.
-                ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
-                Result allocator_result;
-                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                             m_memory_block_slab_manager,
-                                                             num_allocator_blocks);
-                R_TRY(allocator_result);
-
-                // We're going to perform an update, so create a helper.
-                KScopedPageTableUpdater updater(this);
-
-                // Prepare to iterate over the memory.
-                auto pg_it = pg.begin();
-                KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
-                size_t pg_pages = pg_it->GetNumPages();
-
-                // Reset the current tracking address, and make sure we clean up on failure.
-                // pg_guard.Cancel();
-                cur_address = address;
-                ON_RESULT_FAILURE {
-                    if (cur_address > address) {
-                        const KProcessAddress last_unmap_address = cur_address - 1;
-
-                        // Iterate, unmapping the pages.
-                        cur_address = address;
-
-                        auto it = m_memory_block_manager.FindIterator(cur_address);
-                        while (true) {
-                            // Check that the iterator is valid.
-                            ASSERT(it != m_memory_block_manager.end());
-
-                            // Get the memory info.
-                            const KMemoryInfo info = it->GetMemoryInfo();
-
-                            // If the memory state is free, we mapped it and need to unmap it.
-                            if (info.GetState() == KMemoryState::Free) {
-                                // Determine the range to unmap.
-                                const size_t cur_pages =
-                                    std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
-                                             last_unmap_address + 1 - cur_address) /
-                                    PageSize;
-
-                                // Unmap.
-                                ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
-                                               OperationType::Unmap)
-                                           .IsSuccess());
-                            }
-
-                            // Check if we're done.
-                            if (last_unmap_address <= info.GetLastAddress()) {
-                                break;
-                            }
-
-                            // Advance.
-                            cur_address = info.GetEndAddress();
-                            ++it;
-                        }
-                    }
-
-                    // Release any remaining unmapped memory.
-                    m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
-                    m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
-                        m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
-                                                                    pg_it->GetNumPages());
-                        m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
-                                                                pg_it->GetNumPages());
-                    }
-                };
-
-                auto it = m_memory_block_manager.FindIterator(cur_address);
-                while (true) {
-                    // Check that the iterator is valid.
-                    ASSERT(it != m_memory_block_manager.end());
-
-                    // Get the memory info.
-                    const KMemoryInfo info = it->GetMemoryInfo();
-
-                    // If it's unmapped, we need to map it.
-                    if (info.GetState() == KMemoryState::Free) {
-                        // Determine the range to map.
-                        size_t map_pages =
-                            std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
-                                     last_address + 1 - cur_address) /
-                            PageSize;
-
-                        // While we have pages to map, map them.
-                        {
-                            // Create a page group for the current mapping range.
-                            KPageGroup cur_pg(m_kernel, m_block_info_manager);
-                            {
-                                ON_RESULT_FAILURE_2 {
-                                    cur_pg.OpenFirst();
-                                    cur_pg.Close();
-                                };
-
-                                size_t remain_pages = map_pages;
-                                while (remain_pages > 0) {
-                                    // Check if we're at the end of the physical block.
-                                    if (pg_pages == 0) {
-                                        // Ensure there are more pages to map.
-                                        ASSERT(pg_it != pg.end());
-
-                                        // Advance our physical block.
-                                        ++pg_it;
-                                        pg_phys_addr = pg_it->GetAddress();
-                                        pg_pages = pg_it->GetNumPages();
-                                    }
-
-                                    // Add whatever we can to the current block.
-                                    const size_t cur_pages = std::min(pg_pages, remain_pages);
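-                                    // Consume pages from the high end of the current
-                                    // physical block, since pg_pages counts down.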
-                                    R_TRY(cur_pg.AddBlock(pg_phys_addr +
-                                                              ((pg_pages - cur_pages) * PageSize),
-                                                          cur_pages));
-
-                                    // Advance.
-                                    remain_pages -= cur_pages;
-                                    pg_pages -= cur_pages;
-                                }
-                            }
-
-                            // Map the pages.
-                            R_TRY(this->Operate(cur_address, map_pages, cur_pg,
-                                                OperationType::MapFirstGroup));
-                        }
-                    }
-
-                    // Check if we're done.
-                    if (last_address <= info.GetLastAddress()) {
-                        break;
-                    }
-
-                    // Advance.
-                    cur_address = info.GetEndAddress();
-                    ++it;
-                }
-
-                // We succeeded, so commit the memory reservation.
-                memory_reservation.Commit();
-
-                // Increase our tracked mapped size.
-                m_mapped_physical_memory_size += (size - mapped_size);
-
-                // Update the relevant memory blocks.
-                m_memory_block_manager.UpdateIfMatch(
-                    std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
-                    KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
-                    KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-                    address == this->GetAliasRegionStart()
-                        ? KMemoryBlockDisableMergeAttribute::Normal
-                        : KMemoryBlockDisableMergeAttribute::None,
-                    KMemoryBlockDisableMergeAttribute::None);
-
-                R_SUCCEED();
-            }
-        }
-    }
-}
-
-Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
-    // Lock the physical memory lock.
-    KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Calculate the last address for convenience.
-    const KProcessAddress last_address = address + size - 1;
-
-    // Define iteration variables.
-    KProcessAddress map_start_address = 0;
-    KProcessAddress map_last_address = 0;
-
-    KProcessAddress cur_address;
-    size_t mapped_size;
-    size_t num_allocator_blocks = 0;
-
-    // Check if the memory is mapped.
-    {
-        // Iterate over the memory.
-        cur_address = address;
-        mapped_size = 0;
-
-        auto it = m_memory_block_manager.FindIterator(cur_address);
-        while (true) {
-            // Check that the iterator is valid.
-            ASSERT(it != m_memory_block_manager.end());
-
-            // Get the memory info.
-            const KMemoryInfo info = it->GetMemoryInfo();
-
-            // Verify the memory's state.
-            const bool is_normal = info.GetState() == KMemoryState::Normal &&
-                                   info.GetAttribute() == KMemoryAttribute::None;
-            const bool is_free = info.GetState() == KMemoryState::Free;
-            R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
-
-            if (is_normal) {
-                R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
-
-                if (map_start_address == 0) {
-                    map_start_address = cur_address;
-                }
-                map_last_address =
-                    (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
-
-                if (info.GetAddress() < GetInteger(address)) {
-                    ++num_allocator_blocks;
-                }
-                if (last_address < info.GetLastAddress()) {
-                    ++num_allocator_blocks;
-                }
-
-                mapped_size += (map_last_address + 1 - cur_address);
-            }
-
-            // Check if we're done.
-            if (last_address <= info.GetLastAddress()) {
-                break;
-            }
-
-            // Advance.
-            cur_address = info.GetEndAddress();
-            ++it;
-        }
-
-        // If there's nothing mapped, we've nothing to do.
-        R_SUCCEED_IF(mapped_size == 0);
-    }
-
-    // Create an update allocator.
-    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Separate the mapping.
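-    // Ensure the range is separated from surrounding mappings before unmapping.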
-    R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
-                  KMemoryPermission::None, OperationType::Separate));
-
-    // Reset the current tracking address, and make sure we clean up on failure.
-    cur_address = address;
-
-    // Iterate over the memory, unmapping as we go.
-    auto it = m_memory_block_manager.FindIterator(cur_address);
-
-    const auto clear_merge_attr =
-        (it->GetState() == KMemoryState::Normal &&
-         it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
-            ? KMemoryBlockDisableMergeAttribute::Normal
-            : KMemoryBlockDisableMergeAttribute::None;
-
-    while (true) {
-        // Check that the iterator is valid.
-        ASSERT(it != m_memory_block_manager.end());
-
-        // Get the memory info.
-        const KMemoryInfo info = it->GetMemoryInfo();
-
-        // If the memory state is normal, we need to unmap it.
-        if (info.GetState() == KMemoryState::Normal) {
-            // Determine the range to unmap.
-            const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
-                                              last_address + 1 - cur_address) /
-                                     PageSize;
-
-            // Unmap.
-            ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
-                       .IsSuccess());
-        }
-
-        // Check if we're done.
-        if (last_address <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance.
-        cur_address = info.GetEndAddress();
-        ++it;
-    }
-
-    // Release the memory resource.
-    m_mapped_physical_memory_size -= mapped_size;
-    m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size);
-
-    // Update memory blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
-                                  KMemoryState::Free, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  clear_merge_attr);
-
-    // We succeeded.
-    R_SUCCEED();
-}
-
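A note on the deleted UnmapPhysicalMemory above: its first pass walks the block list to validate state and to count how many blocks the range boundaries split, so the update allocator can reserve enough nodes before any unmapping happens. A minimal standalone sketch of that boundary-split counting, with an illustrative Block type standing in for KMemoryBlock:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
    uint64_t address; // first byte covered by this block
    uint64_t size;    // bytes covered
    bool normal;      // stands in for KMemoryState::Normal
    uint64_t Last() const { return address + size - 1; }
};

// Count the extra list nodes the update allocator must reserve: one for each
// boundary of [address, address + size) that cuts a Normal block in two.
size_t CountAllocatorBlocks(const std::vector<Block>& blocks, uint64_t address, uint64_t size) {
    const uint64_t last = address + size - 1;
    size_t count = 0;
    for (const auto& b : blocks) {
        if (b.Last() < address || last < b.address) {
            continue; // block is entirely outside the range
        }
        if (b.normal) {
            count += (b.address < address); // head of the block survives: split
            count += (last < b.Last());     // tail of the block survives: split
        }
    }
    return count;
}

int main() {
    const std::vector<Block> blocks{{0x0000, 0x4000, true}, {0x4000, 0x4000, false}};
    // Unmapping [0x1000, 0x3000) splits the first block at both ends.
    assert(CountAllocatorBlocks(blocks, 0x1000, 0x2000) == 2);
}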
-Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
-                             size_t size) {
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Validate that the source address's state is valid.
-    KMemoryState src_state;
-    size_t num_src_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
-                                 std::addressof(num_src_allocator_blocks), src_address, size,
-                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
-                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                                 KMemoryAttribute::All, KMemoryAttribute::None));
-
-    // Validate that the dst address's state is valid.
-    size_t num_dst_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
-                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::None,
-                                 KMemoryAttribute::None));
-
-    // Create an update allocator for the source.
-    Result src_allocator_result;
-    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_src_allocator_blocks);
-    R_TRY(src_allocator_result);
-
-    // Create an update allocator for the destination.
-    Result dst_allocator_result;
-    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_dst_allocator_blocks);
-    R_TRY(dst_allocator_result);
-
-    // Map the memory.
-    {
-        // Determine the number of pages being operated on.
-        const size_t num_pages = size / PageSize;
-
-        // Create a page group for the memory being mapped.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        // Create the page group representing the source.
-        R_TRY(this->MakePageGroup(pg, src_address, num_pages));
-
-        // We're going to perform an update, so create a helper.
-        KScopedPageTableUpdater updater(this);
-
-        // Reprotect the source as kernel-read/not mapped.
-        const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
-            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
-        const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
-        const KPageProperties src_properties = {new_src_perm, false, false,
-                                                DisableMergeAttribute::DisableHeadBodyTail};
-        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
-                            OperationType::ChangePermissions));
-
-        // Ensure that we unprotect the source pages on failure.
-        ON_RESULT_FAILURE {
-            const KPageProperties unprotect_properties = {
-                KMemoryPermission::UserReadWrite, false, false,
-                DisableMergeAttribute::EnableHeadBodyTail};
-            ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
-                                 OperationType::ChangePermissions) == ResultSuccess);
-        };
-
-        // Map the alias pages.
-        const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
-                                                    DisableMergeAttribute::DisableHead};
-        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
-                                     false));
-
-        // Apply the memory block updates.
-        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
-                                      src_state, new_src_perm, new_src_attr,
-                                      KMemoryBlockDisableMergeAttribute::Locked,
-                                      KMemoryBlockDisableMergeAttribute::None);
-        m_memory_block_manager.Update(
-            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-            KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
-    }
-
-    R_SUCCEED();
-}
-
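MapMemory above reprotects the source before mapping the alias and relies on ON_RESULT_FAILURE to restore the source permissions if the alias mapping fails. A plain C++ approximation of that commit-or-rollback idiom, not the kernel's actual macro machinery:

#include <cstdio>
#include <functional>
#include <utility>

class ResultGuard {
public:
    explicit ResultGuard(std::function<void()> cleanup) : m_cleanup(std::move(cleanup)) {}
    ~ResultGuard() {
        if (!m_committed) {
            m_cleanup(); // failure path: undo the earlier step
        }
    }
    void Commit() { m_committed = true; } // success path: keep the new state
private:
    std::function<void()> m_cleanup;
    bool m_committed = false;
};

bool ReprotectSource() { return true; } // stand-in for the ChangePermissions Operate
bool MapAlias() { return false; }       // stand-in for MapPageGroupImpl; fails here
void UnprotectSource() { std::puts("restored source permissions"); }

int main() {
    if (!ReprotectSource()) {
        return 1;
    }
    ResultGuard guard{UnprotectSource}; // mirrors ON_RESULT_FAILURE
    if (!MapAlias()) {
        return 1; // guard's destructor restores the source
    }
    guard.Commit(); // both steps succeeded: skip the rollback
}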
-Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
-                               size_t size) {
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Validate that the source address's state is valid.
-    KMemoryState src_state;
-    size_t num_src_allocator_blocks;
-    R_TRY(this->CheckMemoryState(
-        std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
-        src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
-        KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
-        KMemoryAttribute::All, KMemoryAttribute::Locked));
-
-    // Validate that the dst address's state is valid.
-    KMemoryPermission dst_perm;
-    size_t num_dst_allocator_blocks;
-    R_TRY(this->CheckMemoryState(
-        nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
-        dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
-        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
-
-    // Create an update allocator for the source.
-    Result src_allocator_result;
-    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_src_allocator_blocks);
-    R_TRY(src_allocator_result);
-
-    // Create an update allocator for the destination.
-    Result dst_allocator_result;
-    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_dst_allocator_blocks);
-    R_TRY(dst_allocator_result);
-
-    // Unmap the memory.
-    {
-        // Determine the number of pages being operated on.
-        const size_t num_pages = size / PageSize;
-
-        // Create page groups for the memory being unmapped.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        // Create the page group representing the destination.
-        R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
-
-        // Ensure the page group is valid for the source.
-        R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
-
-        // We're going to perform an update, so create a helper.
-        KScopedPageTableUpdater updater(this);
-
-        // Unmap the aliased copy of the pages.
-        const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
-                                                      DisableMergeAttribute::None};
-        R_TRY(
-            this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
-
-        // Ensure that we re-map the aliased pages on failure.
-        ON_RESULT_FAILURE {
-            this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
-        };
-
-        // Try to set the permissions for the source pages back to what they should be.
-        const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
-                                                DisableMergeAttribute::EnableAndMergeHeadBodyTail};
-        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
-                            OperationType::ChangePermissions));
-
-        // Apply the memory block updates.
-        m_memory_block_manager.Update(
-            std::addressof(src_allocator), src_address, num_pages, src_state,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
-        m_memory_block_manager.Update(
-            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
-            KMemoryPermission::None, KMemoryAttribute::None,
-            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
-                                           size_t num_pages, KMemoryPermission perm) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    // Create a page group to hold the pages we allocate.
-    KPageGroup pg{m_kernel, m_block_info_manager};
-
-    // Allocate the pages.
-    R_TRY(
-        m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
-
-    // Ensure that the page group is closed when we're done working with it.
-    SCOPE_EXIT({ pg.Close(); });
-
-    // Clear all pages.
-    for (const auto& it : pg) {
-        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
-                    it.GetSize());
-    }
-
-    // Map the pages.
-    R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
-}
-
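AllocateAndMapPagesImpl scrubs freshly allocated pages with m_heap_fill_value before mapping them, so a process can never observe stale contents. An illustrative standalone sketch of that allocate-fill-map ordering; the page size and fill byte here are assumptions:

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

constexpr size_t kPageSize = 0x1000; // assumption: 4 KiB pages

int main() {
    const size_t num_pages = 4;
    const unsigned char heap_fill_value = 0x58; // arbitrary stand-in fill byte
    std::vector<unsigned char> backing(num_pages * kPageSize);

    // Scrub the pages while they are only reachable through the allocator...
    std::memset(backing.data(), heap_fill_value, backing.size());

    // ...and only then hand them to the mapping step (Operate/MapGroup here).
    std::printf("first byte after scrub: 0x%02x\n", static_cast<unsigned>(backing[0]));
}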
-Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
-                                    const KPageGroup& pg, const KPageProperties properties,
-                                    bool reuse_ll) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    // Note the current address, so that we can iterate.
-    const KProcessAddress start_address = address;
-    KProcessAddress cur_address = address;
-
-    // Ensure that we clean up on failure.
-    ON_RESULT_FAILURE {
-        ASSERT(!reuse_ll);
-        if (cur_address != start_address) {
-            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
-                                                      DisableMergeAttribute::None};
-            ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
-                                 unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
-        }
-    };
-
-    // Iterate, mapping all pages in the group.
-    for (const auto& block : pg) {
-        // Map and advance.
-        const KPageProperties cur_properties =
-            (cur_address == start_address)
-                ? properties
-                : KPageProperties{properties.perm, properties.io, properties.uncached,
-                                  DisableMergeAttribute::None};
-        this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
-                      block.GetAddress());
-        cur_address += block.GetSize();
-    }
-
-    // We succeeded!
-    R_SUCCEED();
-}
-
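Note how MapPageGroupImpl applies the caller's DisableMergeAttribute only to the first block and maps every later block with DisableMergeAttribute::None, so interior block boundaries remain mergeable. A simplified sketch of that per-block property selection, with stand-in types:

#include <cstddef>
#include <cstdio>
#include <vector>

enum class Merge { None, DisableHead };
struct Props { Merge merge; };

int main() {
    const Props requested{Merge::DisableHead};
    const std::vector<size_t> block_pages{4, 2, 8}; // pages per physical block

    size_t cur_page = 0;
    for (const size_t pages : block_pages) {
        // Only the very first block keeps the caller's merge attribute.
        const Props cur = (cur_page == 0) ? requested : Props{Merge::None};
        std::printf("map %zu pages at page %zu, merge=%s\n", pages, cur_page,
                    cur.merge == Merge::DisableHead ? "DisableHead" : "None");
        cur_page += pages;
    }
}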
-void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
-                                const KPageGroup& pg) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    // Note the current address, so that we can iterate.
-    const KProcessAddress start_address = address;
-    const KProcessAddress last_address = start_address + size - 1;
-    const KProcessAddress end_address = last_address + 1;
-
-    // Iterate over the memory.
-    auto pg_it = pg.begin();
-    ASSERT(pg_it != pg.end());
-
-    KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
-    size_t pg_pages = pg_it->GetNumPages();
-
-    auto it = m_memory_block_manager.FindIterator(start_address);
-    while (true) {
-        // Check that the iterator is valid.
-        ASSERT(it != m_memory_block_manager.end());
-
-        // Get the memory info.
-        const KMemoryInfo info = it->GetMemoryInfo();
-
-        // Determine the range to map.
-        KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
-        const KProcessAddress map_end_address =
-            std::min<KProcessAddress>(info.GetEndAddress(), end_address);
-        ASSERT(map_end_address != map_address);
-
-        // Determine if we should disable head merge.
-        const bool disable_head_merge =
-            info.GetAddress() >= GetInteger(start_address) &&
-            True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
-        const KPageProperties map_properties = {
-            info.GetPermission(), false, false,
-            disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
-
-        // While we have pages to map, map them.
-        size_t map_pages = (map_end_address - map_address) / PageSize;
-        while (map_pages > 0) {
-            // Check if we're at the end of the physical block.
-            if (pg_pages == 0) {
-                // Ensure there are more pages to map.
-                ASSERT(pg_it != pg.end());
-
-                // Advance our physical block.
-                ++pg_it;
-                pg_phys_addr = pg_it->GetAddress();
-                pg_pages = pg_it->GetNumPages();
-            }
-
-            // Map whatever we can.
-            const size_t cur_pages = std::min(pg_pages, map_pages);
-            ASSERT(this->Operate(map_address, map_pages, map_properties.perm, OperationType::Map,
-                                 pg_phys_addr) == ResultSuccess);
-
-            // Advance.
-            map_address += cur_pages * PageSize;
-            map_pages -= cur_pages;
-
-            pg_phys_addr += cur_pages * PageSize;
-            pg_pages -= cur_pages;
-        }
-
-        // Check if we're done.
-        if (last_address <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance.
-        ++it;
-    }
-
-    // Check that we re-mapped precisely the page group.
-    ASSERT((++pg_it) == pg.end());
-}
-
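RemapPageGroup advances two cursors in lockstep: one over the virtual ranges described by the block manager and one over the page group's physical blocks, consuming min(pg_pages, map_pages) per step. A standalone sketch of that dual-cursor walk with illustrative page counts:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<size_t> virt_ranges{6, 4};    // pages per virtual range
    const std::vector<size_t> phys_blocks{3, 5, 2}; // pages per physical block

    size_t pb = 0;
    size_t pg_pages = phys_blocks[0];
    for (size_t map_pages : virt_ranges) {
        while (map_pages > 0) {
            if (pg_pages == 0) {
                pg_pages = phys_blocks[++pb]; // advance the physical cursor
            }
            const size_t cur = std::min(pg_pages, map_pages);
            std::printf("map %zu pages from physical block %zu\n", cur, pb);
            map_pages -= cur;
            pg_pages -= cur;
        }
    }
}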
-Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                            KPhysicalAddress phys_addr, bool is_pa_valid,
-                            KProcessAddress region_start, size_t region_num_pages,
-                            KMemoryState state, KMemoryPermission perm) {
-    ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
-
-    // Ensure this is a valid map request.
-    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
-             ResultInvalidCurrentMemory);
-    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Find a random address to map at.
-    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
-                                              0, this->GetNumGuardPages());
-    R_UNLESS(addr != 0, ResultOutOfMemory);
-    ASSERT(Common::IsAligned(GetInteger(addr), alignment));
-    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
-    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Perform mapping operation.
-    if (is_pa_valid) {
-        const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
-        R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
-    } else {
-        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
-    }
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    // We successfully mapped the pages.
-    *out_addr = addr;
-    R_SUCCEED();
-}
-
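FindFreeArea must honor both the requested alignment and the table's guard pages, which is why the chosen address is asserted to be aligned and entirely Free. A brute-force, purely illustrative sketch of that search constraint over a page bitmap:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Return the first aligned page index where num_pages fit with guard_pages of
// free space on both sides, or SIZE_MAX if no such slot exists.
size_t FindFreeArea(const std::vector<bool>& used, size_t num_pages, size_t align_pages,
                    size_t guard_pages) {
    const size_t total = used.size();
    for (size_t base = 0; base + num_pages <= total; base += align_pages) {
        const size_t lo = (base >= guard_pages) ? base - guard_pages : 0;
        const size_t hi = std::min(total, base + num_pages + guard_pages);
        bool ok = true;
        for (size_t i = lo; i < hi && ok; ++i) {
            ok = !used[i]; // guards and the mapping itself must be free
        }
        if (ok) {
            return base;
        }
    }
    return SIZE_MAX; // caller translates this into ResultOutOfMemory
}

int main() {
    std::vector<bool> used(32, false);
    used[0] = true; // page 0 occupied, so a one-page guard pushes us to page 2
    std::printf("found page %zu\n", FindFreeArea(used, 4, 2, 1));
}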
-Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
-                            KMemoryPermission perm) {
-    // Check that the map is in range.
-    const size_t size = num_pages * PageSize;
-    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check the memory state.
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
-                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::None,
-                                 KMemoryAttribute::None));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Map the pages.
-    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
-    // Check that the unmap is in range.
-    const size_t size = num_pages * PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check the memory state.
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
-                                 KMemoryState::All, state, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::All,
-                                 KMemoryAttribute::None));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Perform the unmap.
-    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
-                                              DisableMergeAttribute::None};
-    R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
-                                KProcessAddress region_start, size_t region_num_pages,
-                                KMemoryState state, KMemoryPermission perm) {
-    ASSERT(!this->IsLockedByCurrentThread());
-
-    // Ensure this is a valid map request.
-    const size_t num_pages = pg.GetNumPages();
-    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
-             ResultInvalidCurrentMemory);
-    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Find a random address to map at.
-    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
-                                              0, this->GetNumGuardPages());
-    R_UNLESS(addr != 0, ResultOutOfMemory);
-    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
-    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Perform mapping operation.
-    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
-    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    // We successfully mapped the pages.
-    *out_addr = addr;
-    R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
-                                KMemoryPermission perm) {
-    ASSERT(!this->IsLockedByCurrentThread());
-
-    // Ensure this is a valid map request.
-    const size_t num_pages = pg.GetNumPages();
-    const size_t size = num_pages * PageSize;
-    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check if state allows us to map.
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
-                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::None,
-                                 KMemoryAttribute::None));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Perform mapping operation.
-    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
-    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    // We successfully mapped the pages.
-    R_SUCCEED();
-}
-
-Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
-                                  KMemoryState state) {
-    ASSERT(!this->IsLockedByCurrentThread());
-
-    // Ensure this is a valid unmap request.
-    const size_t num_pages = pg.GetNumPages();
-    const size_t size = num_pages * PageSize;
-    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check if state allows us to unmap.
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
-                                 KMemoryState::All, state, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::All,
-                                 KMemoryAttribute::None));
-
-    // Check that the page group is valid.
-    R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // We're going to perform an update, so create a helper.
-    KScopedPageTableUpdater updater(this);
-
-    // Perform unmapping operation.
-    const KPageProperties properties = {KMemoryPermission::None, false, false,
-                                        DisableMergeAttribute::None};
-    R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
-                                        KMemoryState state_mask, KMemoryState state,
-                                        KMemoryPermission perm_mask, KMemoryPermission perm,
-                                        KMemoryAttribute attr_mask, KMemoryAttribute attr) {
-    // Ensure that the page group isn't null.
-    ASSERT(out != nullptr);
-
-    // Make sure that the region we're mapping is valid for the table.
-    const size_t size = num_pages * PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check if state allows us to create the group.
-    R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
-                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
-                                 attr_mask, attr));
-
-    // Create a new page group for the region.
-    R_TRY(this->MakePageGroup(*out, address, num_pages));
-
-    R_SUCCEED();
-}
-
-Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
-                                              Svc::MemoryPermission svc_perm) {
-    const size_t num_pages = size / PageSize;
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Verify we can change the memory permission.
-    KMemoryState old_state;
-    KMemoryPermission old_perm;
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
-                                 std::addressof(num_allocator_blocks), addr, size,
-                                 KMemoryState::FlagCode, KMemoryState::FlagCode,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 KMemoryAttribute::All, KMemoryAttribute::None));
-
-    // Determine new perm/state.
-    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
-    KMemoryState new_state = old_state;
-    const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
-    const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
-    const bool was_x =
-        (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
-    ASSERT(!(is_w && is_x));
-
-    if (is_w) {
-        switch (old_state) {
-        case KMemoryState::Code:
-            new_state = KMemoryState::CodeData;
-            break;
-        case KMemoryState::AliasCode:
-            new_state = KMemoryState::AliasCodeData;
-            break;
-        default:
-            ASSERT(false);
-            break;
-        }
-    }
-
-    // Succeed if there's nothing to do.
-    R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Perform mapping operation.
-    const auto operation =
-        was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
-    R_TRY(Operate(addr, num_pages, new_perm, operation));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    // Ensure cache coherency, if we're setting pages as executable.
-    if (is_x) {
-        m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
-    }
-
-    R_SUCCEED();
-}
-
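The state transition in SetProcessMemoryPermission enforces W^X: making code writable demotes Code to CodeData (and AliasCode to AliasCodeData), and write plus execute together is asserted impossible. A compact sketch of that decision, with stand-in enums:

#include <cassert>
#include <optional>

enum class State { Code, CodeData, AliasCode, AliasCodeData };

std::optional<State> NewStateForPermission(State old_state, bool is_w, bool is_x) {
    if (is_w && is_x) {
        return std::nullopt; // W^X: never writable and executable at once
    }
    if (!is_w) {
        return old_state; // read/execute changes keep the state
    }
    switch (old_state) {
    case State::Code:      return State::CodeData;      // code made writable
    case State::AliasCode: return State::AliasCodeData; // alias code made writable
    default:               return std::nullopt;         // invalid request
    }
}

int main() {
    assert(NewStateForPermission(State::Code, true, false) == State::CodeData);
    assert(!NewStateForPermission(State::Code, true, true)); // rejected
}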
-KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
-    KScopedLightLock lk(m_general_lock);
-
-    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
-}
-
-KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
-    if (!Contains(addr, 1)) {
-        return {
-            .m_address = GetInteger(m_address_space_end),
-            .m_size = 0 - GetInteger(m_address_space_end),
-            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
-            .m_device_disable_merge_left_count = 0,
-            .m_device_disable_merge_right_count = 0,
-            .m_ipc_lock_count = 0,
-            .m_device_use_count = 0,
-            .m_ipc_disable_merge_count = 0,
-            .m_permission = KMemoryPermission::None,
-            .m_attribute = KMemoryAttribute::None,
-            .m_original_permission = KMemoryPermission::None,
-            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
-        };
-    }
-
-    return QueryInfoImpl(addr);
-}
-
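For addresses outside the table, QueryInfo synthesizes an Inaccessible block whose size is computed as 0 - m_address_space_end; the unsigned wraparound makes the block cover everything from the address-space end up to the top of the 64-bit range. A small demonstration of that arithmetic (the end address below is an assumption):

#include <cstdint>
#include <cstdio>

int main() {
    // Assumed 48-bit address-space end, for illustration only.
    const uint64_t address_space_end = 0x0000'8000'0000'0000;
    const uint64_t size = 0 - address_space_end; // well-defined unsigned wraparound

    // base + size overflows to exactly 0, i.e. the block ends at 2^64.
    std::printf("inaccessible block: base=%#llx size=%#llx\n",
                static_cast<unsigned long long>(address_space_end),
                static_cast<unsigned long long>(size));
}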
-Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
-                                       Svc::MemoryPermission svc_perm) {
-    const size_t num_pages = size / PageSize;
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Verify we can change the memory permission.
-    KMemoryState old_state;
-    KMemoryPermission old_perm;
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
-                                 std::addressof(num_allocator_blocks), addr, size,
-                                 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 KMemoryAttribute::All, KMemoryAttribute::None));
-
-    // Determine new perm.
-    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
-    R_SUCCEED_IF(old_perm == new_perm);
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Perform mapping operation.
-    R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
-    const size_t num_pages = size / PageSize;
-    ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
-           KMemoryAttribute::SetMask);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Verify we can change the memory attribute.
-    KMemoryState old_state;
-    KMemoryPermission old_perm;
-    KMemoryAttribute old_attr;
-    size_t num_allocator_blocks;
-    constexpr auto AttributeTestMask =
-        ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
-    const KMemoryState state_test_mask =
-        static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
-                                       ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
-                                       : 0) |
-                                  ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
-                                       ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
-                                       : 0));
-    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
-                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
-                                 addr, size, state_test_mask, state_test_mask,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // If we need to, perform a change attribute operation.
-    if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
-        // Perform operation.
-        R_TRY(this->Operate(addr, num_pages, old_perm,
-                            OperationType::ChangePermissionsAndRefreshAndFlush, 0));
-    }
-
-    // Update the blocks.
-    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
-                                           static_cast<KMemoryAttribute>(mask),
-                                           static_cast<KMemoryAttribute>(attr));
-
-    R_SUCCEED();
-}
-
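UpdateAttribute is driven by a mask/attr pair. A plausible reading (an assumption here; the manager's implementation lives elsewhere in the tree) is the usual masked update, where only the bits selected by the mask change:

#include <cassert>
#include <cstdint>

// Only bits selected by mask change; everything else is preserved.
constexpr uint32_t UpdateAttribute(uint32_t old_attr, uint32_t mask, uint32_t attr) {
    return (old_attr & ~mask) | (attr & mask);
}

int main() {
    constexpr uint32_t Locked = 1u << 1;
    constexpr uint32_t Uncached = 1u << 3;
    // Setting Uncached must not disturb an existing Locked bit.
    static_assert(UpdateAttribute(Locked, Uncached, Uncached) == (Locked | Uncached));
    // Clearing Uncached through the mask leaves Locked intact.
    assert(UpdateAttribute(Locked | Uncached, Uncached, 0) == Locked);
}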
-Result KPageTable::SetMaxHeapSize(size_t size) {
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Only process page tables are allowed to set heap size.
-    ASSERT(!this->IsKernel());
-
-    m_max_heap_size = size;
-
-    R_SUCCEED();
-}
-
-Result KPageTable::SetHeapSize(u64* out, size_t size) {
-    // Lock the physical memory mutex.
-    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
-
-    // Try to perform a reduction in heap, instead of an extension.
-    KProcessAddress cur_address{};
-    size_t allocation_size{};
-    {
-        // Lock the table.
-        KScopedLightLock lk(m_general_lock);
-
-        // Validate that setting heap size is possible at all.
-        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
-        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
-                 ResultOutOfMemory);
-        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
-
-        if (size < GetHeapSize()) {
-            // The size being requested is less than the current size, so we need to free the end of
-            // the heap.
-
-            // Validate memory state.
-            size_t num_allocator_blocks;
-            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                         m_heap_region_start + size, GetHeapSize() - size,
-                                         KMemoryState::All, KMemoryState::Normal,
-                                         KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                                         KMemoryAttribute::All, KMemoryAttribute::None));
-
-            // Create an update allocator.
-            Result allocator_result{ResultSuccess};
-            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                         m_memory_block_slab_manager,
-                                                         num_allocator_blocks);
-            R_TRY(allocator_result);
-
-            // Unmap the end of the heap.
-            const auto num_pages = (GetHeapSize() - size) / PageSize;
-            R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
-                          OperationType::Unmap));
-
-            // Release the memory from the resource limit.
-            m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize);
-
-            // Apply the memory block update.
-            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
-                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
-                                          KMemoryAttribute::None,
-                                          KMemoryBlockDisableMergeAttribute::None,
-                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
-                                                    : KMemoryBlockDisableMergeAttribute::None);
-
-            // Update the current heap end.
-            m_current_heap_end = m_heap_region_start + size;
-
-            // Set the output.
-            *out = GetInteger(m_heap_region_start);
-            R_SUCCEED();
-        } else if (size == GetHeapSize()) {
-            // The size requested is exactly the current size.
-            *out = GetInteger(m_heap_region_start);
-            R_SUCCEED();
-        } else {
-            // We have to allocate memory. Determine how much to allocate and where while the table
-            // is locked.
-            cur_address = m_current_heap_end;
-            allocation_size = size - GetHeapSize();
-        }
-    }
-
-    // Reserve memory for the heap extension.
-    KScopedResourceReservation memory_reservation(
-        m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size);
-    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
-    // Allocate pages for the heap extension.
-    KPageGroup pg{m_kernel, m_block_info_manager};
-    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-        &pg, allocation_size / PageSize,
-        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
-    // Clear all the newly allocated pages.
-    for (const auto& it : pg) {
-        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
-                    it.GetSize());
-    }
-
-    // Map the pages.
-    {
-        // Lock the table.
-        KScopedLightLock lk(m_general_lock);
-
-        // Ensure that the heap hasn't changed since we began executing.
-        ASSERT(cur_address == m_current_heap_end);
-
-        // Check the memory state.
-        size_t num_allocator_blocks{};
-        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
-                                     allocation_size, KMemoryState::All, KMemoryState::Free,
-                                     KMemoryPermission::None, KMemoryPermission::None,
-                                     KMemoryAttribute::None, KMemoryAttribute::None));
-
-        // Create an update allocator.
-        Result allocator_result{ResultSuccess};
-        KMemoryBlockManagerUpdateAllocator allocator(
-            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
-        R_TRY(allocator_result);
-
-        // Map the pages.
-        const auto num_pages = allocation_size / PageSize;
-        R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
-
-        // Clear all the newly allocated pages.
-        for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
-            std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
-                        PageSize);
-        }
-
-        // We succeeded, so commit our memory reservation.
-        memory_reservation.Commit();
-
-        // Apply the memory block update.
-        m_memory_block_manager.Update(
-            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
-                                                      : KMemoryBlockDisableMergeAttribute::None,
-            KMemoryBlockDisableMergeAttribute::None);
-
-        // Update the current heap end.
-        m_current_heap_end = m_heap_region_start + size;
-
-        // Set the output.
-        *out = GetInteger(m_heap_region_start);
-        R_SUCCEED();
-    }
-}
-
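SetHeapSize branches three ways while the table lock is held: a shrink unmaps the tail, an equal size is a no-op, and growth records how much to allocate for the later, unlocked allocation phase. A standalone sketch of that comparison with plain integers for addresses:

#include <cstdio>

int main() {
    const unsigned long long heap_start = 0x10'0000;
    unsigned long long current_end = heap_start + 0x6000; // current size: 0x6000
    const unsigned long long requested = 0x4000;

    const unsigned long long current_size = current_end - heap_start;
    if (requested < current_size) {
        std::printf("shrink: unmap [%#llx, %#llx)\n", heap_start + requested, current_end);
        current_end = heap_start + requested;
    } else if (requested == current_size) {
        std::puts("no-op: heap already has the requested size");
    } else {
        std::printf("grow: allocate %#llx bytes at %#llx\n", requested - current_size,
                    current_end);
    }
}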
-Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
-                                                size_t size, KMemoryPermission perm,
-                                                bool is_aligned, bool check_heap) {
-    // Lightly validate the range before doing anything else.
-    const size_t num_pages = size / PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check the memory state.
-    const auto test_state =
-        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
-        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
-    size_t num_allocator_blocks;
-    KMemoryState old_state;
-    R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
-                                 std::addressof(num_allocator_blocks), address, size, test_state,
-                                 test_state, perm, perm,
-                                 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
-                                 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Update the memory blocks.
-    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
-                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
-
-    // Set whether the locked memory was io.
-    *out_is_io =
-        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
-
-    R_SUCCEED();
-}
-
-Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
-                                                  bool check_heap) {
-    // Lightly validate the range before doing anything else.
-    const size_t num_pages = size / PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check the memory state.
-    const auto test_state = KMemoryState::FlagCanDeviceMap |
-                            (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryStateContiguous(
-        std::addressof(num_allocator_blocks), address, size, test_state, test_state,
-        KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
-    // Create an update allocator.
-    Result allocator_result;
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Update the memory blocks.
-    const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
-        m_enable_device_address_space_merge
-            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
-            : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
-    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
-                                      KMemoryPermission::None);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
-    // Lightly validate the range before doing anything else.
-    const size_t num_pages = size / PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check the memory state.
-    size_t num_allocator_blocks;
-    R_TRY(this->CheckMemoryStateContiguous(
-        std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
-        KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Update the memory blocks.
-    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
-                                      &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
-
-    R_SUCCEED();
-}
-
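These three functions drive per-block device-share bookkeeping: each lock-for-map shares the block to the device, and the final unlock clears DeviceShared. The counting below is a simplified assumption about KMemoryBlock's internals, shown standalone:

#include <cassert>

// Simplified assumption about KMemoryBlock's device-share bookkeeping.
struct BlockShareState {
    unsigned device_use_count = 0;
    bool device_shared = false; // stands in for KMemoryAttribute::DeviceShared

    void ShareToDevice() {
        ++device_use_count;
        device_shared = true;
    }
    void UnshareToDevice() {
        assert(device_use_count > 0);
        if (--device_use_count == 0) {
            device_shared = false; // last device user gone: attribute clears
        }
    }
};

int main() {
    BlockShareState block;
    block.ShareToDevice();
    block.ShareToDevice(); // two concurrent device mappings of the same block
    block.UnshareToDevice();
    assert(block.device_shared); // still shared by the remaining mapping
    block.UnshareToDevice();
    assert(!block.device_shared);
}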
-Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
-                                        size_t size) {
-    R_RETURN(this->LockMemoryAndOpen(
-        nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
-        KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
-        KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
-        KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
-        KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
-    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
-                                KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
-                                KMemoryPermission::None, KMemoryAttribute::All,
-                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
-                                KMemoryAttribute::Locked, nullptr));
-}
-
-Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
-                                         KMemoryPermission perm) {
-    R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
-                                     KMemoryState::FlagCanTransfer, KMemoryPermission::All,
-                                     KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
-                                     KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size,
-                                           const KPageGroup& pg) {
-    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
-                                KMemoryState::FlagCanTransfer, KMemoryPermission::None,
-                                KMemoryPermission::None, KMemoryAttribute::All,
-                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
-                                KMemoryAttribute::Locked, std::addressof(pg)));
-}
-
-Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
-    R_RETURN(this->LockMemoryAndOpen(
-        out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
-        KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
-        KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
-        KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
-    R_RETURN(this->UnlockMemory(
-        addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
-        KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
-        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
-}
-
-bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
-    auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
-    for (u64 offset{}; offset < size; offset += PageSize) {
-        if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
-            return false;
-        }
-        start_ptr += PageSize;
-    }
-    return true;
-}
-
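IsRegionContiguous works because DeviceMemory::GetPointer resolves each guest page to a host pointer; the region is contiguous exactly when every page's pointer follows the previous one by one page. A standalone sketch with an illustrative page-to-pointer table:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uint64_t kPageSize = 0x1000;

// The region is contiguous in host memory iff each guest page resolves to the
// previous host pointer plus exactly one page.
bool IsRegionContiguous(const std::vector<const uint8_t*>& page_ptrs) {
    for (size_t i = 1; i < page_ptrs.size(); ++i) {
        if (page_ptrs[i] != page_ptrs[i - 1] + kPageSize) {
            return false; // backing pages are scattered in host memory
        }
    }
    return true;
}

int main() {
    static uint8_t backing[3 * kPageSize];
    const std::vector<const uint8_t*> contiguous{backing, backing + kPageSize,
                                                 backing + 2 * kPageSize};
    const std::vector<const uint8_t*> scattered{backing, backing + 2 * kPageSize,
                                                backing + kPageSize};
    assert(IsRegionContiguous(contiguous));
    assert(!IsRegionContiguous(scattered));
}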
-void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
-                                  KPageGroup& page_linked_list) {
-    KProcessAddress addr{start};
-    while (addr < start + (num_pages * PageSize)) {
-        const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
-        ASSERT(paddr != 0);
-        page_linked_list.AddBlock(paddr, 1);
-        addr += PageSize;
-    }
-}
-
-KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
-                                                  u64 needed_num_pages, size_t align) {
-    if (m_enable_aslr) {
-        UNIMPLEMENTED();
-    }
-    return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
-                                               IsKernel() ? 1 : 4);
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
-                           OperationType operation) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
-    ASSERT(num_pages > 0);
-    ASSERT(num_pages == page_group.GetNumPages());
-
-    switch (operation) {
-    case OperationType::MapGroup:
-    case OperationType::MapFirstGroup: {
-        // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
-
-        for (const auto& node : page_group) {
-            const size_t size{node.GetNumPages() * PageSize};
-
-            // Map the pages.
-            m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
-            addr += size;
-        }
-
-        // We succeeded! We want to persist the reference to the pages.
-        spg.CancelClose();
-
-        break;
-    }
-    default:
-        ASSERT(false);
-        break;
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
-                           OperationType operation, KPhysicalAddress map_addr) {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    ASSERT(num_pages > 0);
-    ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
-    ASSERT(ContainsPages(addr, num_pages));
-
-    switch (operation) {
-    case OperationType::Unmap: {
-        // Ensure that any pages we track close on exit.
-        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
-        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
-        this->AddRegionToPages(addr, num_pages, pages_to_close);
-        m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
-        break;
-    }
-    case OperationType::Map: {
-        ASSERT(map_addr);
-        ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
-        m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
-        // Open references to pages, if we should.
-        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            m_kernel.MemoryManager().Open(map_addr, num_pages);
-        }
-        break;
-    }
-    case OperationType::Separate: {
-        // HACK: Unimplemented.
-        break;
-    }
-    case OperationType::ChangePermissions:
-    case OperationType::ChangePermissionsAndRefresh:
-    case OperationType::ChangePermissionsAndRefreshAndFlush:
-        break;
-    default:
-        ASSERT(false);
-        break;
-    }
-    R_SUCCEED();
-}
-
-void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
-    while (page_list->Peek()) {
-        [[maybe_unused]] auto page = page_list->Pop();
-
-        // TODO(bunnei): Free pages once they are allocated in guest memory
-        // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
-        // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
-        // this->GetPageTableManager().Free(page);
-    }
-}
-
-KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
-    switch (state) {
-    case Svc::MemoryState::Free:
-    case Svc::MemoryState::Kernel:
-        return m_address_space_start;
-    case Svc::MemoryState::Normal:
-        return m_heap_region_start;
-    case Svc::MemoryState::Ipc:
-    case Svc::MemoryState::NonSecureIpc:
-    case Svc::MemoryState::NonDeviceIpc:
-        return m_alias_region_start;
-    case Svc::MemoryState::Stack:
-        return m_stack_region_start;
-    case Svc::MemoryState::Static:
-    case Svc::MemoryState::ThreadLocal:
-        return m_kernel_map_region_start;
-    case Svc::MemoryState::Io:
-    case Svc::MemoryState::Shared:
-    case Svc::MemoryState::AliasCode:
-    case Svc::MemoryState::AliasCodeData:
-    case Svc::MemoryState::Transfered:
-    case Svc::MemoryState::SharedTransfered:
-    case Svc::MemoryState::SharedCode:
-    case Svc::MemoryState::GeneratedCode:
-    case Svc::MemoryState::CodeOut:
-    case Svc::MemoryState::Coverage:
-    case Svc::MemoryState::Insecure:
-        return m_alias_code_region_start;
-    case Svc::MemoryState::Code:
-    case Svc::MemoryState::CodeData:
-        return m_code_region_start;
-    default:
-        UNREACHABLE();
-    }
-}
-
-size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
-    switch (state) {
-    case Svc::MemoryState::Free:
-    case Svc::MemoryState::Kernel:
-        return m_address_space_end - m_address_space_start;
-    case Svc::MemoryState::Normal:
-        return m_heap_region_end - m_heap_region_start;
-    case Svc::MemoryState::Ipc:
-    case Svc::MemoryState::NonSecureIpc:
-    case Svc::MemoryState::NonDeviceIpc:
-        return m_alias_region_end - m_alias_region_start;
-    case Svc::MemoryState::Stack:
-        return m_stack_region_end - m_stack_region_start;
-    case Svc::MemoryState::Static:
-    case Svc::MemoryState::ThreadLocal:
-        return m_kernel_map_region_end - m_kernel_map_region_start;
-    case Svc::MemoryState::Io:
-    case Svc::MemoryState::Shared:
-    case Svc::MemoryState::AliasCode:
-    case Svc::MemoryState::AliasCodeData:
-    case Svc::MemoryState::Transfered:
-    case Svc::MemoryState::SharedTransfered:
-    case Svc::MemoryState::SharedCode:
-    case Svc::MemoryState::GeneratedCode:
-    case Svc::MemoryState::CodeOut:
-    case Svc::MemoryState::Coverage:
-    case Svc::MemoryState::Insecure:
-        return m_alias_code_region_end - m_alias_code_region_start;
-    case Svc::MemoryState::Code:
-    case Svc::MemoryState::CodeData:
-        return m_code_region_end - m_code_region_start;
-    default:
-        UNREACHABLE();
-    }
-}
-
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
-    const KProcessAddress end = addr + size;
-    const KProcessAddress last = end - 1;
-
-    const KProcessAddress region_start = this->GetRegionAddress(state);
-    const size_t region_size = this->GetRegionSize(state);
-
-    const bool is_in_region =
-        region_start <= addr && addr < end && last <= region_start + region_size - 1;
-    const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
-                              m_heap_region_start == m_heap_region_end);
-    const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
-                               m_alias_region_start == m_alias_region_end);
-    switch (state) {
-    case Svc::MemoryState::Free:
-    case Svc::MemoryState::Kernel:
-        return is_in_region;
-    case Svc::MemoryState::Io:
-    case Svc::MemoryState::Static:
-    case Svc::MemoryState::Code:
-    case Svc::MemoryState::CodeData:
-    case Svc::MemoryState::Shared:
-    case Svc::MemoryState::AliasCode:
-    case Svc::MemoryState::AliasCodeData:
-    case Svc::MemoryState::Stack:
-    case Svc::MemoryState::ThreadLocal:
-    case Svc::MemoryState::Transfered:
-    case Svc::MemoryState::SharedTransfered:
-    case Svc::MemoryState::SharedCode:
-    case Svc::MemoryState::GeneratedCode:
-    case Svc::MemoryState::CodeOut:
-    case Svc::MemoryState::Coverage:
-    case Svc::MemoryState::Insecure:
-        return is_in_region && !is_in_heap && !is_in_alias;
-    case Svc::MemoryState::Normal:
-        ASSERT(is_in_heap);
-        return is_in_region && !is_in_alias;
-    case Svc::MemoryState::Ipc:
-    case Svc::MemoryState::NonSecureIpc:
-    case Svc::MemoryState::NonDeviceIpc:
-        ASSERT(is_in_alias);
-        return is_in_region && !is_in_heap;
-    default:
-        return false;
-    }
-}
-
-Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
-                                    KMemoryState state, KMemoryPermission perm_mask,
-                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                    KMemoryAttribute attr) const {
-    // Validate the states match expectation.
-    R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
-    R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
-    R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
-
-    R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
-                                              size_t size, KMemoryState state_mask,
-                                              KMemoryState state, KMemoryPermission perm_mask,
-                                              KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                              KMemoryAttribute attr) const {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    // Get information about the first block.
-    const KProcessAddress last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
-    KMemoryInfo info = it->GetMemoryInfo();
-
-    // If the start address isn't aligned, we need a block.
-    const size_t blocks_for_start_align =
-        (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
-    while (true) {
-        // Validate against the provided masks.
-        R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
-        // Break once we're done.
-        if (last_addr <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance our iterator.
-        it++;
-        ASSERT(it != m_memory_block_manager.cend());
-        info = it->GetMemoryInfo();
-    }
-
-    // If the end address isn't aligned, we need a block.
-    const size_t blocks_for_end_align =
-        (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
-
-    if (out_blocks_needed != nullptr) {
-        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                                    KMemoryBlockManager::const_iterator it,
-                                    KProcessAddress last_addr, KMemoryState state_mask,
-                                    KMemoryState state, KMemoryPermission perm_mask,
-                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                    KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    // Get information about the first block.
-    KMemoryInfo info = it->GetMemoryInfo();
-
-    // Validate all blocks in the range have correct state.
-    const KMemoryState first_state = info.m_state;
-    const KMemoryPermission first_perm = info.m_permission;
-    const KMemoryAttribute first_attr = info.m_attribute;
-    while (true) {
-        // Validate the current block.
-        R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
-        R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
-        R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
-                 ResultInvalidCurrentMemory);
-
-        // Validate against the provided masks.
-        R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
-        // Break once we're done.
-        if (last_addr <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance our iterator.
-        it++;
-        ASSERT(it != m_memory_block_manager.cend());
-        info = it->GetMemoryInfo();
-    }
-
-    // Write output state.
-    if (out_state != nullptr) {
-        *out_state = first_state;
-    }
-    if (out_perm != nullptr) {
-        *out_perm = first_perm;
-    }
-    if (out_attr != nullptr) {
-        *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
-    }
-
-    // If the end address isn't aligned, we need a block.
-    if (out_blocks_needed != nullptr) {
-        const size_t blocks_for_end_align =
-            (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
-                ? 1
-                : 0;
-        *out_blocks_needed = blocks_for_end_align;
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
-                                    KMemoryState state, KMemoryPermission perm_mask,
-                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                    KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
-    ASSERT(this->IsLockedByCurrentThread());
-
-    // Check memory state.
-    const KProcessAddress last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
-    R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
-                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
-
-    // If the start address isn't aligned, we need a block.
-    if (out_blocks_needed != nullptr &&
-        Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
-        ++(*out_blocks_needed);
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
-                                     KProcessAddress addr, size_t size, KMemoryState state_mask,
-                                     KMemoryState state, KMemoryPermission perm_mask,
-                                     KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                     KMemoryAttribute attr, KMemoryPermission new_perm,
-                                     KMemoryAttribute lock_attr) {
-    // Validate basic preconditions.
-    ASSERT((lock_attr & attr) == KMemoryAttribute::None);
-    ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
-           KMemoryAttribute::None);
-
-    // Validate the lock request.
-    const size_t num_pages = size / PageSize;
-    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check that the output page group is empty, if it exists.
-    if (out_pg) {
-        ASSERT(out_pg->GetNumPages() == 0);
-    }
-
-    // Check the state.
-    KMemoryState old_state{};
-    KMemoryPermission old_perm{};
-    KMemoryAttribute old_attr{};
-    size_t num_allocator_blocks{};
-    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
-                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
-                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
-                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
-                                 attr_mask, attr));
-
-    // Get the physical address, if we're supposed to.
-    if (out_KPhysicalAddress != nullptr) {
-        ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr));
-    }
-
-    // Make the page group, if we're supposed to.
-    if (out_pg != nullptr) {
-        R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
-    }
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Decide on new perm and attr.
-    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
-
-    // Update permission, if we need to.
-    if (new_perm != old_perm) {
-        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
-    }
-
-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::Locked,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    // If we have an output page group, open.
-    if (out_pg) {
-        out_pg->Open();
-    }
-
-    R_SUCCEED();
-}
-
-Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                                KMemoryState state, KMemoryPermission perm_mask,
-                                KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                KMemoryAttribute attr, KMemoryPermission new_perm,
-                                KMemoryAttribute lock_attr, const KPageGroup* pg) {
-    // Validate basic preconditions.
-    ASSERT((attr_mask & lock_attr) == lock_attr);
-    ASSERT((attr & lock_attr) == lock_attr);
-
-    // Validate the unlock request.
-    const size_t num_pages = size / PageSize;
-    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-
-    // Check the state.
-    KMemoryState old_state{};
-    KMemoryPermission old_perm{};
-    KMemoryAttribute old_attr{};
-    size_t num_allocator_blocks{};
-    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
-                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
-                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
-                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
-                                 attr_mask, attr));
-
-    // Check the page group.
-    if (pg != nullptr) {
-        R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
-    }
-
-    // Decide on new perm and attr.
-    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
-    // Update permission, if we need to.
-    if (new_perm != old_perm) {
-        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
-    }
-
-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Locked);
-
-    R_SUCCEED();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 66f16faaf9..5541bc13f8 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -3,548 +3,14 @@
 
 #pragma once
 
-#include <memory>
-
-#include "common/common_funcs.h"
-#include "common/page_table.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Core {
-class System;
-}
+#include "core/hle/kernel/k_page_table_base.h"
 
 namespace Kernel {
 
-enum class DisableMergeAttribute : u8 {
-    None = (0U << 0),
-    DisableHead = (1U << 0),
-    DisableHeadAndBody = (1U << 1),
-    EnableHeadAndBody = (1U << 2),
-    DisableTail = (1U << 3),
-    EnableTail = (1U << 4),
-    EnableAndMergeHeadBodyTail = (1U << 5),
-    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
-    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
-};
-
-struct KPageProperties {
-    KMemoryPermission perm;
-    bool io;
-    bool uncached;
-    DisableMergeAttribute disable_merge_attributes;
-};
-static_assert(std::is_trivial_v<KPageProperties>);
-static_assert(sizeof(KPageProperties) == sizeof(u32));
-
-class KBlockInfoManager;
-class KMemoryBlockManager;
-class KResourceLimit;
-class KSystemResource;
-
-class KPageTable final {
-protected:
-    struct PageLinkedList;
-
+class KPageTable final : public KPageTableBase {
 public:
-    enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
-
-    YUZU_NON_COPYABLE(KPageTable);
-    YUZU_NON_MOVEABLE(KPageTable);
-
-    explicit KPageTable(Core::System& system_);
-    ~KPageTable();
-
-    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
-                                bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
-                                KProcessAddress code_addr, size_t code_size,
-                                KSystemResource* system_resource, KResourceLimit* resource_limit,
-                                Core::Memory::Memory& memory);
-
-    void Finalize();
-
-    Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
-                          KMemoryPermission perm);
-    Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
-    Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
-                           ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
-                              KProcessAddress src_addr);
-    Result MapPhysicalMemory(KProcessAddress addr, size_t size);
-    Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
-    Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
-    Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
-    Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
-                                      Svc::MemoryPermission svc_perm);
-    KMemoryInfo QueryInfo(KProcessAddress addr);
-    Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
-    Result SetMaxHeapSize(size_t size);
-    Result SetHeapSize(u64* out, size_t size);
-    Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
-                                        KMemoryPermission perm, bool is_aligned, bool check_heap);
-    Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
-
-    Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
-
-    Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
-    Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
-
-    Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
-                       KPageTable& src_page_table, KMemoryPermission test_perm,
-                       KMemoryState dst_state, bool send);
-    Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
-    Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
-
-    Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
-                                 KMemoryPermission perm);
-    Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
-    Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
-    Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
-    Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
-                                KMemoryState state_mask, KMemoryState state,
-                                KMemoryPermission perm_mask, KMemoryPermission perm,
-                                KMemoryAttribute attr_mask, KMemoryAttribute attr);
-
-    Common::PageTable& PageTableImpl() {
-        return *m_page_table_impl;
-    }
-
-    const Common::PageTable& PageTableImpl() const {
-        return *m_page_table_impl;
-    }
-
-    KBlockInfoManager* GetBlockInfoManager() {
-        return m_block_info_manager;
-    }
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                    KPhysicalAddress phys_addr, KProcessAddress region_start,
-                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
-                                region_num_pages, state, perm));
-    }
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
-                    KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-
-    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
-                    KMemoryPermission perm);
-    Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
-
-    Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
-                        KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
-                        KMemoryPermission perm);
-    Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
-                        KMemoryPermission perm);
-    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
-    void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
-                        const KPageGroup& pg);
-
-    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
-    size_t GetRegionSize(Svc::MemoryState state) const;
-    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
-
-    KProcessAddress GetRegionAddress(KMemoryState state) const {
-        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
-    }
-    size_t GetRegionSize(KMemoryState state) const {
-        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
-    }
-    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
-        return this->CanContain(addr, size,
-                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
-    }
-
-protected:
-    struct PageLinkedList {
-    private:
-        struct Node {
-            Node* m_next;
-            std::array<u8, PageSize - sizeof(Node*)> m_buffer;
-        };
-
-    public:
-        constexpr PageLinkedList() = default;
-
-        void Push(Node* n) {
-            ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
-            n->m_next = m_root;
-            m_root = n;
-        }
-
-        void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
-            this->Push(memory.GetPointer<Node>(GetInteger(addr)));
-        }
-
-        Node* Peek() const {
-            return m_root;
-        }
-
-        Node* Pop() {
-            Node* const r = m_root;
-
-            m_root = r->m_next;
-            r->m_next = nullptr;
-
-            return r;
-        }
-
-    private:
-        Node* m_root{};
-    };
-    static_assert(std::is_trivially_destructible<PageLinkedList>::value);
-
-private:
-    enum class OperationType : u32 {
-        Map = 0,
-        MapGroup = 1,
-        MapFirstGroup = 2,
-        Unmap = 3,
-        ChangePermissions = 4,
-        ChangePermissionsAndRefresh = 5,
-        ChangePermissionsAndRefreshAndFlush = 6,
-        Separate = 7,
-    };
-
-    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
-        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                    KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
-                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
-    bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
-    void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
-    KMemoryInfo QueryInfoImpl(KProcessAddress addr);
-    KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
-                                          u64 needed_num_pages, size_t align);
-    Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
-                   OperationType operation);
-    Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
-                   OperationType operation, KPhysicalAddress map_addr = 0);
-    void FinalizeUpdate(PageLinkedList* page_list);
-
-    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
-                                 size_t num_pages, size_t alignment, size_t offset,
-                                 size_t guard_pages);
-
-    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
-                                      KMemoryState state_mask, KMemoryState state,
-                                      KMemoryPermission perm_mask, KMemoryPermission perm,
-                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                                      KMemoryState state, KMemoryPermission perm_mask,
-                                      KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                      KMemoryAttribute attr) const {
-        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
-                                                  perm, attr_mask, attr));
-    }
-
-    Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                            KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
-                            KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                            KProcessAddress addr, size_t size, KMemoryState state_mask,
-                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
-                            KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
-                                  state_mask, state, perm_mask, perm, attr_mask, attr,
-                                  ignore_attr));
-    }
-    Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
-                                        attr_mask, attr, ignore_attr));
-    }
-
-    Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
-                             KProcessAddress addr, size_t size, KMemoryState state_mask,
-                             KMemoryState state, KMemoryPermission perm_mask,
-                             KMemoryPermission perm, KMemoryAttribute attr_mask,
-                             KMemoryAttribute attr, KMemoryPermission new_perm,
-                             KMemoryAttribute lock_attr);
-    Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                        KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
-                        KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                        KMemoryPermission new_perm, KMemoryAttribute lock_attr,
-                        const KPageGroup* pg);
-
-    Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-    bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-
-    bool IsLockedByCurrentThread() const {
-        return m_general_lock.IsLockedByCurrentThread();
-    }
-
-    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
-        ASSERT(this->IsLockedByCurrentThread());
-
-        return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
-    }
-
-    bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
-        ASSERT(this->IsLockedByCurrentThread());
-
-        *out = GetPhysicalAddr(virt_addr);
-
-        return *out != 0;
-    }
-
-    Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
-                             KProcessAddress address, size_t size, KMemoryPermission test_perm,
-                             KMemoryState dst_state);
-    Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
-                             KMemoryPermission test_perm, KMemoryState dst_state,
-                             KPageTable& src_page_table, bool send);
-    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
-                                                 size_t size, KMemoryPermission prot_perm);
-
-    Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
-                                   size_t num_pages, KMemoryPermission perm);
-    Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
-                            const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
-
-    mutable KLightLock m_general_lock;
-    mutable KLightLock m_map_physical_memory_lock;
-
-public:
-    constexpr KProcessAddress GetAddressSpaceStart() const {
-        return m_address_space_start;
-    }
-    constexpr KProcessAddress GetAddressSpaceEnd() const {
-        return m_address_space_end;
-    }
-    constexpr size_t GetAddressSpaceSize() const {
-        return m_address_space_end - m_address_space_start;
-    }
-    constexpr KProcessAddress GetHeapRegionStart() const {
-        return m_heap_region_start;
-    }
-    constexpr KProcessAddress GetHeapRegionEnd() const {
-        return m_heap_region_end;
-    }
-    constexpr size_t GetHeapRegionSize() const {
-        return m_heap_region_end - m_heap_region_start;
-    }
-    constexpr KProcessAddress GetAliasRegionStart() const {
-        return m_alias_region_start;
-    }
-    constexpr KProcessAddress GetAliasRegionEnd() const {
-        return m_alias_region_end;
-    }
-    constexpr size_t GetAliasRegionSize() const {
-        return m_alias_region_end - m_alias_region_start;
-    }
-    constexpr KProcessAddress GetStackRegionStart() const {
-        return m_stack_region_start;
-    }
-    constexpr KProcessAddress GetStackRegionEnd() const {
-        return m_stack_region_end;
-    }
-    constexpr size_t GetStackRegionSize() const {
-        return m_stack_region_end - m_stack_region_start;
-    }
-    constexpr KProcessAddress GetKernelMapRegionStart() const {
-        return m_kernel_map_region_start;
-    }
-    constexpr KProcessAddress GetKernelMapRegionEnd() const {
-        return m_kernel_map_region_end;
-    }
-    constexpr KProcessAddress GetCodeRegionStart() const {
-        return m_code_region_start;
-    }
-    constexpr KProcessAddress GetCodeRegionEnd() const {
-        return m_code_region_end;
-    }
-    constexpr KProcessAddress GetAliasCodeRegionStart() const {
-        return m_alias_code_region_start;
-    }
-    constexpr KProcessAddress GetAliasCodeRegionEnd() const {
-        return m_alias_code_region_end;
-    }
-    constexpr size_t GetAliasCodeRegionSize() const {
-        return m_alias_code_region_end - m_alias_code_region_start;
-    }
-    size_t GetNormalMemorySize() const {
-        KScopedLightLock lk(m_general_lock);
-        return GetHeapSize() + m_mapped_physical_memory_size;
-    }
-    constexpr size_t GetAddressSpaceWidth() const {
-        return m_address_space_width;
-    }
-    constexpr size_t GetHeapSize() const {
-        return m_current_heap_end - m_heap_region_start;
-    }
-    constexpr size_t GetNumGuardPages() const {
-        return IsKernel() ? 1 : 4;
-    }
-    KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
-        const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
-        ASSERT(backing_addr);
-        return backing_addr + GetInteger(addr);
-    }
-    constexpr bool Contains(KProcessAddress addr) const {
-        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
-    }
-    constexpr bool Contains(KProcessAddress addr, size_t size) const {
-        return m_address_space_start <= addr && addr < addr + size &&
-               addr + size - 1 <= m_address_space_end - 1;
-    }
-    constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
-        return this->Contains(addr, size) && m_alias_region_start <= addr &&
-               addr + size - 1 <= m_alias_region_end - 1;
-    }
-    constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
-        return this->Contains(addr, size) && m_heap_region_start <= addr &&
-               addr + size - 1 <= m_heap_region_end - 1;
-    }
-
-public:
-    static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
-                                                         KPhysicalAddress addr) {
-        return layout.GetLinearVirtualAddress(addr);
-    }
-
-    static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
-                                                           KVirtualAddress addr) {
-        return layout.GetLinearPhysicalAddress(addr);
-    }
-
-    static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
-                                                 KPhysicalAddress addr) {
-        return GetLinearMappedVirtualAddress(layout, addr);
-    }
-
-    static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
-                                                   KVirtualAddress addr) {
-        return GetLinearMappedPhysicalAddress(layout, addr);
-    }
-
-    static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
-                                                      KPhysicalAddress addr) {
-        return GetLinearMappedVirtualAddress(layout, addr);
-    }
-
-    static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
-                                                        KVirtualAddress addr) {
-        return GetLinearMappedPhysicalAddress(layout, addr);
-    }
-
-private:
-    constexpr bool IsKernel() const {
-        return m_is_kernel;
-    }
-    constexpr bool IsAslrEnabled() const {
-        return m_enable_aslr;
-    }
-
-    constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
-        return (m_address_space_start <= addr) &&
-               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
-               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
-    }
-
-private:
-    class KScopedPageTableUpdater {
-    private:
-        KPageTable* m_pt{};
-        PageLinkedList m_ll;
-
-    public:
-        explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
-        explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
-        ~KScopedPageTableUpdater() {
-            m_pt->FinalizeUpdate(this->GetPageList());
-        }
-
-        PageLinkedList* GetPageList() {
-            return std::addressof(m_ll);
-        }
-    };
-
-private:
-    KProcessAddress m_address_space_start{};
-    KProcessAddress m_address_space_end{};
-    KProcessAddress m_heap_region_start{};
-    KProcessAddress m_heap_region_end{};
-    KProcessAddress m_current_heap_end{};
-    KProcessAddress m_alias_region_start{};
-    KProcessAddress m_alias_region_end{};
-    KProcessAddress m_stack_region_start{};
-    KProcessAddress m_stack_region_end{};
-    KProcessAddress m_kernel_map_region_start{};
-    KProcessAddress m_kernel_map_region_end{};
-    KProcessAddress m_code_region_start{};
-    KProcessAddress m_code_region_end{};
-    KProcessAddress m_alias_code_region_start{};
-    KProcessAddress m_alias_code_region_end{};
-
-    size_t m_max_heap_size{};
-    size_t m_mapped_physical_memory_size{};
-    size_t m_mapped_unsafe_physical_memory{};
-    size_t m_mapped_insecure_memory{};
-    size_t m_mapped_ipc_server_memory{};
-    size_t m_address_space_width{};
-
-    KMemoryBlockManager m_memory_block_manager;
-    u32 m_allocate_option{};
-
-    bool m_is_kernel{};
-    bool m_enable_aslr{};
-    bool m_enable_device_address_space_merge{};
-
-    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
-    KBlockInfoManager* m_block_info_manager{};
-    KResourceLimit* m_resource_limit{};
-
-    u32 m_heap_fill_value{};
-    u32 m_ipc_fill_value{};
-    u32 m_stack_fill_value{};
-    const KMemoryRegion* m_cached_physical_heap_region{};
-
-    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
-    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
-
-    std::unique_ptr<Common::PageTable> m_page_table_impl;
-
-    Core::System& m_system;
-    KernelCore& m_kernel;
-    Core::Memory::Memory* m_memory{};
+    explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {}
+    ~KPageTable() = default;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
new file mode 100644
index 0000000000..1cc019c066
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -0,0 +1,5718 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_address_space_info.h"
+#include "core/hle/kernel/k_page_table_base.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_system_resource.h"
+
+namespace Kernel {
+
+namespace {
+
+class KScopedLightLockPair {
+    YUZU_NON_COPYABLE(KScopedLightLockPair);
+    YUZU_NON_MOVEABLE(KScopedLightLockPair);
+
+private:
+    KLightLock* m_lower;
+    KLightLock* m_upper;
+
+public:
+    KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
+        // Ensure our locks are in a consistent order.
+        if (std::addressof(lhs) <= std::addressof(rhs)) {
+            m_lower = std::addressof(lhs);
+            m_upper = std::addressof(rhs);
+        } else {
+            m_lower = std::addressof(rhs);
+            m_upper = std::addressof(lhs);
+        }
+
+        // Acquire both locks.
+        m_lower->Lock();
+        if (m_lower != m_upper) {
+            m_upper->Lock();
+        }
+    }
+
+    ~KScopedLightLockPair() {
+        // Unlock the upper lock.
+        if (m_upper != nullptr && m_upper != m_lower) {
+            m_upper->Unlock();
+        }
+
+        // Unlock the lower lock.
+        if (m_lower != nullptr) {
+            m_lower->Unlock();
+        }
+    }
+
+public:
+    // Utility.
+    void TryUnlockHalf(KLightLock& lock) {
+        // Only allow unlocking if the lock is half the pair.
+        if (m_lower != m_upper) {
+            // We want to be sure the lock is one we own.
+            if (m_lower == std::addressof(lock)) {
+                lock.Unlock();
+                m_lower = nullptr;
+            } else if (m_upper == std::addressof(lock)) {
+                lock.Unlock();
+                m_upper = nullptr;
+            }
+        }
+    }
+};
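+
+// Deadlock-avoidance note: the constructor above always acquires the pair in
+// ascending address order, so any two threads pairing the same two locks take
+// them in the same order. An illustrative sketch (the table names here are
+// hypothetical, not part of this patch):
+//
+//     KScopedLightLockPair lk(src_table.m_general_lock,
+//                             dst_table.m_general_lock);
+//
+// remains safe even if another thread constructs the pair with the arguments
+// swapped.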
+
+void InvalidateEntireInstructionCache(Core::System& system) {
+    system.InvalidateCpuInstructionCaches();
+}
+
+template <typename AddressType>
+Result InvalidateDataCache(AddressType addr, u64 size) {
+    R_SUCCEED();
+}
+
+template <typename AddressType>
+Result StoreDataCache(AddressType addr, u64 size) {
+    R_SUCCEED();
+}
+
+template <typename AddressType>
+Result FlushDataCache(AddressType addr, u64 size) {
+    R_SUCCEED();
+}
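+
+// The three cache helpers above are deliberate no-ops in this emulated
+// kernel: guest memory is backed directly by host memory, so there is no
+// separate guest data cache to invalidate, store, or flush. The stubs keep
+// call sites shaped like the hardware kernel, which performs real cache
+// maintenance at these points (a behavioral note, not required by the code).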
+
+} // namespace
+
+void KPageTableBase::MemoryRange::Open() {
+    // If the range contains heap pages, open them.
+    if (this->IsHeap()) {
+        m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
+    }
+}
+
+void KPageTableBase::MemoryRange::Close() {
+    // If the range contains heap pages, close them.
+    if (this->IsHeap()) {
+        m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
+    }
+}
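+
+// Open() and Close() bracket the lifetime of the pages a MemoryRange refers
+// to: Open() takes a reference on every heap page so the memory manager
+// cannot free them mid-operation, and Close() drops those references. A
+// minimal usage sketch (assumed pattern, not taken from this patch):
+//
+//     range.Open();
+//     SCOPE_EXIT({ range.Close(); }); // released on every exit path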
+
+KPageTableBase::KPageTableBase(KernelCore& kernel)
+    : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel),
+      m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {}
+KPageTableBase::~KPageTableBase() = default;
+
+Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start,
+                                           KVirtualAddress end, Core::Memory::Memory& memory) {
+    // Initialize our members.
+    m_address_space_width =
+        static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>());
+    m_address_space_start = KProcessAddress(GetInteger(start));
+    m_address_space_end = KProcessAddress(GetInteger(end));
+    m_is_kernel = true;
+    m_enable_aslr = true;
+    m_enable_device_address_space_merge = false;
+
+    m_heap_region_start = 0;
+    m_heap_region_end = 0;
+    m_current_heap_end = 0;
+    m_alias_region_start = 0;
+    m_alias_region_end = 0;
+    m_stack_region_start = 0;
+    m_stack_region_end = 0;
+    m_kernel_map_region_start = 0;
+    m_kernel_map_region_end = 0;
+    m_alias_code_region_start = 0;
+    m_alias_code_region_end = 0;
+    m_code_region_start = 0;
+    m_code_region_end = 0;
+    m_max_heap_size = 0;
+    m_mapped_physical_memory_size = 0;
+    m_mapped_unsafe_physical_memory = 0;
+    m_mapped_insecure_memory = 0;
+    m_mapped_ipc_server_memory = 0;
+
+    m_memory_block_slab_manager =
+        m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
+    m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer();
+    m_resource_limit = m_kernel.GetSystemResourceLimit();
+
+    m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System,
+                                                     KMemoryManager::Direction::FromFront);
+    m_heap_fill_value = MemoryFillValue_Zero;
+    m_ipc_fill_value = MemoryFillValue_Zero;
+    m_stack_fill_value = MemoryFillValue_Zero;
+
+    m_cached_physical_linear_region = nullptr;
+    m_cached_physical_heap_region = nullptr;
+
+    // Initialize our implementation.
+    m_impl = std::make_unique<Common::PageTable>();
+    m_impl->Resize(m_address_space_width, PageBits);
+
+    // Set the tracking memory.
+    m_memory = std::addressof(memory);
+
+    // Initialize our memory block manager.
+    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+                                               m_memory_block_slab_manager));
+}
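+
+// Sizing note: Resize(m_address_space_width, PageBits) gives the flat
+// backing table one entry per page of the address space, i.e.
+// 1 << (width - PageBits) entries; a 39-bit space with 4 KiB (2^12) pages
+// would need 1 << (39 - 12) = 2^27 entries.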
+
+Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+                                            bool enable_das_merge, bool from_back,
+                                            KMemoryManager::Pool pool, KProcessAddress code_address,
+                                            size_t code_size, KSystemResource* system_resource,
+                                            KResourceLimit* resource_limit,
+                                            Core::Memory::Memory& memory) {
+    // Calculate region extents.
+    const size_t as_width = GetAddressSpaceWidth(as_type);
+    const KProcessAddress start = 0;
+    const KProcessAddress end = (1ULL << as_width);
+
+    // Validate the region.
+    ASSERT(start <= code_address);
+    ASSERT(code_address < code_address + code_size);
+    ASSERT(code_address + code_size - 1 <= end - 1);
+
+    // Define helpers.
+    auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) {
+        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
+    };
+    auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) {
+        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
+    };
+
+    // Set our bit width and heap/alias sizes.
+    m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type));
+    size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+    size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+
+    // Adjust heap/alias size if we don't have an alias region.
+    if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) ==
+        Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
+        heap_region_size += alias_region_size;
+        alias_region_size = 0;
+    }
+
+    // Set code regions and determine remaining sizes.
+    KProcessAddress process_code_start;
+    KProcessAddress process_code_end;
+    size_t stack_region_size;
+    size_t kernel_map_region_size;
+    if (m_address_space_width == 39) {
+        alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+        heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
+        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = m_code_region_end;
+        process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment);
+        process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+    } else {
+        stack_region_size = 0;
+        kernel_map_region_size = 0;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+        m_stack_region_start = m_code_region_start;
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+                                  GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+        m_stack_region_end = m_code_region_end;
+        m_kernel_map_region_start = m_code_region_start;
+        m_kernel_map_region_end = m_code_region_end;
+        process_code_start = m_code_region_start;
+        process_code_end = m_code_region_end;
+    }
+
+    // Set other basic fields.
+    m_enable_aslr = enable_aslr;
+    m_enable_device_address_space_merge = enable_das_merge;
+    m_address_space_start = start;
+    m_address_space_end = end;
+    m_is_kernel = false;
+    m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
+    m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
+    m_resource_limit = resource_limit;
+
+    // Determine the region in which we can place our not-yet-determined regions.
+    KProcessAddress alloc_start;
+    size_t alloc_size;
+    if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >=
+        (GetInteger(end) - GetInteger(process_code_end))) {
+        alloc_start = m_code_region_start;
+        alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
+    } else {
+        alloc_start = process_code_end;
+        alloc_size = GetInteger(end) - GetInteger(process_code_end);
+    }
+    const size_t needed_size =
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
+
+    const size_t remaining_size = alloc_size - needed_size;
+
+    // Determine random placements for each region.
+    size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
+    if (enable_aslr) {
+        alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                    RegionAlignment;
+        heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                   RegionAlignment;
+        stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                    RegionAlignment;
+        kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                   RegionAlignment;
+    }
+
+    // Setup heap and alias regions.
+    m_alias_region_start = alloc_start + alias_rnd;
+    m_alias_region_end = m_alias_region_start + alias_region_size;
+    m_heap_region_start = alloc_start + heap_rnd;
+    m_heap_region_end = m_heap_region_start + heap_region_size;
+
+    if (alias_rnd <= heap_rnd) {
+        m_heap_region_start += alias_region_size;
+        m_heap_region_end += alias_region_size;
+    } else {
+        m_alias_region_start += heap_region_size;
+        m_alias_region_end += heap_region_size;
+    }
+
+    // Setup stack region.
+    if (stack_region_size) {
+        m_stack_region_start = alloc_start + stack_rnd;
+        m_stack_region_end = m_stack_region_start + stack_region_size;
+
+        if (alias_rnd < stack_rnd) {
+            m_stack_region_start += alias_region_size;
+            m_stack_region_end += alias_region_size;
+        } else {
+            m_alias_region_start += stack_region_size;
+            m_alias_region_end += stack_region_size;
+        }
+
+        if (heap_rnd < stack_rnd) {
+            m_stack_region_start += heap_region_size;
+            m_stack_region_end += heap_region_size;
+        } else {
+            m_heap_region_start += stack_region_size;
+            m_heap_region_end += stack_region_size;
+        }
+    }
+
+    // Setup kernel map region.
+    if (kernel_map_region_size) {
+        m_kernel_map_region_start = alloc_start + kmap_rnd;
+        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
+
+        if (alias_rnd < kmap_rnd) {
+            m_kernel_map_region_start += alias_region_size;
+            m_kernel_map_region_end += alias_region_size;
+        } else {
+            m_alias_region_start += kernel_map_region_size;
+            m_alias_region_end += kernel_map_region_size;
+        }
+
+        if (heap_rnd < kmap_rnd) {
+            m_kernel_map_region_start += heap_region_size;
+            m_kernel_map_region_end += heap_region_size;
+        } else {
+            m_heap_region_start += kernel_map_region_size;
+            m_heap_region_end += kernel_map_region_size;
+        }
+
+        if (stack_region_size) {
+            if (stack_rnd < kmap_rnd) {
+                m_kernel_map_region_start += stack_region_size;
+                m_kernel_map_region_end += stack_region_size;
+            } else {
+                m_stack_region_start += kernel_map_region_size;
+                m_stack_region_end += kernel_map_region_size;
+            }
+        }
+    }
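+
+    // How the slides above keep regions disjoint: every region draws an
+    // independent random offset into the spare space, and whichever of a pair
+    // drew the smaller offset ends up in front, so the other is slid up by the
+    // front region's size. Worked example (illustrative numbers only): with
+    // alias_rnd = 0x10000, heap_rnd = 0x30000, and alias_region_size = 0x40000,
+    // the alias region ends at alloc_start + 0x50000 while the heap region
+    // starts at alloc_start + 0x30000 + 0x40000 = alloc_start + 0x70000, so
+    // the two never overlap.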
+
+    // Set heap and fill members.
+    m_current_heap_end = m_heap_region_start;
+    m_max_heap_size = 0;
+    m_mapped_physical_memory_size = 0;
+    m_mapped_unsafe_physical_memory = 0;
+    m_mapped_insecure_memory = 0;
+    m_mapped_ipc_server_memory = 0;
+
+    // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
+    const bool fill_memory = false;
+    m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
+    m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
+    m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
+
+    // Set allocation option.
+    m_allocate_option =
+        KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
+                                                     : KMemoryManager::Direction::FromFront);
+
+    // Ensure that the regions we chose are inside our address space.
+    auto IsInAddressSpace = [&](KProcessAddress addr) {
+        return m_address_space_start <= addr && addr <= m_address_space_end;
+    };
+    ASSERT(IsInAddressSpace(m_alias_region_start));
+    ASSERT(IsInAddressSpace(m_alias_region_end));
+    ASSERT(IsInAddressSpace(m_heap_region_start));
+    ASSERT(IsInAddressSpace(m_heap_region_end));
+    ASSERT(IsInAddressSpace(m_stack_region_start));
+    ASSERT(IsInAddressSpace(m_stack_region_end));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+    ASSERT(IsInAddressSpace(m_kernel_map_region_end));
+
+    // Ensure that we selected regions that don't overlap.
+    const KProcessAddress alias_start = m_alias_region_start;
+    const KProcessAddress alias_last = m_alias_region_end - 1;
+    const KProcessAddress heap_start = m_heap_region_start;
+    const KProcessAddress heap_last = m_heap_region_end - 1;
+    const KProcessAddress stack_start = m_stack_region_start;
+    const KProcessAddress stack_last = m_stack_region_end - 1;
+    const KProcessAddress kmap_start = m_kernel_map_region_start;
+    const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
+    ASSERT(alias_last < heap_start || heap_last < alias_start);
+    ASSERT(alias_last < stack_start || stack_last < alias_start);
+    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
+    ASSERT(heap_last < stack_start || stack_last < heap_start);
+    ASSERT(heap_last < kmap_start || kmap_last < heap_start);
+
+    // Initialize our implementation.
+    m_impl = std::make_unique<Common::PageTable>();
+    m_impl->Resize(m_address_space_width, PageBits);
+
+    // Set the tracking memory.
+    m_memory = std::addressof(memory);
+
+    // Initialize our memory block manager.
+    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+                                               m_memory_block_slab_manager));
+}
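+
+// Illustrative note: the shuffle above places each randomized region at
+// alloc_start + its own random offset, plus the sizes of all regions whose
+// offsets are smaller (the kernel map region is shuffled the same way). For
+// hypothetical offsets heap_rnd < alias_rnd < stack_rnd, the layout is:
+//
+//   heap  : alloc_start + heap_rnd
+//   alias : alloc_start + alias_rnd + heap_region_size
+//   stack : alloc_start + stack_rnd + heap_region_size + alias_region_size
+//
+// Since every region's start is bumped past each region ordered before it,
+// the non-overlap asserts above always hold.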
+
+void KPageTableBase::Finalize() {
+    auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
+        if (Settings::IsFastmemEnabled()) {
+            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+        }
+    };
+
+    // Finalize memory blocks.
+    m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
+
+    // Free any unsafe mapped memory.
+    if (m_mapped_unsafe_physical_memory) {
+        UNIMPLEMENTED();
+    }
+
+    // Release any insecure mapped memory.
+    if (m_mapped_insecure_memory) {
+        if (auto* const insecure_resource_limit =
+                KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+            insecure_resource_limit != nullptr) {
+            insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+                                             m_mapped_insecure_memory);
+        }
+    }
+
+    // Release any ipc server memory.
+    if (m_mapped_ipc_server_memory) {
+        m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+                                  m_mapped_ipc_server_memory);
+    }
+
+    // Invalidate the entire instruction cache.
+    InvalidateEntireInstructionCache(m_system);
+
+    // Close the backing page table, as the destructor is not called for guest objects.
+    m_impl.reset();
+}
+
+KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const {
+    switch (state) {
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
+        return m_address_space_start;
+    case Svc::MemoryState::Normal:
+        return m_heap_region_start;
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
+        return m_alias_region_start;
+    case Svc::MemoryState::Stack:
+        return m_stack_region_start;
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
+        return m_kernel_map_region_start;
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
+        return m_alias_code_region_start;
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+        return m_code_region_start;
+    default:
+        UNREACHABLE();
+    }
+}
+
+size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const {
+    switch (state) {
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
+        return m_address_space_end - m_address_space_start;
+    case Svc::MemoryState::Normal:
+        return m_heap_region_end - m_heap_region_start;
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
+        return m_alias_region_end - m_alias_region_start;
+    case Svc::MemoryState::Stack:
+        return m_stack_region_end - m_stack_region_start;
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
+        return m_kernel_map_region_end - m_kernel_map_region_start;
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
+        return m_alias_code_region_end - m_alias_code_region_start;
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+        return m_code_region_end - m_code_region_start;
+    default:
+        UNREACHABLE();
+    }
+}
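+
+// Together, GetRegionAddress and GetRegionSize define the candidate region
+// for a given Svc::MemoryState; CanContain below combines them. For example,
+// Svc::MemoryState::Normal (heap) resolves to
+// [m_heap_region_start, m_heap_region_end), while the IPC states all resolve
+// to the alias region.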
+
+bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
+    const KProcessAddress end = addr + size;
+    const KProcessAddress last = end - 1;
+
+    const KProcessAddress region_start = this->GetRegionAddress(state);
+    const size_t region_size = this->GetRegionSize(state);
+
+    const bool is_in_region =
+        region_start <= addr && addr < end && last <= region_start + region_size - 1;
+    const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+                              m_heap_region_start == m_heap_region_end);
+    const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+                               m_alias_region_start == m_alias_region_end);
+    switch (state) {
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
+        return is_in_region;
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Stack:
+    case Svc::MemoryState::ThreadLocal:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
+        return is_in_region && !is_in_heap && !is_in_alias;
+    case Svc::MemoryState::Normal:
+        ASSERT(is_in_heap);
+        return is_in_region && !is_in_alias;
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
+        ASSERT(is_in_alias);
+        return is_in_region && !is_in_heap;
+    default:
+        return false;
+    }
+}
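+
+// Example (illustrative): a caller about to map a stack would test
+//
+//   CanContain(addr, size, Svc::MemoryState::Stack)
+//
+// which requires [addr, addr + size) to lie within the stack region while
+// avoiding the heap and alias regions (an empty region never excludes a
+// candidate range).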
+
+Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+                                        KMemoryState state, KMemoryPermission perm_mask,
+                                        KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                        KMemoryAttribute attr) const {
+    // Validate the states match expectation.
+    R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
+    R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
+    R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
+
+    R_SUCCEED();
+}
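+
+// Example (illustrative): "normal heap, user read/write, no attributes" is
+// expressed as
+//
+//   CheckMemoryState(info, KMemoryState::All, KMemoryState::Normal,
+//                    KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+//                    KMemoryAttribute::All, KMemoryAttribute::None)
+//
+// i.e. each of state/permission/attribute must satisfy
+// (value & mask) == expected.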
+
+Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
+                                                  size_t size, KMemoryState state_mask,
+                                                  KMemoryState state, KMemoryPermission perm_mask,
+                                                  KMemoryPermission perm,
+                                                  KMemoryAttribute attr_mask,
+                                                  KMemoryAttribute attr) const {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Get information about the first block.
+    const KProcessAddress last_addr = addr + size - 1;
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+    KMemoryInfo info = it->GetMemoryInfo();
+
+    // If the start address isn't aligned, we need a block.
+    const size_t blocks_for_start_align =
+        (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
+
+    while (true) {
+        // Validate against the provided masks.
+        R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+        // Break once we're done.
+        if (last_addr <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance our iterator.
+        it++;
+        ASSERT(it != m_memory_block_manager.cend());
+        info = it->GetMemoryInfo();
+    }
+
+    // If the end address isn't aligned, we need a block.
+    const size_t blocks_for_end_align =
+        (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+
+    if (out_blocks_needed != nullptr) {
+        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+    }
+
+    R_SUCCEED();
+}
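+
+// The blocks_for_*_align accounting above reflects that an update which does
+// not begin or end exactly on an existing block boundary must split that
+// block, costing one extra KMemoryBlock from the slab allocator per unaligned
+// edge.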
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                                        KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                        KMemoryBlockManager::const_iterator it,
+                                        KProcessAddress last_addr, KMemoryState state_mask,
+                                        KMemoryState state, KMemoryPermission perm_mask,
+                                        KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                        KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Get information about the first block.
+    KMemoryInfo info = it->GetMemoryInfo();
+
+    // Validate all blocks in the range have correct state.
+    const KMemoryState first_state = info.m_state;
+    const KMemoryPermission first_perm = info.m_permission;
+    const KMemoryAttribute first_attr = info.m_attribute;
+    while (true) {
+        // Validate the current block.
+        R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
+        R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
+        R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
+                 ResultInvalidCurrentMemory);
+
+        // Validate against the provided masks.
+        R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+        // Break once we're done.
+        if (last_addr <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance our iterator.
+        it++;
+        ASSERT(it != m_memory_block_manager.cend());
+        info = it->GetMemoryInfo();
+    }
+
+    // Write output state.
+    if (out_state != nullptr) {
+        *out_state = first_state;
+    }
+    if (out_perm != nullptr) {
+        *out_perm = first_perm;
+    }
+    if (out_attr != nullptr) {
+        *out_attr = first_attr & ~ignore_attr;
+    }
+
+    // If the end address isn't aligned, we need a block.
+    if (out_blocks_needed != nullptr) {
+        const size_t blocks_for_end_align =
+            (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+                ? 1
+                : 0;
+        *out_blocks_needed = blocks_for_end_align;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                                        KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                        KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                        KMemoryState state, KMemoryPermission perm_mask,
+                                        KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                        KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Check memory state.
+    const KProcessAddress last_addr = addr + size - 1;
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+    R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
+
+    // If the start address isn't aligned, we need a block.
+    if (out_blocks_needed != nullptr &&
+        Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+        ++(*out_blocks_needed);
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
+                                         KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                         KMemoryState state, KMemoryPermission perm_mask,
+                                         KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                         KMemoryAttribute attr, KMemoryPermission new_perm,
+                                         KMemoryAttribute lock_attr) {
+    // Validate basic preconditions.
+    ASSERT(False(lock_attr & attr));
+    ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+
+    // Validate the lock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check that the output page group is empty, if it exists.
+    if (out_pg) {
+        ASSERT(out_pg->GetNumPages() == 0);
+    }
+
+    // Check the state.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    KMemoryAttribute old_attr;
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Get the physical address, if we're supposed to.
+    if (out_paddr != nullptr) {
+        ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+    }
+
+    // Make the page group, if we're supposed to.
+    if (out_pg != nullptr) {
+        R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+    }
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        const KPageProperties properties = {new_perm, false,
+                                            True(old_attr & KMemoryAttribute::Uncached),
+                                            DisableMergeAttribute::DisableHeadBodyTail};
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+                            OperationType::ChangePermissions, false));
+    }
+
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // If we have an output group, open.
+    if (out_pg) {
+        out_pg->Open();
+    }
+
+    R_SUCCEED();
+}
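+
+// Sketch of a hypothetical caller (for illustration only): a
+// LockForCodeMemory-style wrapper might pin user pages as follows.
+//
+//   KPageGroup pg(kernel, block_info_manager);
+//   R_TRY(table.LockMemoryAndOpen(
+//       std::addressof(pg), nullptr, addr, size,
+//       KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+//       KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+//       KMemoryAttribute::All, KMemoryAttribute::None,
+//       KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
+//       KMemoryAttribute::Locked));
+//
+// On success, the pages are reprotected with the new permission, marked
+// Locked, and referenced by pg until the matching UnlockMemory call.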
+
+Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryPermission new_perm,
+                                    KMemoryAttribute lock_attr, const KPageGroup* pg) {
+    // Validate basic preconditions.
+    ASSERT((attr_mask & lock_attr) == lock_attr);
+    ASSERT((attr & lock_attr) == lock_attr);
+
+    // Validate the unlock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the state.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    KMemoryAttribute old_attr;
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Check the page group.
+    if (pg != nullptr) {
+        R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        const KPageProperties properties = {new_perm, false,
+                                            True(old_attr & KMemoryAttribute::Uncached),
+                                            DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+                            OperationType::ChangePermissions, false));
+    }
+
+    // Apply the memory block updates.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Locked);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+                                     KProcessAddress address) const {
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(out_info != nullptr);
+    ASSERT(out_page != nullptr);
+
+    const KMemoryBlock* block = m_memory_block_manager.FindBlock(address);
+    R_UNLESS(block != nullptr, ResultInvalidCurrentMemory);
+
+    *out_info = block->GetMemoryInfo();
+    out_page->flags = 0;
+    R_SUCCEED();
+}
+
+Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+                                        Svc::MemoryState state) const {
+    ASSERT(!this->IsLockedByCurrentThread());
+    ASSERT(out != nullptr);
+
+    const KProcessAddress region_start = this->GetRegionAddress(state);
+    const size_t region_size = this->GetRegionSize(state);
+
+    // Check that the address/size are potentially valid.
+    R_UNLESS((address < address + size), ResultNotFound);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    auto& impl = this->GetImpl();
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+    bool cur_valid = false;
+    TraversalEntry next_entry;
+    bool next_valid;
+    size_t tot_size = 0;
+
+    next_valid =
+        impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
+    next_entry.block_size =
+        (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));
+
+    // Iterate, looking for entry.
+    while (true) {
+        if ((!next_valid && !cur_valid) ||
+            (next_valid && cur_valid &&
+             next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+            cur_entry.block_size += next_entry.block_size;
+        } else {
+            if (cur_valid && cur_entry.phys_addr <= address &&
+                address + size <= cur_entry.phys_addr + cur_entry.block_size) {
+                // Check if this region is valid.
+                const KProcessAddress mapped_address =
+                    (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+                if (R_SUCCEEDED(this->CheckMemoryState(
+                        mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state),
+                        KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+                        KMemoryAttribute::None, KMemoryAttribute::None))) {
+                    // It is!
+                    *out = mapped_address;
+                    R_SUCCEED();
+                }
+            }
+
+            // Update tracking variables.
+            tot_size += cur_entry.block_size;
+            cur_entry = next_entry;
+            cur_valid = next_valid;
+        }
+
+        if (cur_entry.block_size + tot_size >= region_size) {
+            break;
+        }
+
+        next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+    }
+
+    // Check the last entry.
+    R_UNLESS(cur_valid, ResultNotFound);
+    R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound);
+    R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound);
+
+    // Check if the last region is valid.
+    const KProcessAddress mapped_address =
+        (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+    R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::Mask,
+                                       static_cast<KMemoryState>(state),
+                                       KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+                                       KMemoryAttribute::None, KMemoryAttribute::None)) {
+        R_CONVERT_ALL(ResultNotFound);
+    }
+    R_END_TRY_CATCH;
+
+    // We found the region.
+    *out = mapped_address;
+    R_SUCCEED();
+}
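+
+// Note on the traversal above: physically contiguous entries are coalesced
+// into cur_entry, and tot_size tracks cur_entry's virtual offset within the
+// region; a physical address found inside cur_entry therefore maps back to
+// region_start + tot_size + (address - cur_entry.phys_addr).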
+
+Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                 size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Validate that the dst address's state is valid.
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator for the source.
+    Result src_allocator_result;
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result;
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
+    // Map the memory.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+
+        // Create a page group for the memory being mapped.
+        KPageGroup pg(m_kernel, m_block_info_manager);
+
+        // Create the page group representing the source.
+        R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        // Reprotect the source as kernel-read/not mapped.
+        const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+        const KPageProperties src_properties = {new_src_perm, false, false,
+                                                DisableMergeAttribute::DisableHeadBodyTail};
+        R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+                            OperationType::ChangePermissions, false));
+
+        // Ensure that we unprotect the source pages on failure.
+        ON_RESULT_FAILURE {
+            const KPageProperties unprotect_properties = {
+                KMemoryPermission::UserReadWrite, false, false,
+                DisableMergeAttribute::EnableHeadBodyTail};
+            R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+                                   unprotect_properties, OperationType::ChangePermissions, true));
+        };
+
+        // Map the alias pages.
+        const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                    DisableMergeAttribute::DisableHead};
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+                                     false));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_src_perm, new_src_attr,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+    }
+
+    R_SUCCEED();
+}
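+
+// This backs the guest svcMapMemory: the source pages remain owned by the
+// process but become kernel-read-only and Locked, while the destination alias
+// is created as user read/write Stack memory, as the block updates above
+// record.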
+
+Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                   size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the source address's state is valid.
+    KMemoryState src_state;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+        src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+        KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+        KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+    // Validate that the dst address's state is valid.
+    KMemoryPermission dst_perm;
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+        dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Create an update allocator for the source.
+    Result src_allocator_result;
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result;
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
+    // Unmap the memory.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+
+        // Create a page group for the memory being unmapped.
+        KPageGroup pg(m_kernel, m_block_info_manager);
+
+        // Create the page group representing the destination.
+        R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+        // Ensure the page group is valid for the source.
+        R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        // Unmap the aliased copy of the pages.
+        const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+                            dst_unmap_properties, OperationType::Unmap, false));
+
+        // Ensure that we re-map the aliased pages on failure.
+        ON_RESULT_FAILURE {
+            this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+        };
+
+        // Try to set the permissions for the source pages back to what they should be.
+        const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+        R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+                            OperationType::ChangePermissions, false));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, src_state,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                     size_t size) {
+    // Validate the mapping request.
+    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+             ResultInvalidMemoryRegion);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Verify that the source memory is normal heap.
+    KMemoryState src_state;
+    KMemoryPermission src_perm;
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
+                                 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Verify that the destination memory is unmapped.
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator for the source.
+    Result src_allocator_result;
+    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_src_allocator_blocks);
+    R_TRY(src_allocator_result);
+
+    // Create an update allocator for the destination.
+    Result dst_allocator_result;
+    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                     m_memory_block_slab_manager,
+                                                     num_dst_allocator_blocks);
+    R_TRY(dst_allocator_result);
+
+    // Map the code memory.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+
+        // Create a page group for the memory being mapped.
+        KPageGroup pg(m_kernel, m_block_info_manager);
+
+        // Create the page group representing the source.
+        R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        // Reprotect the source as kernel-read/not mapped.
+        const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        const KPageProperties src_properties = {new_perm, false, false,
+                                                DisableMergeAttribute::DisableHeadBodyTail};
+        R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+                            OperationType::ChangePermissions, false));
+
+        // Ensure that we unprotect the source pages on failure.
+        ON_RESULT_FAILURE {
+            const KPageProperties unprotect_properties = {
+                src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail};
+            R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+                                   unprotect_properties, OperationType::ChangePermissions, true));
+        };
+
+        // Map the alias pages.
+        const KPageProperties dst_properties = {new_perm, false, false,
+                                                DisableMergeAttribute::DisableHead};
+        R_TRY(
+            this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_perm, KMemoryAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+                                      KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+                                      KMemoryBlockDisableMergeAttribute::Normal,
+                                      KMemoryBlockDisableMergeAttribute::None);
+    }
+
+    R_SUCCEED();
+}
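+
+// This backs code-alias requests (e.g. svcMapProcessCodeMemory): the heap
+// source is locked, and the alias is initially kernel-read/not-mapped; a
+// later SetProcessMemoryPermission call is what makes the alias executable.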
+
+Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                       size_t size) {
+    // Validate the mapping request.
+    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+             ResultInvalidMemoryRegion);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Verify that the source memory is locked normal heap.
+    size_t num_src_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::Locked));
+
+    // Verify that the destination memory is aliasable code.
+    size_t num_dst_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
+
+    // Determine whether any pages being unmapped are code.
+    bool any_code_pages = false;
+    {
+        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
+        while (true) {
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Check if the memory has code flag.
+            if (True(info.GetState() & KMemoryState::FlagCode)) {
+                any_code_pages = true;
+                break;
+            }
+
+            // Check if we're done.
+            if (dst_address + size - 1 <= info.GetLastAddress()) {
+                break;
+            }
+
+            // Advance.
+            ++it;
+        }
+    }
+
+    // Ensure that we maintain the instruction cache.
+    bool reprotected_pages = false;
+    SCOPE_EXIT({
+        if (reprotected_pages && any_code_pages) {
+            InvalidateEntireInstructionCache(m_system);
+        }
+    });
+
+    // Unmap.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+
+        // Create a page group for the memory being unmapped.
+        KPageGroup pg(m_kernel, m_block_info_manager);
+
+        // Create the page group representing the destination.
+        R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+        // Verify that the page group contains the same pages as the source.
+        R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+        // Create an update allocator for the source.
+        Result src_allocator_result;
+        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_src_allocator_blocks);
+        R_TRY(src_allocator_result);
+
+        // Create an update allocator for the destination.
+        Result dst_allocator_result;
+        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_dst_allocator_blocks);
+        R_TRY(dst_allocator_result);
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        // Unmap the aliased copy of the pages.
+        const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+                            dst_unmap_properties, OperationType::Unmap, false));
+
+        // Ensure that we re-map the aliased pages on failure.
+        ON_RESULT_FAILURE {
+            this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+        };
+
+        // Try to set the permissions for the source pages back to what they should be.
+        const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+        R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+                            OperationType::ChangePermissions, false));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+
+        // Note that we reprotected pages.
+        reprotected_pages = true;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
+    // Get the insecure memory resource limit and pool.
+    auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+    const auto insecure_pool =
+        static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());
+
+    // Reserve the insecure memory.
+    // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached.
+    KScopedResourceReservation memory_reservation(insecure_resource_limit,
+                                                  Svc::LimitableResource::PhysicalMemoryMax, size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory);
+
+    // Allocate pages for the insecure memory.
+    KPageGroup pg(m_kernel, m_block_info_manager);
+    R_TRY(m_kernel.MemoryManager().AllocateAndOpen(
+        std::addressof(pg), size / PageSize,
+        KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront)));
+
+    // Close the opened pages when we're done with them.
+    // If the mapping succeeds, each page will gain an extra reference; otherwise, they will
+    // be freed automatically.
+    SCOPE_EXIT({ pg.Close(); });
+
+    // Clear all the newly allocated pages.
+    for (const auto& it : pg) {
+        std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+                    static_cast<u32>(m_heap_fill_value), it.GetSize());
+    }
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate that the address's state is valid.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Map the pages.
+    const size_t num_pages = size / PageSize;
+    const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                            DisableMergeAttribute::DisableHead};
+    R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties,
+                        OperationType::MapGroup, false));
+
+    // Apply the memory block update.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages,
+                                  KMemoryState::Insecure, KMemoryPermission::UserReadWrite,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // Update our mapped insecure size.
+    m_mapped_insecure_memory += size;
+
+    // Commit the memory reservation.
+    memory_reservation.Commit();
+
+    // We succeeded.
+    R_SUCCEED();
+}
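+
+// Note the ordering above: memory_reservation.Commit() runs only after the
+// map succeeds, so any earlier failure lets the scoped reservation release
+// the insecure-memory limit automatically.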
+
+Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All,
+                                 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Unmap the memory.
+    const size_t num_pages = size / PageSize;
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+                        OperationType::Unmap, false));
+
+    // Apply the memory block update.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    // Update our mapped insecure size.
+    m_mapped_insecure_memory -= size;
+
+    // Release the insecure memory from the insecure limit.
+    if (auto* const insecure_resource_limit =
+            KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+        insecure_resource_limit != nullptr) {
+        insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size);
+    }
+
+    R_SUCCEED();
+}
+
+KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+                                             size_t num_pages, size_t alignment, size_t offset,
+                                             size_t guard_pages) const {
+    KProcessAddress address = 0;
+
+    if (num_pages <= region_num_pages) {
+        if (this->IsAslrEnabled()) {
+            // Try to directly find a free area up to 8 times.
+            for (size_t i = 0; i < 8; i++) {
+                const size_t random_offset =
+                    KSystemControl::GenerateRandomRange(
+                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
+                    alignment;
+                const KProcessAddress candidate =
+                    Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
+
+                KMemoryInfo info;
+                Svc::PageInfo page_info;
+                R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info),
+                                             candidate));
+
+                if (info.m_state != KMemoryState::Free) {
+                    continue;
+                }
+                if (!(region_start <= candidate)) {
+                    continue;
+                }
+                if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) {
+                    continue;
+                }
+                if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+                      info.GetLastAddress())) {
+                    continue;
+                }
+                if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+                      region_start + region_num_pages * PageSize - 1)) {
+                    continue;
+                }
+
+                address = candidate;
+                break;
+            }
+            // Fall back to finding the first free area with a random offset.
+            if (address == 0) {
+                // NOTE: Nintendo does not account for guard pages here.
+                // This may theoretically cause an offset to be chosen that cannot be mapped.
+                // We will account for guard pages.
+                const size_t offset_pages = KSystemControl::GenerateRandomRange(
+                    0, region_num_pages - num_pages - guard_pages);
+                address = m_memory_block_manager.FindFreeArea(
+                    region_start + offset_pages * PageSize, region_num_pages - offset_pages,
+                    num_pages, alignment, offset, guard_pages);
+            }
+        }
+        // Find the first free area.
+        if (address == 0) {
+            address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+                                                          alignment, offset, guard_pages);
+        }
+    }
+
+    return address;
+}
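+
+// Worked example (hypothetical numbers): with region_num_pages = 0x100,
+// num_pages = 0x10, guard_pages = 1, and alignment = PageSize, the random
+// candidate offset is drawn from [0, 0xEF] pages; a candidate is accepted
+// only if it falls in a Free block large enough to hold guard_pages before
+// and after the prospective mapping.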
+
+size_t KPageTableBase::GetSize(KMemoryState state) const {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Iterate, counting blocks with the desired state.
+    size_t total_size = 0;
+    for (KMemoryBlockManager::const_iterator it =
+             m_memory_block_manager.FindIterator(m_address_space_start);
+         it != m_memory_block_manager.end(); ++it) {
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
+        if (info.GetState() == state) {
+            total_size += info.GetSize();
+        }
+    }
+
+    return total_size;
+}
+
+size_t KPageTableBase::GetCodeSize() const {
+    return this->GetSize(KMemoryState::Code);
+}
+
+size_t KPageTableBase::GetCodeDataSize() const {
+    return this->GetSize(KMemoryState::CodeData);
+}
+
+size_t KPageTableBase::GetAliasCodeSize() const {
+    return this->GetSize(KMemoryState::AliasCode);
+}
+
+size_t KPageTableBase::GetAliasCodeDataSize() const {
+    return this->GetSize(KMemoryState::AliasCodeData);
+}
+
+Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                               size_t num_pages, KMemoryPermission perm) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Create a page group to hold the pages we allocate.
+    KPageGroup pg(m_kernel, m_block_info_manager);
+
+    // Allocate the pages.
+    R_TRY(
+        m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+
+    // Ensure that the page group is closed when we're done working with it.
+    SCOPE_EXIT({ pg.Close(); });
+
+    // Clear all pages.
+    for (const auto& it : pg) {
+        std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+                    static_cast<u32>(m_heap_fill_value), it.GetSize());
+    }
+
+    // Map the pages.
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None};
+    R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup,
+                           false));
+}
+
+Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                                        const KPageGroup& pg, const KPageProperties properties,
+                                        bool reuse_ll) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    KProcessAddress cur_address = address;
+
+    // Ensure that we clean up on failure.
+    ON_RESULT_FAILURE {
+        ASSERT(!reuse_ll);
+        if (cur_address != start_address) {
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            R_ASSERT(this->Operate(page_list, start_address,
+                                   (cur_address - start_address) / PageSize, 0, false,
+                                   unmap_properties, OperationType::Unmap, true));
+        }
+    };
+
+    // Iterate, mapping all pages in the group.
+    for (const auto& block : pg) {
+        // Map and advance.
+        const KPageProperties cur_properties =
+            (cur_address == start_address)
+                ? properties
+                : KPageProperties{properties.perm, properties.io, properties.uncached,
+                                  DisableMergeAttribute::None};
+        R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true,
+                            cur_properties, OperationType::Map, reuse_ll));
+        cur_address += block.GetSize();
+    }
+
+    // We succeeded!
+    R_SUCCEED();
+}
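+
+// Design note: only the first block uses the caller-provided
+// DisableMergeAttribute; subsequent blocks are mapped with
+// DisableMergeAttribute::None, since the interior of a single mapping should
+// remain mergeable.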
+
+void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                                    const KPageGroup& pg) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    const KProcessAddress last_address = start_address + size - 1;
+    const KProcessAddress end_address = last_address + 1;
+
+    // Iterate over the memory.
+    auto pg_it = pg.begin();
+    ASSERT(pg_it != pg.end());
+
+    KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+    size_t pg_pages = pg_it->GetNumPages();
+
+    auto it = m_memory_block_manager.FindIterator(start_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != m_memory_block_manager.end());
+
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
+
+        // Determine the range to map.
+        KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address));
+        const KProcessAddress map_end_address =
+            std::min(info.GetEndAddress(), GetInteger(end_address));
+        ASSERT(map_end_address != map_address);
+
+        // Determine if we should disable head merge.
+        const bool disable_head_merge =
+            info.GetAddress() >= GetInteger(start_address) &&
+            True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+        const KPageProperties map_properties = {
+            info.GetPermission(), false, false,
+            disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+        // While we have pages to map, map them.
+        size_t map_pages = (map_end_address - map_address) / PageSize;
+        while (map_pages > 0) {
+            // Check if we're at the end of the physical block.
+            if (pg_pages == 0) {
+                // Ensure there are more pages to map.
+                ASSERT(pg_it != pg.end());
+
+                // Advance our physical block.
+                ++pg_it;
+                pg_phys_addr = pg_it->GetAddress();
+                pg_pages = pg_it->GetNumPages();
+            }
+
+            // Map whatever we can.
+            const size_t cur_pages = std::min(pg_pages, map_pages);
+            R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true,
+                                   map_properties, OperationType::Map, true));
+
+            // Advance.
+            map_address += cur_pages * PageSize;
+            map_pages -= cur_pages;
+
+            pg_phys_addr += cur_pages * PageSize;
+            pg_pages -= cur_pages;
+        }
+
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+    }
+
+    // Check that we re-mapped precisely the page group.
+    ASSERT((++pg_it) == pg.end());
+}
+
+Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+
+    // We're making a new group, not adding to an existing one.
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+
+    auto& impl = this->GetImpl();
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr),
+             ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
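+    // The traversal may begin partway into a block; only the bytes from cur_addr to the
+    // end of that block count toward the group.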
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, adding to group as we go.
+    while (tot_size < size) {
+        R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)),
+                 ResultInvalidCurrentMemory);
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+            R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we add the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Add the last block.
+    const size_t cur_pages = cur_size / PageSize;
+    R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+    R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+    R_SUCCEED();
+}
+
+bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr,
+                                      size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+
+    // Empty groups are necessarily invalid.
+    if (pg.empty()) {
+        return false;
+    }
+
+    auto& impl = this->GetImpl();
+
+    // We're going to validate that the group we'd expect is the group we see.
+    auto cur_it = pg.begin();
+    KPhysicalAddress cur_block_address = cur_it->GetAddress();
+    size_t cur_block_pages = cur_it->GetNumPages();
+
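+    // Advances to the next expected block once the current one has been fully matched.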
+    auto UpdateCurrentIterator = [&]() {
+        if (cur_block_pages == 0) {
+            if ((++cur_it) == pg.end()) {
+                return false;
+            }
+
+            cur_block_address = cur_it->GetAddress();
+            cur_block_pages = cur_it->GetNumPages();
+        }
+        return true;
+    };
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
+        return false;
+    }
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, comparing expected to actual.
+    while (tot_size < size) {
+        if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
+            return false;
+        }
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            if (!IsHeapPhysicalAddress(cur_addr)) {
+                return false;
+            }
+
+            if (!UpdateCurrentIterator()) {
+                return false;
+            }
+
+            if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+                return false;
+            }
+
+            cur_block_address += cur_size;
+            cur_block_pages -= cur_pages;
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we compare the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    if (!IsHeapPhysicalAddress(cur_addr)) {
+        return false;
+    }
+
+    if (!UpdateCurrentIterator()) {
+        return false;
+    }
+
+    return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
+Result KPageTableBase::GetContiguousMemoryRangeWithState(
+    MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask,
+    KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+    KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    auto& impl = this->GetImpl();
+
+    // Begin a traversal.
+    TraversalContext context;
+    TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+    R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address),
+             ResultInvalidCurrentMemory);
+
+    // Traverse until we have accumulated enough size or the mapping is no longer contiguous.
+    const KPhysicalAddress phys_address = cur_entry.phys_addr;
+    size_t contig_size;
+    for (contig_size =
+             cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1));
+         contig_size < size; contig_size += cur_entry.block_size) {
+        if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
+            break;
+        }
+        if (cur_entry.phys_addr != phys_address + contig_size) {
+            break;
+        }
+    }
+
+    // Take the minimum size for our region.
+    size = std::min(size, contig_size);
+
+    // Check that the memory is contiguous (modulo the reference count bit).
+    const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted;
+    const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+        address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+        attr_mask, attr));
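+    // If the range is not reference-counted heap memory, it must still satisfy the
+    // caller's requested state exactly.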
+    if (!is_heap) {
+        R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask,
+                                               perm, attr_mask, attr));
+    }
+
+    // The memory is contiguous, so set the output range.
+    out->Set(phys_address, size, is_heap);
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size,
+                                           Svc::MemoryPermission svc_perm) {
+    const size_t num_pages = size / PageSize;
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Verify we can change the memory permission.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+                                 std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Determine new perm.
+    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+    R_SUCCEED_IF(old_perm == new_perm);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+                        OperationType::ChangePermissions, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+                                                  Svc::MemoryPermission svc_perm) {
+    const size_t num_pages = size / PageSize;
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Verify we can change the memory permission.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+                                 std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::FlagCode, KMemoryState::FlagCode,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Make a new page group for the region.
+    KPageGroup pg(m_kernel, m_block_info_manager);
+
+    // Determine new perm/state.
+    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+    KMemoryState new_state = old_state;
+    const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
+    const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+    const bool was_x =
+        (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+    ASSERT(!(is_w && is_x));
+
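+    // Making pages writable demotes code states to their data equivalents; write and
+    // execute are mutually exclusive here (asserted above).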
+    if (is_w) {
+        switch (old_state) {
+        case KMemoryState::Code:
+            new_state = KMemoryState::CodeData;
+            break;
+        case KMemoryState::AliasCode:
+            new_state = KMemoryState::AliasCodeData;
+            break;
+        default:
+            UNREACHABLE();
+        }
+    }
+
+    // Create a page group, if we're setting execute permissions.
+    if (is_x) {
+        R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
+    }
+
+    // Succeed if there's nothing to do.
+    R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+    const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush
+                                 : OperationType::ChangePermissions;
+    R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation,
+                        false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // Ensure cache coherency, if we're setting pages as executable.
+    if (is_x) {
+        for (const auto& block : pg) {
+            StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
+        }
+        InvalidateEntireInstructionCache(m_system);
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+                                          KMemoryAttribute attr) {
+    const size_t num_pages = size / PageSize;
+    ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Verify we can change the memory attribute.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    KMemoryAttribute old_attr;
+    size_t num_allocator_blocks;
+    constexpr KMemoryAttribute AttributeTestMask =
+        ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
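+    // Changing Uncached requires FlagCanChangeAttribute on the state, and changing
+    // PermissionLocked requires FlagCanPermissionLock.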
+    const KMemoryState state_test_mask =
+        (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute
+                                                 : KMemoryState::None) |
+        (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock
+                                                         : KMemoryState::None);
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // If we need to, perform a change attribute operation.
+    if (True(mask & KMemoryAttribute::Uncached)) {
+        // Determine the new attribute.
+        const KMemoryAttribute new_attr =
+            static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
+
+        // Perform operation.
+        const KPageProperties properties = {old_perm, false,
+                                            True(new_attr & KMemoryAttribute::Uncached),
+                                            DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+                            OperationType::ChangePermissionsAndRefreshAndFlush, false));
+    }
+
+    // Update the blocks.
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
+    // Lock the physical memory mutex.
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+
+    // Try to perform a reduction in heap, instead of an extension.
+    KProcessAddress cur_address;
+    size_t allocation_size;
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Validate that setting heap size is possible at all.
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
+                 ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
+
+        if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+            // The size being requested is less than the current size, so we need to free the end of
+            // the heap.
+
+            // Validate memory state.
+            size_t num_allocator_blocks;
+            R_TRY(this->CheckMemoryState(
+                std::addressof(num_allocator_blocks), m_heap_region_start + size,
+                (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All,
+                KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                KMemoryAttribute::All, KMemoryAttribute::None));
+
+            // Create an update allocator.
+            Result allocator_result;
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
+            R_TRY(allocator_result);
+
+            // We're going to perform an update, so create a helper.
+            KScopedPageTableUpdater updater(this);
+
+            // Unmap the end of the heap.
+            const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0,
+                                false, unmap_properties, OperationType::Unmap, false));
+
+            // Release the memory from the resource limit.
+            m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+                                      num_pages * PageSize);
+
+            // Apply the memory block update.
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);
+
+            // Update the current heap end.
+            m_current_heap_end = m_heap_region_start + size;
+
+            // Set the output.
+            *out = m_heap_region_start;
+            R_SUCCEED();
+        } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+            // The size requested is exactly the current size.
+            *out = m_heap_region_start;
+            R_SUCCEED();
+        } else {
+            // We have to allocate memory. Determine how much to allocate and where while the table
+            // is locked.
+            cur_address = m_current_heap_end;
+            allocation_size = size - (m_current_heap_end - m_heap_region_start);
+        }
+    }
+
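+    // NOTE: The table lock is not held here, but m_map_physical_memory_lock still is;
+    // heap growth always takes that lock first, so m_current_heap_end should remain
+    // stable until the table is re-locked below (asserted there).
+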
+    // Reserve memory for the heap extension.
+    KScopedResourceReservation memory_reservation(
+        m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+    // Allocate pages for the heap extension.
+    KPageGroup pg(m_kernel, m_block_info_manager);
+    R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize,
+                                                   m_allocate_option));
+
+    // Close the opened pages when we're done with them.
+    // If the mapping succeeds, each page will gain an extra reference; otherwise, they will be
+    // freed automatically.
+    SCOPE_EXIT({ pg.Close(); });
+
+    // Fill all the newly allocated pages with the heap fill value.
+    for (const auto& it : pg) {
+        std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value,
+                    it.GetSize());
+    }
+
+    // Map the pages.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Ensure that the heap hasn't changed since we began executing.
+        ASSERT(cur_address == m_current_heap_end);
+
+        // Check the memory state.
+        size_t num_allocator_blocks;
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
+                                     allocation_size, KMemoryState::All, KMemoryState::Free,
+                                     KMemoryPermission::None, KMemoryPermission::None,
+                                     KMemoryAttribute::None, KMemoryAttribute::None));
+
+        // Create an update allocator.
+        Result allocator_result;
+        KMemoryBlockManagerUpdateAllocator allocator(
+            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+        R_TRY(allocator_result);
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        // Map the pages.
+        const size_t num_pages = allocation_size / PageSize;
+        const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                (m_current_heap_end == m_heap_region_start)
+                                                    ? DisableMergeAttribute::DisableHead
+                                                    : DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg,
+                            map_properties, OperationType::MapGroup, false));
+
+        // We succeeded, so commit our memory reservation.
+        memory_reservation.Commit();
+
+        // Apply the memory block update.
+        m_memory_block_manager.Update(
+            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+                                                      : KMemoryBlockDisableMergeAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None);
+
+        // Update the current heap end.
+        m_current_heap_end = m_heap_region_start + size;
+
+        // Set the output.
+        *out = m_heap_region_start;
+        R_SUCCEED();
+    }
+}
+
+Result KPageTableBase::SetMaxHeapSize(size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Only process page tables are allowed to set heap size.
+    ASSERT(!this->IsKernel());
+
+    m_max_heap_size = size;
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+                                 KProcessAddress addr) const {
+    // If the address is invalid, create a fake block.
+    if (!this->Contains(addr, 1)) {
+        *out_info = {
+            .m_address = GetInteger(m_address_space_end),
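+            // Unsigned wraparound: size the fake block to cover everything from the end
+            // of the address space to the top of the address range.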
+            .m_size = 0 - GetInteger(m_address_space_end),
+            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+            .m_device_disable_merge_left_count = 0,
+            .m_device_disable_merge_right_count = 0,
+            .m_ipc_lock_count = 0,
+            .m_device_use_count = 0,
+            .m_ipc_disable_merge_count = 0,
+            .m_permission = KMemoryPermission::None,
+            .m_attribute = KMemoryAttribute::None,
+            .m_original_permission = KMemoryPermission::None,
+            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+        };
+        out_page_info->flags = 0;
+
+        R_SUCCEED();
+    }
+
+    // Otherwise, lock the table and query.
+    KScopedLightLock lk(m_general_lock);
+    R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
+}
+
+Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out,
+                                            KProcessAddress address) const {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Align the address down to page size.
+    address = Common::AlignDown(GetInteger(address), PageSize);
+
+    // Verify that we can query the address.
+    KMemoryInfo info;
+    Svc::PageInfo page_info;
+    R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
+
+    // Check the memory state.
+    R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical,
+                                 KMemoryState::FlagCanQueryPhysical,
+                                 KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead,
+                                 KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Prepare to traverse.
+    KPhysicalAddress phys_addr;
+    size_t phys_size;
+
+    KProcessAddress virt_addr = info.GetAddress();
+    KProcessAddress end_addr = info.GetEndAddress();
+
+    // Perform traversal.
+    {
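+        // Walk the table to find the physically contiguous run that contains address.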
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
+        R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+        // Set tracking variables.
+        phys_addr = next_entry.phys_addr;
+        phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+
+        // Iterate.
+        while (true) {
+            // Continue the traversal.
+            traverse_valid =
+                m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            if (!traverse_valid) {
+                break;
+            }
+
+            if (next_entry.phys_addr != (phys_addr + phys_size)) {
+                // Check if we're done.
+                if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
+                    break;
+                }
+
+                // Advance.
+                phys_addr = next_entry.phys_addr;
+                virt_addr += next_entry.block_size;
+                phys_size =
+                    next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+            } else {
+                phys_size += next_entry.block_size;
+            }
+
+            // Check if we're done.
+            if (end_addr < virt_addr + phys_size) {
+                break;
+            }
+        }
+        ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);
+
+        // Ensure we use the right size.
+        if (end_addr < virt_addr + phys_size) {
+            phys_size = end_addr - virt_addr;
+        }
+    }
+
+    // Set the output.
+    out->physical_address = GetInteger(phys_addr);
+    out->virtual_address = GetInteger(virt_addr);
+    out->size = phys_size;
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list,
+                                 KPhysicalAddress phys_addr, size_t size, KMemoryState state,
+                                 KMemoryPermission perm) {
+    // Check pre-conditions.
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+    ASSERT(Common::IsAligned(size, PageSize));
+    ASSERT(size > 0);
+
+    R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+    const size_t num_pages = size / PageSize;
+    const KPhysicalAddress last = phys_addr + size - 1;
+
+    // Get region extents.
+    const KProcessAddress region_start = m_kernel_map_region_start;
+    const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
+    const size_t region_num_pages = region_size / PageSize;
+
+    ASSERT(this->CanContain(region_start, region_size, state));
+
+    // Locate the memory region.
+    const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+    R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+    ASSERT(region->Contains(GetInteger(phys_addr)));
+
+    // Ensure that the region is mappable.
+    const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+    while (true) {
+        // Check that the region exists.
+        R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+        // Check the region attributes.
+        R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+        R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+                 ResultInvalidAddress);
+        R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+
+        // Check if we're done.
+        if (GetInteger(last) <= region->GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        region = region->GetNext();
+    }
+
+    // Select an address to map at.
+    KProcessAddress addr = 0;
+    {
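+        // Guard against overflow in the alignment arithmetic, and require that at least
+        // one aligned page fits between phys_addr and last.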
+        const size_t alignment = 4_KiB;
+        const KPhysicalAddress aligned_phys =
+            Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+        R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+        const KPhysicalAddress last_aligned_paddr =
+            Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+        R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+                 ResultInvalidAddress);
+
+        addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+                                  this->GetNumGuardPages());
+        R_UNLESS(addr != 0, ResultOutOfMemory);
+    }
+
+    // Check that we can map IO here.
+    ASSERT(this->CanContain(addr, size, state));
+    R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+                                    KMemoryPermission::None, KMemoryPermission::None,
+                                    KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map,
+                        false));
+
+    // Set the output address.
+    *out = addr;
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Map the io memory.
+    KProcessAddress addr;
+    R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size,
+                          KMemoryState::IoRegister, perm));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize,
+                                  KMemoryState::IoRegister, perm, KMemoryAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+                                   size_t size, Svc::MemoryMapping mapping,
+                                   Svc::MemoryPermission svc_perm) {
+    const size_t num_pages = size / PageSize;
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::None, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
+    const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister,
+                                        mapping == Svc::MemoryMapping::Uncached,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties,
+                        OperationType::Map, false));
+
+    // Update the blocks.
+    const auto state =
+        mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister;
+    m_memory_block_manager.Update(
+        std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked,
+        KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+                                     size_t size, Svc::MemoryMapping mapping) {
+    const size_t num_pages = size / PageSize;
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate the memory state.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    KMemoryAttribute old_attr;
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
+        std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+        mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister,
+        KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+        KMemoryAttribute::Locked));
+
+    // Validate that the region being unmapped corresponds to the physical range described.
+    {
+        // Get the impl.
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        ASSERT(
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));
+
+        // Check that the physical region matches.
+        R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion);
+
+        // Iterate.
+        for (size_t checked_size =
+                 next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+             checked_size < size; checked_size += next_entry.block_size) {
+            // Continue the traversal.
+            ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
+
+            // Check that the physical region matches.
+            R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion);
+        }
+    }
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // If the region being unmapped is Memory, synchronize.
+    if (mapping == Svc::MemoryMapping::Memory) {
+        // Change the region to be uncached.
+        const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None};
+        R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties,
+                               OperationType::ChangePermissionsAndRefresh, false));
+
+        // Temporarily unlock ourselves, so that other operations can occur while we flush the
+        // region.
+        m_general_lock.Unlock();
+        SCOPE_EXIT({ m_general_lock.Lock(); });
+
+        // Flush the region.
+        R_ASSERT(FlushDataCache(dst_address, size));
+    }
+
+    // Perform the unmap.
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+                           unmap_properties, OperationType::Unmap, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+    ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+    ASSERT(Common::IsAligned(size, PageSize));
+    ASSERT(size > 0);
+    R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+    const size_t num_pages = size / PageSize;
+    const KPhysicalAddress last = phys_addr + size - 1;
+
+    // Get region extents.
+    const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static);
+    const size_t region_size = this->GetRegionSize(KMemoryState::Static);
+    const size_t region_num_pages = region_size / PageSize;
+
+    // Locate the memory region.
+    const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+    R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+    ASSERT(region->Contains(GetInteger(phys_addr)));
+    R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress);
+
+    // Check the region attributes.
+    const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+    R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+    R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+    R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+             ResultInvalidAddress);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Select an address to map at.
+    KProcessAddress addr = 0;
+    {
+        const size_t alignment = 4_KiB;
+        const KPhysicalAddress aligned_phys =
+            Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+        R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+        const KPhysicalAddress last_aligned_paddr =
+            Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+        R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+                 ResultInvalidAddress);
+
+        addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+                                  this->GetNumGuardPages());
+        R_UNLESS(addr != 0, ResultOutOfMemory);
+    }
+
+    // Check that we can map static here.
+    ASSERT(this->CanContain(addr, size, KMemoryState::Static));
+    R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+                                    KMemoryPermission::None, KMemoryPermission::None,
+                                    KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+    R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+                        OperationType::Map, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static,
+                                  perm, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+    // Get the memory region.
+    const KMemoryRegion* region =
+        m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
+    R_UNLESS(region != nullptr, ResultOutOfRange);
+
+    // Check that the region is valid.
+    ASSERT(region->GetEndAddress() != 0);
+
+    // Map the region.
+    R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) {
+        R_CONVERT(ResultInvalidAddress, ResultOutOfRange)
+    } R_END_TRY_CATCH;
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                                KPhysicalAddress phys_addr, bool is_pa_valid,
+                                KProcessAddress region_start, size_t region_num_pages,
+                                KMemoryState state, KMemoryPermission perm) {
+    ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+    // Ensure this is a valid map request.
+    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Find a random address to map at.
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+                                              0, this->GetNumGuardPages());
+    R_UNLESS(addr != 0, ResultOutOfMemory);
+    ASSERT(Common::IsAligned(GetInteger(addr), alignment));
+    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+    R_ASSERT(this->CheckMemoryState(
+        addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    if (is_pa_valid) {
+        const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+                            OperationType::Map, false));
+    } else {
+        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
+    }
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    *out_addr = addr;
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                                KMemoryPermission perm) {
+    // Check that the map is in range.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Map the pages.
+    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+    // Check that the unmap is in range.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, state, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform the unmap.
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+                        OperationType::Unmap, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                                    KProcessAddress region_start, size_t region_num_pages,
+                                    KMemoryState state, KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
+    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Find a random address to map at.
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+                                              0, this->GetNumGuardPages());
+    R_UNLESS(addr != 0, ResultOutOfMemory);
+    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+    R_ASSERT(this->CheckMemoryState(
+        addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    *out_addr = addr;
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+                                    KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check if state allows us to map.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+                                      KMemoryState state) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid unmap request.
+    const size_t num_pages = pg.GetNumPages();
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check if state allows us to unmap.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, state, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Check that the page group is valid.
+    R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform unmapping operation.
+    const KPageProperties properties = {KMemoryPermission::None, false, false,
+                                        DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties,
+                        OperationType::Unmap, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address,
+                                            size_t num_pages, KMemoryState state_mask,
+                                            KMemoryState state, KMemoryPermission perm_mask,
+                                            KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                            KMemoryAttribute attr) {
+    // Ensure that the page group isn't null.
+    ASSERT(out != nullptr);
+
+    // Make sure that the region we're mapping is valid for the table.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check if state allows us to create the group.
+    R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Create a new page group for the region.
+    R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+    // Open a new reference to the pages in the group.
+    out->Open();
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+    // Check that the region is in range.
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    R_TRY(this->CheckMemoryStateContiguous(
+        address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+        KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+    // Get the impl.
+    auto& impl = this->GetImpl();
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    bool traverse_valid =
+        impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
+    R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
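+    // Note: block sizes are powers of two, so the AND above yields cur_addr's offset within its
+    // block; cur_size is then the span from cur_addr to the end of that block.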
+
+    // Iterate.
+    while (tot_size < size) {
+        // Continue the traversal.
+        traverse_valid =
+            impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+        R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
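+        // If the next block is physically contiguous with the current run, extend the run;
+        // otherwise, invalidate what has accumulated so far and start a new run at the next block.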
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            // Check that the pages are linearly mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Invalidate the block.
+            if (cur_size > 0) {
+                // NOTE: Nintendo does not check the result of invalidation.
+                InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+            }
+
+            // Advance.
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we use the right size for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Check that the last block is linearly mapped.
+    R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+    // Invalidate the last block.
+    if (cur_size > 0) {
+        // NOTE: Nintendo does not check the result of invalidation.
+        InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+    }
+
+    R_SUCCEED();
+}
+
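+// Unlike InvalidateProcessDataCache above, this variant requires the owning process to be
+// current, so it can invalidate by virtual address directly instead of walking physical blocks.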
+Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
+    // Check pre-condition: this is being called on the current process.
+    ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable()));
+
+    // Check that the region is in range.
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    R_TRY(this->CheckMemoryStateContiguous(
+        address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+        KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+    // Invalidate the data cache.
+    R_RETURN(InvalidateDataCache(address, size));
+}
+
+Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                       size_t size) {
+    // Lightly validate the region is in range.
+    R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Require that the memory either be user readable or debuggable.
+    const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+        src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead,
+        KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None));
+    if (!can_read) {
+        const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+            src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+            KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryAttribute::None));
+        R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+    }
+
+    // Get the impl.
+    auto& impl = this->GetImpl();
+    auto& dst_memory = GetCurrentMemory(m_kernel);
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    bool traverse_valid =
+        impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address);
+    R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    auto PerformCopy = [&]() -> Result {
+        // Ensure the address is linear mapped.
+        R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+        // Copy as much aligned data as we can.
+        if (cur_size >= sizeof(u32)) {
+            const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+            const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+            FlushDataCache(copy_src, copy_size);
+            R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer);
+
+            dst_address += copy_size;
+            cur_addr += copy_size;
+            cur_size -= copy_size;
+        }
+
+        // Copy remaining data.
+        if (cur_size > 0) {
+            const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+            FlushDataCache(copy_src, cur_size);
+            R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer);
+        }
+
+        R_SUCCEED();
+    };
+
+    // Iterate.
+    while (tot_size < size) {
+        // Continue the traversal.
+        traverse_valid =
+            impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+        ASSERT(traverse_valid);
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            // Perform copy.
+            R_TRY(PerformCopy());
+
+            // Advance.
+            dst_address += cur_size;
+
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we use the right size for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Perform copy for the last block.
+    R_TRY(PerformCopy());
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                        size_t size) {
+    // Lightly validate the region is in range.
+    R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Require that the memory either be user writable or debuggable.
+    const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+        dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+    if (!can_write) {
+        const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+            dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+            KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryAttribute::None));
+        R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+    }
+
+    // Get the impl.
+    auto& impl = this->GetImpl();
+    auto& src_memory = GetCurrentMemory(m_kernel);
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    bool traverse_valid =
+        impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address);
+    R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    auto PerformCopy = [&]() -> Result {
+        // Ensure the address is linear mapped.
+        R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+        // Copy as much aligned data as we can.
+        if (cur_size >= sizeof(u32)) {
+            const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+            void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+            R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size),
+                     ResultInvalidCurrentMemory);
+
+            StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size);
+
+            src_address += copy_size;
+            cur_addr += copy_size;
+            cur_size -= copy_size;
+        }
+
+        // Copy remaining data.
+        if (cur_size > 0) {
+            void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+            R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size),
+                     ResultInvalidCurrentMemory);
+
+            StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+        }
+
+        R_SUCCEED();
+    };
+
+    // Iterate.
+    while (tot_size < size) {
+        // Continue the traversal.
+        traverse_valid =
+            impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+        ASSERT(traverse_valid);
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            // Perform copy.
+            R_TRY(PerformCopy());
+
+            // Advance.
+            src_address += cur_size;
+
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we use the right size for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Perform copy for the last block.
+    R_TRY(PerformCopy());
+
+    // Invalidate the entire instruction cache, as this svc allows modifying executable pages.
+    InvalidateEntireInstructionCache(m_system);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr,
+                                        size_t size, KMemoryState state) {
+    // Check pre-conditions.
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Determine the mapping extents.
+    const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+    const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+    const size_t map_size = map_end - map_start;
+
+    // Get the memory reference to write into.
+    auto& dst_memory = GetCurrentMemory(m_kernel);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Temporarily map the io memory.
+    KProcessAddress io_addr;
+    R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+                          state, KMemoryPermission::UserRead));
+
+    // Ensure we unmap the io memory when we're done with it.
+    const KPageProperties unmap_properties =
+        KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+    SCOPE_EXIT({
+        R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+                               unmap_properties, OperationType::Unmap, true));
+    });
+
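+    // The temporary mapping is page-granular, so re-apply phys_addr's sub-page offset before
+    // copying.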
+    // Read the memory.
+    const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+    R_UNLESS(dst_memory.CopyBlock(dst_addr, read_addr, size), ResultInvalidPointer);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr,
+                                         size_t size, KMemoryState state) {
+    // Check pre-conditions.
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Determine the mapping extents.
+    const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+    const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+    const size_t map_size = map_end - map_start;
+
+    // Get the memory reference to read from.
+    auto& src_memory = GetCurrentMemory(m_kernel);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Temporarily map the io memory.
+    KProcessAddress io_addr;
+    R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+                          state, KMemoryPermission::UserReadWrite));
+
+    // Ensure we unmap the io memory when we're done with it.
+    const KPageProperties unmap_properties =
+        KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+    SCOPE_EXIT({
+        R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+                               unmap_properties, OperationType::Unmap, true));
+    });
+
+    // Write the memory.
+    const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+    R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                         size_t size, KMemoryState state) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+    // We need to lock both this table, and the current process's table, so set up some aliases.
+    KPageTableBase& src_page_table = *this;
+    KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+    // Acquire the table locks.
+    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+    // Check that the desired range is readable io memory.
+    R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state,
+                                           KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+                                           KMemoryAttribute::None, KMemoryAttribute::None));
+
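+    // Copy page by page: virtual pages of io memory need not be physically contiguous, so each
+    // page is translated and temporarily mapped on its own.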
+    // Read the memory.
+    KProcessAddress dst = dst_address;
+    const KProcessAddress last_address = src_address + size - 1;
+    while (src_address <= last_address) {
+        // Get the current physical address.
+        KPhysicalAddress phys_addr;
+        ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address));
+
+        // Determine the current read size.
+        const size_t cur_size =
+            std::min<size_t>(last_address - src_address + 1,
+                             Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) -
+                                 GetInteger(src_address));
+
+        // Read.
+        R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
+
+        // Advance.
+        src_address += cur_size;
+        dst += cur_size;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                                          size_t size, KMemoryState state) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+    // We need to lock both this table, and the current process's table, so set up some aliases.
+    KPageTableBase& dst_page_table = *this;
+    KPageTableBase& src_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+    // Acquire the table locks.
+    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+    // Check that the desired range is writable io memory.
+    R_TRY(this->CheckMemoryStateContiguous(
+        dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Write the memory.
+    KProcessAddress src = src_address;
+    const KProcessAddress last_address = dst_address + size - 1;
+    while (dst_address <= last_address) {
+        // Get the current physical address.
+        KPhysicalAddress phys_addr;
+        ASSERT(dst_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address));
+
+        // Determine the current write size.
+        const size_t cur_size =
+            std::min<size_t>(last_address - dst_address + 1,
+                             Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) -
+                                 GetInteger(dst_address));
+
+        // Write.
+        R_TRY(src_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
+
+        // Advance.
+        dst_address += cur_size;
+        src += cur_size;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+                                                    size_t size, KMemoryPermission perm,
+                                                    bool is_aligned, bool check_heap) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    const KMemoryState test_state =
+        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
+        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+    size_t num_allocator_blocks;
+    KMemoryState old_state;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
+                                 std::addressof(num_allocator_blocks), address, size, test_state,
+                                 test_state, perm, perm,
+                                 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+                                 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+    // Set whether the locked memory was io.
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+                                                      bool check_heap) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    const KMemoryState test_state =
+        KMemoryState::FlagCanDeviceMap |
+        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_allocator_blocks), address, size, test_state, test_state,
+        KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+        m_enable_device_address_space_merge
+            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+            : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+                                      KMemoryPermission::None);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+        KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check memory state.
+    size_t allocator_num_blocks = 0;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+        KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator for the region.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, allocator_num_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(
+        std::addressof(allocator), address, num_pages,
+        m_enable_device_address_space_merge
+            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare
+            : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight,
+        KMemoryPermission::None);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+                                                               KProcessAddress address, size_t size,
+                                                               KMemoryPermission perm,
+                                                               bool is_aligned) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Get the range.
+    const KMemoryState test_state =
+        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+    R_TRY(this->GetContiguousMemoryRangeWithState(
+        out, address, size, test_state, test_state, perm, perm,
+        KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None));
+
+    // We got the range, so open it.
+    out->Open();
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out,
+                                                                 KProcessAddress address,
+                                                                 size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Get the range.
+    R_TRY(this->GetContiguousMemoryRangeWithState(
+        out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap,
+        KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // We got the range, so open it.
+    out->Open();
+
+    R_SUCCEED();
+}
+
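+// Note: while locked for an ipc user buffer, the new permission (NotMapped | KernelReadWrite)
+// removes all user access; UnlockForIpcUserBuffer below restores UserReadWrite and clears the
+// Locked attribute.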
+Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+                                            size_t size) {
+    R_RETURN(this->LockMemoryAndOpen(
+        nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
+        KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
+        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+                                       KMemoryPermission::KernelReadWrite),
+        KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
+                                KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
+                                KMemoryPermission::None, KMemoryAttribute::All,
+                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                                KMemoryAttribute::Locked, nullptr));
+}
+
+Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+                                             KMemoryPermission perm) {
+    R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
+                                     KMemoryState::FlagCanTransfer, KMemoryPermission::All,
+                                     KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                                     KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size,
+                                               const KPageGroup& pg) {
+    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
+                                KMemoryState::FlagCanTransfer, KMemoryPermission::None,
+                                KMemoryPermission::None, KMemoryAttribute::All,
+                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                                KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+    R_RETURN(this->LockMemoryAndOpen(
+        out, nullptr, address, size, KMemoryState::FlagCanCodeMemory,
+        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::All, KMemoryAttribute::None,
+        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+                                       KMemoryPermission::KernelReadWrite),
+        KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size,
+                                           const KPageGroup& pg) {
+    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory,
+                                KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+                                KMemoryPermission::None, KMemoryAttribute::All,
+                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                                KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out,
+                                                               KProcessAddress address,
+                                                               size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Get the range.
+    R_TRY(this->GetContiguousMemoryRangeWithState(
+        out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+        KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached,
+        KMemoryAttribute::None));
+
+    // We got the range, so open it.
+    out->Open();
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToUser(
+    KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+    KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+    KMemoryAttribute src_attr) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+    // Get the destination memory reference.
+    auto& dst_memory = GetCurrentMemory(m_kernel);
+
+    // Copy the memory.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Check memory state.
+        R_TRY(this->CheckMemoryStateContiguous(
+            src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+            src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy as much aligned data as we can.
+            if (cur_size >= sizeof(u32)) {
+                const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+                R_UNLESS(dst_memory.WriteBlock(dst_addr,
+                                               GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+                                               copy_size),
+                         ResultInvalidCurrentMemory);
+
+                dst_addr += copy_size;
+                cur_addr += copy_size;
+                cur_size -= copy_size;
+            }
+
+            // Copy remaining data.
+            if (cur_size > 0) {
+                R_UNLESS(dst_memory.WriteBlock(
+                             dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+                         ResultInvalidCurrentMemory);
+            }
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                dst_addr += cur_size;
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToKernel(
+    void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+    KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+    KMemoryAttribute src_attr) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+    // Copy the memory.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Check memory state.
+        R_TRY(this->CheckMemoryStateContiguous(
+            src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+            src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy the data.
+            std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromUserToLinear(
+    KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state,
+    KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+    KProcessAddress src_addr) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Get the source memory reference.
+    auto& src_memory = GetCurrentMemory(m_kernel);
+
+    // Copy the memory.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Check memory state.
+        R_TRY(this->CheckMemoryStateContiguous(
+            dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+            dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy as much aligned data as we can.
+            if (cur_size >= sizeof(u32)) {
+                const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+                R_UNLESS(src_memory.ReadBlock(src_addr,
+                                              GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+                                              copy_size),
+                         ResultInvalidCurrentMemory);
+                src_addr += copy_size;
+                cur_addr += copy_size;
+                cur_size -= copy_size;
+            }
+
+            // Copy remaining data.
+            if (cur_size > 0) {
+                R_UNLESS(src_memory.ReadBlock(
+                             src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+                         ResultInvalidCurrentMemory);
+            }
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                src_addr += cur_size;
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+                                                    KMemoryState dst_state_mask,
+                                                    KMemoryState dst_state,
+                                                    KMemoryPermission dst_test_perm,
+                                                    KMemoryAttribute dst_attr_mask,
+                                                    KMemoryAttribute dst_attr, void* buffer) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Copy the memory.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Check memory state.
+        R_TRY(this->CheckMemoryStateContiguous(
+            dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+            dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy the data.
+            std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size);
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeap(
+    KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+    KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+    KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+    KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+    KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+    // For convenience, alias this.
+    KPageTableBase& src_page_table = *this;
+
+    // Lightly validate the ranges before doing anything else.
+    R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+    R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Copy the memory.
+    {
+        // Acquire the table locks.
+        KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+        // Check memory state.
+        R_TRY(src_page_table.CheckMemoryStateContiguous(
+            src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+            src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+        R_TRY(dst_page_table.CheckMemoryStateContiguous(
+            dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+            dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+        // Get implementations.
+        auto& src_impl = src_page_table.GetImpl();
+        auto& dst_impl = dst_page_table.GetImpl();
+
+        // Prepare for traversal.
+        TraversalContext src_context;
+        TraversalContext dst_context;
+        TraversalEntry src_next_entry;
+        TraversalEntry dst_next_entry;
+        bool traverse_valid;
+
+        // Begin traversal.
+        traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+                                                 std::addressof(src_context), src_addr);
+        ASSERT(traverse_valid);
+        traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+                                                 std::addressof(dst_context), dst_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+        KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+        size_t cur_src_size = src_next_entry.block_size -
+                              (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+        size_t cur_dst_size = dst_next_entry.block_size -
+                              (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+        // Adjust the initial block sizes.
+        src_next_entry.block_size = cur_src_size;
+        dst_next_entry.block_size = cur_dst_size;
+
+        // Before we get any crazier, succeed if there's nothing to do.
+        R_SUCCEED_IF(size == 0);
+
+        // We're going to manage dual traversal via an offset against the total size.
+        KPhysicalAddress cur_src_addr = cur_src_block_addr;
+        KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+        size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
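+        // cur_min_size is the largest span that is physically contiguous in both tables at once;
+        // each iteration can copy at most that much before a traversal must be advanced.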
+
+        // Iterate.
+        size_t ofs = 0;
+        while (ofs < size) {
+            // Determine how much we can copy this iteration.
+            const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+            // If we need to advance the traversals, do so.
+            bool updated_src = false, updated_dst = false, skip_copy = false;
+            if (ofs + cur_copy_size != size) {
+                if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+                    // Continue the src traversal.
+                    traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+                                                                std::addressof(src_context));
+                    ASSERT(traverse_valid);
+
+                    // Update source.
+                    updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+                }
+
+                if (cur_dst_addr + cur_min_size ==
+                    dst_next_entry.phys_addr + dst_next_entry.block_size) {
+                    // Continue the dst traversal.
+                    traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+                                                                std::addressof(dst_context));
+                    ASSERT(traverse_valid);
+
+                    // Update destination.
+                    updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+                }
+
+                // If we didn't update either of source/destination, skip the copy this iteration.
+                if (!updated_src && !updated_dst) {
+                    skip_copy = true;
+
+                    // Update the source block address.
+                    cur_src_block_addr = src_next_entry.phys_addr;
+                }
+            }
+
+            // Do the copy, unless we're skipping it.
+            if (!skip_copy) {
+                // We need both ends of the copy to be heap blocks.
+                R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+                R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+                // Copy the data.
+                std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+                            GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+                // Update.
+                cur_src_block_addr = src_next_entry.phys_addr;
+                cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+                cur_dst_block_addr = dst_next_entry.phys_addr;
+                cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+                // Advance offset.
+                ofs += cur_copy_size;
+            }
+
+            // Update min size.
+            cur_src_size = src_next_entry.block_size;
+            cur_dst_size = dst_next_entry.block_size;
+            cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+                                            cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+        }
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(
+    KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+    KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+    KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+    KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+    KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+    // For convenience, alias this.
+    KPageTableBase& src_page_table = *this;
+
+    // Lightly validate the ranges before doing anything else.
+    R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+    R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Copy the memory.
+    {
+        // Acquire the table locks.
+        KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+        // Check memory state for source.
+        R_TRY(src_page_table.CheckMemoryStateContiguous(
+            src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+            src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+        // Destination state is intentionally unchecked.
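+        // (Presumably the destination was validated and locked by the caller, e.g. during ipc
+        // buffer setup, so it is deliberately not re-checked here.)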
+
+        // Get implementations.
+        auto& src_impl = src_page_table.GetImpl();
+        auto& dst_impl = dst_page_table.GetImpl();
+
+        // Prepare for traversal.
+        TraversalContext src_context;
+        TraversalContext dst_context;
+        TraversalEntry src_next_entry;
+        TraversalEntry dst_next_entry;
+        bool traverse_valid;
+
+        // Begin traversal.
+        traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+                                                 std::addressof(src_context), src_addr);
+        ASSERT(traverse_valid);
+        traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+                                                 std::addressof(dst_context), dst_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+        KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+        size_t cur_src_size = src_next_entry.block_size -
+                              (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+        size_t cur_dst_size = dst_next_entry.block_size -
+                              (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+        // Adjust the initial block sizes.
+        src_next_entry.block_size = cur_src_size;
+        dst_next_entry.block_size = cur_dst_size;
+
+        // Before we get any crazier, succeed if there's nothing to do.
+        R_SUCCEED_IF(size == 0);
+
+        // We're going to manage dual traversal via an offset against the total size.
+        KPhysicalAddress cur_src_addr = cur_src_block_addr;
+        KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+        size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
+
+        // Iterate.
+        size_t ofs = 0;
+        while (ofs < size) {
+            // Determine how much we can copy this iteration.
+            const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+            // If we need to advance the traversals, do so.
+            bool updated_src = false, updated_dst = false, skip_copy = false;
+            if (ofs + cur_copy_size != size) {
+                if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+                    // Continue the src traversal.
+                    traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+                                                                std::addressof(src_context));
+                    ASSERT(traverse_valid);
+
+                    // Update source.
+                    updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+                }
+
+                if (cur_dst_addr + cur_min_size ==
+                    dst_next_entry.phys_addr + dst_next_entry.block_size) {
+                    // Continue the dst traversal.
+                    traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+                                                                std::addressof(dst_context));
+                    ASSERT(traverse_valid);
+
+                    // Update destination.
+                    updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+                }
+
+                // If we didn't update either of source/destination, skip the copy this iteration.
+                if (!updated_src && !updated_dst) {
+                    skip_copy = true;
+
+                    // Update the source block address.
+                    cur_src_block_addr = src_next_entry.phys_addr;
+                }
+            }
+
+            // Do the copy, unless we're skipping it.
+            if (!skip_copy) {
+                // We need both ends of the copy to be heap blocks.
+                R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+                R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+                // Copy the data.
+                std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+                            GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+                // Update.
+                cur_src_block_addr = src_next_entry.phys_addr;
+                cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+                cur_dst_block_addr = dst_next_entry.phys_addr;
+                cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+                // Advance offset.
+                ofs += cur_copy_size;
+            }
+
+            // Update min size.
+            cur_src_size = src_next_entry.block_size;
+            cur_dst_size = dst_next_entry.block_size;
+            cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+                                            cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+        }
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+                                         KProcessAddress address, size_t size,
+                                         KMemoryPermission test_perm, KMemoryState dst_state) {
+    // Validate pre-conditions.
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
+           test_perm == KMemoryPermission::UserRead);
+
+    // Check that the address is in range.
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Get the source permission.
+    const auto src_perm = static_cast<KMemoryPermission>(
+        (test_perm == KMemoryPermission::UserReadWrite)
+            ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+            : KMemoryPermission::UserRead);
+
+    // Get aligned extents.
+    const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+    const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+    const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+    const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+
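+    // The aligned extents round outward to cover every page the range touches; the mapping
+    // extents round inward to cover only pages wholly contained in the range. Partial head and
+    // tail pages fall between the two and are handled separately from the remapped body.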
+    const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
+    const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
+
+    // Get the test state and attribute mask.
+    KMemoryState test_state;
+    KMemoryAttribute test_attr_mask;
+    switch (dst_state) {
+    case KMemoryState::Ipc:
+        test_state = KMemoryState::FlagCanUseIpc;
+        test_attr_mask =
+            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonSecureIpc:
+        test_state = KMemoryState::FlagCanUseNonSecureIpc;
+        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonDeviceIpc:
+        test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    default:
+        R_THROW(ResultInvalidCombination);
+    }
+
+    // Ensure that on failure, we roll back appropriately.
+    size_t mapped_size = 0;
+    ON_RESULT_FAILURE {
+        if (mapped_size > 0) {
+            this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
+                                                          src_perm);
+        }
+    };
+
+    size_t blocks_needed = 0;
+
+    // Iterate, mapping as needed.
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
+    while (true) {
+        const KMemoryInfo info = it->GetMemoryInfo();
+
+        // Validate the current block.
+        R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
+                                     test_attr_mask, KMemoryAttribute::None));
+
+        if (mapping_src_start < mapping_src_end &&
+            GetInteger(mapping_src_start) < info.GetEndAddress() &&
+            info.GetAddress() < GetInteger(mapping_src_end)) {
+            const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+                                       ? info.GetAddress()
+                                       : GetInteger(mapping_src_start);
+            const auto cur_end = mapping_src_last >= info.GetLastAddress()
+                                     ? info.GetEndAddress()
+                                     : GetInteger(mapping_src_end);
+            const size_t cur_size = cur_end - cur_start;
+
+            if (info.GetAddress() < GetInteger(mapping_src_start)) {
+                ++blocks_needed;
+            }
+            if (mapping_src_last < info.GetLastAddress()) {
+                ++blocks_needed;
+            }
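+            // NOTE: Each boundary that splits an existing block consumes an extra block from
+            // the update allocator.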
+
+            // Set the permissions on the block, if we need to.
+            if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
+                const DisableMergeAttribute head_body_attr =
+                    (GetInteger(mapping_src_start) >= info.GetAddress())
+                        ? DisableMergeAttribute::DisableHeadAndBody
+                        : DisableMergeAttribute::None;
+                const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end))
+                                                            ? DisableMergeAttribute::DisableTail
+                                                            : DisableMergeAttribute::None;
+                const KPageProperties properties = {
+                    src_perm, false, false,
+                    static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+                R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties,
+                                    OperationType::ChangePermissions, false));
+            }
+
+            // Note that we mapped this part.
+            mapped_size += cur_size;
+        }
+
+        // If the block is at the end, we're done.
+        if (aligned_src_last <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+        ASSERT(it != m_memory_block_manager.end());
+    }
+
+    if (out_blocks_needed != nullptr) {
+        ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+        *out_blocks_needed = blocks_needed;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+                                         KProcessAddress src_addr, KMemoryPermission test_perm,
+                                         KMemoryState dst_state, KPageTableBase& src_page_table,
+                                         bool send) {
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(src_page_table.IsLockedByCurrentThread());
+
+    // Check that we can theoretically map.
+    const KProcessAddress region_start = m_alias_region_start;
+    const size_t region_size = m_alias_region_end - m_alias_region_start;
+    R_UNLESS(size < region_size, ResultOutOfAddressSpace);
+
+    // Get aligned source extents.
+    const KProcessAddress src_start = src_addr;
+    const KProcessAddress src_end = src_addr + size;
+    const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
+    const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
+    const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
+    const KProcessAddress mapping_src_end =
+        Common::AlignDown(GetInteger(src_start) + size, PageSize);
+    const size_t aligned_src_size = aligned_src_end - aligned_src_start;
+    const size_t mapping_src_size =
+        (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
+
+    // Select a random address to map at.
+    KProcessAddress dst_addr = 0;
+    {
+        const size_t alignment = 4_KiB;
+        const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
+
+        dst_addr =
+            this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
+                               alignment, offset, this->GetNumGuardPages());
+        R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
+    }
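+    // NOTE: aligned_src_start is page-aligned and the alignment is one page, so the offset
+    // passed to FindFreeArea is always zero here.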
+
+    // Check that we can perform the operation we're about to perform.
+    ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Reserve space for any partial pages we allocate.
+    const size_t unmapped_size = aligned_src_size - mapping_src_size;
+    KScopedResourceReservation memory_reservation(
+        m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+    // Ensure that we manage page references correctly.
+    KPhysicalAddress start_partial_page = 0;
+    KPhysicalAddress end_partial_page = 0;
+    KProcessAddress cur_mapped_addr = dst_addr;
+
+    // If the partial pages are mapped, an extra reference will have been opened; otherwise,
+    // they are freed when this scope exits.
+    SCOPE_EXIT({
+        if (start_partial_page != 0) {
+            m_kernel.MemoryManager().Close(start_partial_page, 1);
+        }
+        if (end_partial_page != 0) {
+            m_kernel.MemoryManager().Close(end_partial_page, 1);
+        }
+    });
+
+    ON_RESULT_FAILURE {
+        if (cur_mapped_addr != dst_addr) {
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            R_ASSERT(this->Operate(updater.GetPageList(), dst_addr,
+                                   (cur_mapped_addr - dst_addr) / PageSize, 0, false,
+                                   unmap_properties, OperationType::Unmap, true));
+        }
+    };
+
+    // Allocate the start page as needed.
+    if (aligned_src_start < mapping_src_start) {
+        start_partial_page =
+            m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+        R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
+    }
+
+    // Allocate the end page as needed.
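+    // NOTE: A separate end page is only needed when the unaligned tail is not already covered
+    // by the start partial page.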
+    if (mapping_src_end < aligned_src_end &&
+        (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
+        end_partial_page =
+            m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+        R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
+    }
+
+    // Get the implementation.
+    auto& src_impl = src_page_table.GetImpl();
+
+    // Get the fill value for partial pages.
+    const auto fill_val = m_ipc_fill_value;
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry),
+                                                  std::addressof(context), aligned_src_start);
+    ASSERT(traverse_valid);
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_block_addr = next_entry.phys_addr;
+    size_t cur_block_size =
+        next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
+    size_t tot_block_size = cur_block_size;
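+    // NOTE: The first traversal entry may point into the middle of a large physical block, so
+    // only the portion from the physical address to the end of that block is counted.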
+
+    // Map the start page, if we have one.
+    if (start_partial_page != 0) {
+        // Ensure the page holds correct data.
+        u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page);
+        if (send) {
+            const size_t partial_offset = src_start - aligned_src_start;
+            size_t copy_size, clear_size;
+            if (src_end < mapping_src_start) {
+                copy_size = size;
+                clear_size = mapping_src_start - src_end;
+            } else {
+                copy_size = mapping_src_start - src_start;
+                clear_size = 0;
+            }
+
+            std::memset(start_partial_virt, fill_val, partial_offset);
+            std::memcpy(start_partial_virt + partial_offset,
+                        GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset,
+                        copy_size);
+            if (clear_size > 0) {
+                std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size);
+            }
+        } else {
+            std::memset(start_partial_virt, fill_val, PageSize);
+        }
+
+        // Map the page.
+        const KPageProperties start_map_properties = {test_perm, false, false,
+                                                      DisableMergeAttribute::DisableHead};
+        R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true,
+                            start_map_properties, OperationType::Map, false));
+
+        // Update tracking extents.
+        cur_mapped_addr += PageSize;
+        cur_block_addr += PageSize;
+        cur_block_size -= PageSize;
+
+        // If the block's size was one page, we may need to continue traversal.
+        if (cur_block_size == 0 && aligned_src_size > PageSize) {
+            traverse_valid =
+                src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            cur_block_addr = next_entry.phys_addr;
+            cur_block_size = next_entry.block_size;
+            tot_block_size += next_entry.block_size;
+        }
+    }
+
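+    // NOTE: Physically contiguous traversal entries are coalesced below so that each Operate
+    // call maps the largest possible contiguous run.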
+    // Map the remaining pages.
+    while (aligned_src_start + tot_block_size < mapping_src_end) {
+        // Continue the traversal.
+        traverse_valid =
+            src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+        ASSERT(traverse_valid);
+
+        // Process the block.
+        if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
+            // Map the block we've been processing so far.
+            const KPageProperties map_properties = {test_perm, false, false,
+                                                    (cur_mapped_addr == dst_addr)
+                                                        ? DisableMergeAttribute::DisableHead
+                                                        : DisableMergeAttribute::None};
+            R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize,
+                                cur_block_addr, true, map_properties, OperationType::Map, false));
+
+            // Update tracking extents.
+            cur_mapped_addr += cur_block_size;
+            cur_block_addr = next_entry.phys_addr;
+            cur_block_size = next_entry.block_size;
+        } else {
+            cur_block_size += next_entry.block_size;
+        }
+        tot_block_size += next_entry.block_size;
+    }
+
+    // Handle the last direct-mapped page.
+    if (const KProcessAddress mapped_block_end =
+            aligned_src_start + tot_block_size - cur_block_size;
+        mapped_block_end < mapping_src_end) {
+        const size_t last_block_size = mapping_src_end - mapped_block_end;
+
+        // Map the last block.
+        const KPageProperties map_properties = {test_perm, false, false,
+                                                (cur_mapped_addr == dst_addr)
+                                                    ? DisableMergeAttribute::DisableHead
+                                                    : DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize,
+                            cur_block_addr, true, map_properties, OperationType::Map, false));
+
+        // Update tracking extents.
+        cur_mapped_addr += last_block_size;
+        cur_block_addr += last_block_size;
+        if (mapped_block_end + cur_block_size < aligned_src_end &&
+            cur_block_size == last_block_size) {
+            traverse_valid =
+                src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            cur_block_addr = next_entry.phys_addr;
+        }
+    }
+
+    // Map the end page, if we have one.
+    if (end_partial_page != 0) {
+        // Ensure the page holds correct data.
+        u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page);
+        if (send) {
+            const size_t copy_size = src_end - mapping_src_end;
+            std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr),
+                        copy_size);
+            std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size);
+        } else {
+            std::memset(end_partial_virt, fill_val, PageSize);
+        }
+
+        // Map the page.
+        const KPageProperties map_properties = {test_perm, false, false,
+                                                (cur_mapped_addr == dst_addr)
+                                                    ? DisableMergeAttribute::DisableHead
+                                                    : DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true,
+                            map_properties, OperationType::Map, false));
+    }
+
+    // Update memory blocks to reflect our changes.
+    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
+                                  dst_state, test_perm, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // Set the output address.
+    *out_addr = dst_addr + (src_start - aligned_src_start);
+
+    // We succeeded.
+    memory_reservation.Commit();
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size,
+                                   KProcessAddress src_addr, KPageTableBase& src_page_table,
+                                   KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
+    // For convenience, alias this.
+    KPageTableBase& dst_page_table = *this;
+
+    // Acquire the table locks.
+    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(std::addressof(src_page_table));
+
+    // Perform client setup.
+    size_t num_allocator_blocks;
+    R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
+                                           std::addressof(num_allocator_blocks), src_addr, size,
+                                           test_perm, dst_state));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 src_page_table.m_memory_block_slab_manager,
+                                                 num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Get the mapped extents.
+    const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+    const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
+    const size_t src_map_size = src_map_end - src_map_start;
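+    // NOTE: These extents cover only the pages fully contained in the source range; partial
+    // head/tail pages were copied rather than shared, so only these pages get IPC-locked.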
+
+    // Ensure that we clean up appropriately if we fail after this.
+    const auto src_perm = static_cast<KMemoryPermission>(
+        (test_perm == KMemoryPermission::UserReadWrite)
+            ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+            : KMemoryPermission::UserRead);
+    ON_RESULT_FAILURE {
+        if (src_map_end > src_map_start) {
+            src_page_table.CleanupForIpcClientOnServerSetupFailure(
+                updater.GetPageList(), src_map_start, src_map_size, src_perm);
+        }
+    };
+
+    // Perform server setup.
+    R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
+                                           src_page_table, send));
+
+    // If anything was mapped, ipc-lock the pages.
+    if (src_map_start < src_map_end) {
+        // Lock the pages for IPC with the source permission.
+        src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
+                                                         (src_map_end - src_map_start) / PageSize,
+                                                         &KMemoryBlock::LockForIpc, src_perm);
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size,
+                                           KMemoryState dst_state) {
+    // Validate the address.
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Validate the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, dst_state, KMemoryPermission::UserRead,
+                                 KMemoryPermission::UserRead, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Get aligned extents.
+    const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
+    const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+    const size_t aligned_size = aligned_end - aligned_start;
+    const size_t aligned_num_pages = aligned_size / PageSize;
+
+    // Unmap the pages.
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false,
+                        unmap_properties, OperationType::Unmap, false));
+
+    // Update memory blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
+                                  KMemoryState::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    // Release from the resource limit as relevant.
+    const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+    const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+    const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
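+    // NOTE: The difference aligned_size - mapping_size is the size of any partial head/tail
+    // pages allocated during server setup.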
+    m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+                              aligned_size - mapping_size);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size,
+                                           KMemoryState dst_state) {
+    // Validate the address.
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Get aligned source extents.
+    const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+    const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+    const KProcessAddress mapping_last = mapping_end - 1;
+    const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
+
+    // If nothing was mapped, we're actually done immediately.
+    R_SUCCEED_IF(mapping_size == 0);
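+    // NOTE: Only pages fully contained in the range were shared and IPC-locked, so an
+    // unaligned-only range has nothing to clean up.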
+
+    // Get the test state and attribute mask.
+    KMemoryState test_state;
+    KMemoryAttribute test_attr_mask;
+    switch (dst_state) {
+    case KMemoryState::Ipc:
+        test_state = KMemoryState::FlagCanUseIpc;
+        test_attr_mask =
+            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonSecureIpc:
+        test_state = KMemoryState::FlagCanUseNonSecureIpc;
+        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonDeviceIpc:
+        test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    default:
+        R_THROW(ResultInvalidCombination);
+    }
+
+    // Lock the table.
+    // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
+    // convention elsewhere in KPageTableBase.
+    KScopedLightLock lk(m_general_lock);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Ensure that on failure, we roll back appropriately.
+    size_t mapped_size = 0;
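+    // NOTE: The failure handler below re-applies the current (IPC-locked) permissions to any
+    // blocks whose protections were already restored, mirroring the consolidation logic of
+    // the main loop.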
+    ON_RESULT_FAILURE {
+        if (mapped_size > 0) {
+            // Determine where the mapping ends.
+            const auto mapped_end = GetInteger(mapping_start) + mapped_size;
+            const auto mapped_last = mapped_end - 1;
+
+            // Get current and next iterators.
+            KMemoryBlockManager::const_iterator start_it =
+                m_memory_block_manager.FindIterator(mapping_start);
+            KMemoryBlockManager::const_iterator next_it = start_it;
+            ++next_it;
+
+            // Get the current block info.
+            KMemoryInfo cur_info = start_it->GetMemoryInfo();
+
+            // Create tracking variables.
+            KProcessAddress cur_address = cur_info.GetAddress();
+            size_t cur_size = cur_info.GetSize();
+            bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+            bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+            bool first = cur_info.GetIpcDisableMergeCount() == 1 &&
+                         False(cur_info.GetDisableMergeAttribute() &
+                               KMemoryBlockDisableMergeAttribute::Locked);
+
+            while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
+                // Check that we have a next block.
+                ASSERT(next_it != m_memory_block_manager.end());
+
+                // Get the next info.
+                const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+                // Check if we can consolidate the next block's permission set with the current one.
+                const bool next_perm_eq =
+                    next_info.GetPermission() == next_info.GetOriginalPermission();
+                const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+                    cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+                    // We can consolidate the reprotection for the current and next block into a
+                    // single call.
+                    cur_size += next_info.GetSize();
+                } else {
+                    // We have to operate on the current block.
+                    if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+                        const KPageProperties properties = {
+                            cur_info.GetPermission(), false, false,
+                            first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+                                  : DisableMergeAttribute::None};
+                        R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+                                               cur_size / PageSize, 0, false, properties,
+                                               OperationType::ChangePermissions, true));
+                    }
+
+                    // Advance.
+                    cur_address = next_info.GetAddress();
+                    cur_size = next_info.GetSize();
+                    first = false;
+                }
+
+                // Advance.
+                cur_info = next_info;
+                cur_perm_eq = next_perm_eq;
+                cur_needs_set_perm = next_needs_set_perm;
+                ++next_it;
+            }
+
+            // Process the last block.
+            if ((first || cur_needs_set_perm) && !cur_perm_eq) {
+                const KPageProperties properties = {
+                    cur_info.GetPermission(), false, false,
+                    first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+                          : DisableMergeAttribute::None};
+                R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+                                       false, properties, OperationType::ChangePermissions, true));
+            }
+        }
+    };
+
+    // Iterate, reprotecting as needed.
+    {
+        // Get current and next iterators.
+        KMemoryBlockManager::const_iterator start_it =
+            m_memory_block_manager.FindIterator(mapping_start);
+        KMemoryBlockManager::const_iterator next_it = start_it;
+        ++next_it;
+
+        // Validate the current block.
+        KMemoryInfo cur_info = start_it->GetMemoryInfo();
+        R_ASSERT(this->CheckMemoryState(
+            cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+            test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+        // Create tracking variables.
+        KProcessAddress cur_address = cur_info.GetAddress();
+        size_t cur_size = cur_info.GetSize();
+        bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+        bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+        bool first =
+            cur_info.GetIpcDisableMergeCount() == 1 &&
+            False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked);
+
+        while ((cur_address + cur_size - 1) < mapping_last) {
+            // Check that we have a next block.
+            ASSERT(next_it != m_memory_block_manager.end());
+
+            // Get the next info.
+            const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+            // Validate the next block.
+            R_ASSERT(this->CheckMemoryState(
+                next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+                test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+            // Check if we can consolidate the next block's permission set with the current one.
+            const bool next_perm_eq =
+                next_info.GetPermission() == next_info.GetOriginalPermission();
+            const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+            if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+                cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+                // We can consolidate the reprotection for the current and next block into a single
+                // call.
+                cur_size += next_info.GetSize();
+            } else {
+                // We have to operate on the current block.
+                if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+                    const KPageProperties properties = {
+                        cur_needs_set_perm ? cur_info.GetOriginalPermission()
+                                           : cur_info.GetPermission(),
+                        false, false,
+                        first ? DisableMergeAttribute::EnableHeadAndBody
+                              : DisableMergeAttribute::None};
+                    R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+                                        false, properties, OperationType::ChangePermissions,
+                                        false));
+                }
+
+                // Mark that we mapped the block.
+                mapped_size += cur_size;
+
+                // Advance.
+                cur_address = next_info.GetAddress();
+                cur_size = next_info.GetSize();
+                first = false;
+            }
+
+            // Advance.
+            cur_info = next_info;
+            cur_perm_eq = next_perm_eq;
+            cur_needs_set_perm = next_needs_set_perm;
+            ++next_it;
+        }
+
+        // Process the last block.
+        const auto lock_count =
+            cur_info.GetIpcLockCount() +
+            (next_it != m_memory_block_manager.end()
+                 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+                 : 0);
+        if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
+            const DisableMergeAttribute head_body_attr =
+                first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None;
+            const DisableMergeAttribute tail_attr =
+                lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None;
+            const KPageProperties properties = {
+                cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(),
+                false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+            R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false,
+                                properties, OperationType::ChangePermissions, false));
+        }
+    }
+
+    // Create an update allocator.
+    // NOTE: Guaranteed zero blocks needed here.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, 0);
+    R_TRY(allocator_result);
+
+    // Unlock the pages.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
+                                      mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
+                                      KMemoryPermission::None);
+
+    R_SUCCEED();
+}
+
+void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list,
+                                                             KProcessAddress address, size_t size,
+                                                             KMemoryPermission prot_perm) {
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+    ASSERT(Common::IsAligned(size, PageSize));
+
+    // Get the mapped extents.
+    const KProcessAddress src_map_start = address;
+    const KProcessAddress src_map_end = address + size;
+    const KProcessAddress src_map_last = src_map_end - 1;
+
+    // This function is only invoked when there's something to do.
+    ASSERT(src_map_end > src_map_start);
+
+    // Iterate over blocks, fixing permissions.
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
+    while (true) {
+        const KMemoryInfo info = it->GetMemoryInfo();
+
+        const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
+                                   ? info.GetAddress()
+                                   : GetInteger(src_map_start);
+        const auto cur_end =
+            src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
+
+        // If we can, fix the protections on the block.
+        if ((info.GetIpcLockCount() == 0 &&
+             (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
+            (info.GetIpcLockCount() != 0 &&
+             (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
+            // Check if we actually need to fix the protections on the block.
+            if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
+                (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
+                const bool start_nc = (info.GetAddress() == GetInteger(src_map_start))
+                                          ? (False(info.GetDisableMergeAttribute() &
+                                                   (KMemoryBlockDisableMergeAttribute::Locked |
+                                                    KMemoryBlockDisableMergeAttribute::IpcLeft)))
+                                          : info.GetAddress() <= GetInteger(src_map_start);
+
+                const DisableMergeAttribute head_body_attr =
+                    start_nc ? DisableMergeAttribute::EnableHeadAndBody
+                             : DisableMergeAttribute::None;
+                DisableMergeAttribute tail_attr;
+                if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
+                    auto next_it = it;
+                    ++next_it;
+
+                    const auto lock_count =
+                        info.GetIpcLockCount() +
+                        (next_it != m_memory_block_manager.end()
+                             ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+                             : 0);
+                    tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail
+                                                : DisableMergeAttribute::None;
+                } else {
+                    tail_attr = DisableMergeAttribute::None;
+                }
+
+                const KPageProperties properties = {
+                    info.GetPermission(), false, false,
+                    static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+                R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0,
+                                       false, properties, OperationType::ChangePermissions, true));
+            }
+        }
+
+        // If we're past the end of the region, we're done.
+        if (src_map_last <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+        ASSERT(it != m_memory_block_manager.end());
+    }
+}
+
+Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
+    // Lock the physical memory lock.
+    KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+    // Calculate the last address for convenience.
+    const KProcessAddress last_address = address + size - 1;
+
+    // Define iteration variables.
+    KProcessAddress cur_address;
+    size_t mapped_size;
+
+    // The entire mapping process can be retried.
+    while (true) {
+        // Check if the memory is already mapped.
+        {
+            // Lock the table.
+            KScopedLightLock lk(m_general_lock);
+
+            // Iterate over the memory.
+            cur_address = address;
+            mapped_size = 0;
+
+            auto it = m_memory_block_manager.FindIterator(cur_address);
+            while (true) {
+                // Check that the iterator is valid.
+                ASSERT(it != m_memory_block_manager.end());
+
+                // Get the memory info.
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                // Check if we're done.
+                if (last_address <= info.GetLastAddress()) {
+                    if (info.GetState() != KMemoryState::Free) {
+                        mapped_size += (last_address + 1 - cur_address);
+                    }
+                    break;
+                }
+
+                // Track the memory if it's mapped.
+                if (info.GetState() != KMemoryState::Free) {
+                    mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+                }
+
+                // Advance.
+                cur_address = info.GetEndAddress();
+                ++it;
+            }
+
+            // If the size mapped is the size requested, we've nothing to do.
+            R_SUCCEED_IF(size == mapped_size);
+        }
+
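+        // NOTE: The table lock was released with the scope above, so after allocating we
+        // re-verify the mapped size under the lock and retry if another thread changed it.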
+        // Allocate and map the memory.
+        {
+            // Reserve the memory from the process resource limit.
+            KScopedResourceReservation memory_reservation(
+                m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size);
+            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+            // Allocate pages for the new memory.
+            KPageGroup pg(m_kernel, m_block_info_manager);
+            R_TRY(m_kernel.MemoryManager().AllocateForProcess(
+                std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option,
+                GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
+
+            // If we fail in the next bit (or retry), we need to clean up the pages.
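+            // NOTE: OpenFirst takes the initial reference on the freshly allocated pages, and
+            // Close then drops it, freeing them.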
+            auto pg_guard = SCOPE_GUARD({
+                pg.OpenFirst();
+                pg.Close();
+            });
+
+            // Map the memory.
+            {
+                // Lock the table.
+                KScopedLightLock lk(m_general_lock);
+
+                size_t num_allocator_blocks = 0;
+
+                // Verify that nobody has mapped memory since we first checked.
+                {
+                    // Iterate over the memory.
+                    size_t checked_mapped_size = 0;
+                    cur_address = address;
+
+                    auto it = m_memory_block_manager.FindIterator(cur_address);
+                    while (true) {
+                        // Check that the iterator is valid.
+                        ASSERT(it != m_memory_block_manager.end());
+
+                        // Get the memory info.
+                        const KMemoryInfo info = it->GetMemoryInfo();
+
+                        const bool is_free = info.GetState() == KMemoryState::Free;
+                        if (is_free) {
+                            if (info.GetAddress() < GetInteger(address)) {
+                                ++num_allocator_blocks;
+                            }
+                            if (last_address < info.GetLastAddress()) {
+                                ++num_allocator_blocks;
+                            }
+                        }
+
+                        // Check if we're done.
+                        if (last_address <= info.GetLastAddress()) {
+                            if (!is_free) {
+                                checked_mapped_size += (last_address + 1 - cur_address);
+                            }
+                            break;
+                        }
+
+                        // Track the memory if it's mapped.
+                        if (!is_free) {
+                            checked_mapped_size +=
+                                KProcessAddress(info.GetEndAddress()) - cur_address;
+                        }
+
+                        // Advance.
+                        cur_address = info.GetEndAddress();
+                        ++it;
+                    }
+
+                    // If the size now isn't what it was before, somebody mapped or unmapped
+                    // concurrently. If this happened, retry.
+                    if (mapped_size != checked_mapped_size) {
+                        continue;
+                    }
+                }
+
+                // Create an update allocator.
+                ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+                Result allocator_result;
+                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                             m_memory_block_slab_manager,
+                                                             num_allocator_blocks);
+                R_TRY(allocator_result);
+
+                // We're going to perform an update, so create a helper.
+                KScopedPageTableUpdater updater(this);
+
+                // Prepare to iterate over the memory.
+                auto pg_it = pg.begin();
+                KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+                size_t pg_pages = pg_it->GetNumPages();
+
+                // Reset the current tracking address, and make sure we clean up on failure.
+                pg_guard.Cancel();
+                cur_address = address;
+                ON_RESULT_FAILURE {
+                    if (cur_address > address) {
+                        const KProcessAddress last_unmap_address = cur_address - 1;
+
+                        // Iterate, unmapping the pages.
+                        cur_address = address;
+
+                        auto it = m_memory_block_manager.FindIterator(cur_address);
+                        while (true) {
+                            // Check that the iterator is valid.
+                            ASSERT(it != m_memory_block_manager.end());
+
+                            // Get the memory info.
+                            const KMemoryInfo info = it->GetMemoryInfo();
+
+                            // If the memory state is still free, it's a range we mapped (the
+                            // block manager hasn't been updated yet), so we need to unmap it.
+                            if (info.GetState() == KMemoryState::Free) {
+                                // Determine the range to unmap.
+                                const KPageProperties unmap_properties = {
+                                    KMemoryPermission::None, false, false,
+                                    DisableMergeAttribute::None};
+                                const size_t cur_pages =
+                                    std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+                                             last_unmap_address + 1 - cur_address) /
+                                    PageSize;
+
+                                // Unmap.
+                                R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+                                                       cur_pages, 0, false, unmap_properties,
+                                                       OperationType::Unmap, true));
+                            }
+
+                            // Check if we're done.
+                            if (last_unmap_address <= info.GetLastAddress()) {
+                                break;
+                            }
+
+                            // Advance.
+                            cur_address = info.GetEndAddress();
+                            ++it;
+                        }
+                    }
+
+                    // Release any remaining unmapped memory.
+                    m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
+                    m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages);
+                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
+                        m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(),
+                                                           pg_it->GetNumPages());
+                        m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
+                    }
+                };
+
+                auto it = m_memory_block_manager.FindIterator(cur_address);
+                while (true) {
+                    // Check that the iterator is valid.
+                    ASSERT(it != m_memory_block_manager.end());
+
+                    // Get the memory info.
+                    const KMemoryInfo info = it->GetMemoryInfo();
+
+                    // If it's unmapped, we need to map it.
+                    if (info.GetState() == KMemoryState::Free) {
+                        // Determine the range to map.
+                        const KPageProperties map_properties = {
+                            KMemoryPermission::UserReadWrite, false, false,
+                            cur_address == this->GetAliasRegionStart()
+                                ? DisableMergeAttribute::DisableHead
+                                : DisableMergeAttribute::None};
+                        size_t map_pages =
+                            std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+                                     last_address + 1 - cur_address) /
+                            PageSize;
+
+                        // While we have pages to map, map them.
+                        {
+                            // Create a page group for the current mapping range.
+                            KPageGroup cur_pg(m_kernel, m_block_info_manager);
+                            {
+                                ON_RESULT_FAILURE_2 {
+                                    cur_pg.OpenFirst();
+                                    cur_pg.Close();
+                                };
+
+                                size_t remain_pages = map_pages;
+                                while (remain_pages > 0) {
+                                    // Check if we're at the end of the physical block.
+                                    if (pg_pages == 0) {
+                                        // Ensure there are more pages to map.
+                                        ASSERT(pg_it != pg.end());
+
+                                        // Advance our physical block.
+                                        ++pg_it;
+                                        pg_phys_addr = pg_it->GetAddress();
+                                        pg_pages = pg_it->GetNumPages();
+                                    }
+
+                                    // Add whatever we can to the current block.
+                                    const size_t cur_pages = std::min(pg_pages, remain_pages);
+                                    R_TRY(cur_pg.AddBlock(pg_phys_addr +
+                                                              ((pg_pages - cur_pages) * PageSize),
+                                                          cur_pages));
+
+                                    // Advance.
+                                    remain_pages -= cur_pages;
+                                    pg_pages -= cur_pages;
+                                }
+                            }
+
+                            // Map the pages.
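+                            // NOTE: MapFirstGroup opens the first reference on
+                            // the pages as part of the map, taking over
+                            // ownership from the allocation.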
+                            R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
+                                                cur_pg, map_properties,
+                                                OperationType::MapFirstGroup, false));
+                        }
+                    }
+
+                    // Check if we're done.
+                    if (last_address <= info.GetLastAddress()) {
+                        break;
+                    }
+
+                    // Advance.
+                    cur_address = info.GetEndAddress();
+                    ++it;
+                }
+
+                // We succeeded, so commit the memory reservation.
+                memory_reservation.Commit();
+
+                // Increase our tracked mapped size.
+                m_mapped_physical_memory_size += (size - mapped_size);
+
+                // Update the relevant memory blocks.
+                m_memory_block_manager.UpdateIfMatch(
+                    std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+                    KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+                    KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+                    address == this->GetAliasRegionStart()
+                        ? KMemoryBlockDisableMergeAttribute::Normal
+                        : KMemoryBlockDisableMergeAttribute::None,
+                    KMemoryBlockDisableMergeAttribute::None);
+
+                R_SUCCEED();
+            }
+        }
+    }
+}
+
+Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+    // Lock the physical memory lock.
+    KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Calculate the last address for convenience.
+    const KProcessAddress last_address = address + size - 1;
+
+    // Define iteration variables.
+    KProcessAddress map_start_address = 0;
+    KProcessAddress map_last_address = 0;
+
+    KProcessAddress cur_address;
+    size_t mapped_size;
+    size_t num_allocator_blocks = 0;
+
+    // Check if the memory is mapped.
+    {
+        // Iterate over the memory.
+        cur_address = address;
+        mapped_size = 0;
+
+        auto it = m_memory_block_manager.FindIterator(cur_address);
+        while (true) {
+            // Check that the iterator is valid.
+            ASSERT(it != m_memory_block_manager.end());
+
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Verify the memory's state.
+            const bool is_normal = info.GetState() == KMemoryState::Normal &&
+                                   info.GetAttribute() == KMemoryAttribute::None;
+            const bool is_free = info.GetState() == KMemoryState::Free;
+            R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+            if (is_normal) {
+                R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+                if (map_start_address == 0) {
+                    map_start_address = cur_address;
+                }
+                map_last_address =
+                    (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
+
+                if (info.GetAddress() < GetInteger(address)) {
+                    ++num_allocator_blocks;
+                }
+                if (last_address < info.GetLastAddress()) {
+                    ++num_allocator_blocks;
+                }
+
+                mapped_size += (map_last_address + 1 - cur_address);
+            }
+
+            // Check if we're done.
+            if (last_address <= info.GetLastAddress()) {
+                break;
+            }
+
+            // Advance.
+            cur_address = info.GetEndAddress();
+            ++it;
+        }
+
+        // If there's nothing mapped, we've nothing to do.
+        R_SUCCEED_IF(mapped_size == 0);
+    }
+
+    // Create an update allocator.
+    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Separate the mapping.
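+    // NOTE: This splits any large-page mappings at the range boundaries so the pages can be
+    // unmapped independently of adjacent mappings.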
+    const KPageProperties sep_properties = {KMemoryPermission::None, false, false,
+                                            DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), map_start_address,
+                        (map_last_address + 1 - map_start_address) / PageSize, 0, false,
+                        sep_properties, OperationType::Separate, false));
+
+    // Reset the current tracking address.
+    cur_address = address;
+
+    // Iterate over the memory, unmapping as we go.
+    auto it = m_memory_block_manager.FindIterator(cur_address);
+
+    const auto clear_merge_attr =
+        (it->GetState() == KMemoryState::Normal &&
+         it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+            ? KMemoryBlockDisableMergeAttribute::Normal
+            : KMemoryBlockDisableMergeAttribute::None;
+
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != m_memory_block_manager.end());
+
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
+
+        // If the memory state is normal, we need to unmap it.
+        if (info.GetState() == KMemoryState::Normal) {
+            // Determine the range to unmap.
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+                                              last_address + 1 - cur_address) /
+                                     PageSize;
+
+            // Unmap.
+            R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
+                                   unmap_properties, OperationType::Unmap, false));
+        }
+
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        cur_address = info.GetEndAddress();
+        ++it;
+    }
+
+    // Release the memory resource.
+    m_mapped_physical_memory_size -= mapped_size;
+    m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size);
+
+    // Update memory blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  clear_merge_attr);
+
+    // We succeeded.
+    R_SUCCEED();
+}
+
+Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+    UNIMPLEMENTED();
+    R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+    UNIMPLEMENTED();
+    R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+                                          KPageTableBase& src_page_table,
+                                          KProcessAddress src_address) {
+    // We need to lock both this table and the current process's table, so set up an alias.
+    KPageTableBase& dst_page_table = *this;
+
+    // Acquire the table locks.
+    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+    // Check that the memory is mapped in the destination process.
+    size_t num_allocator_blocks;
+    R_TRY(dst_page_table.CheckMemoryState(
+        std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+        KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Check that the memory is mapped in the source process.
+    R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess,
+                                          KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
+                                          KMemoryPermission::None, KMemoryAttribute::All,
+                                          KMemoryAttribute::None));
+
+    // Validate that the memory ranges are compatible.
+    {
+        // Define a helper type.
+        struct ContiguousRangeInfo {
+        public:
+            KPageTableBase& m_pt;
+            TraversalContext m_context;
+            TraversalEntry m_entry;
+            KPhysicalAddress m_phys_addr;
+            size_t m_cur_size;
+            size_t m_remaining_size;
+
+        public:
+            ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size)
+                : m_pt(pt), m_remaining_size(size) {
+                // Begin a traversal.
+                ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry),
+                                                     std::addressof(m_context), address));
+
+                // Set up tracking fields.
+                m_phys_addr = m_entry.phys_addr;
+                m_cur_size = std::min<size_t>(
+                    m_remaining_size,
+                    m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
+
+                // Consume the whole contiguous block.
+                this->DetermineContiguousBlockExtents();
+            }
+
+            void ContinueTraversal() {
+                // Update our remaining size.
+                m_remaining_size -= m_cur_size;
+
+                // Update our tracking fields.
+                if (m_remaining_size > 0) {
+                    m_phys_addr = m_entry.phys_addr;
+                    m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);
+
+                    // Consume the whole contiguous block.
+                    this->DetermineContiguousBlockExtents();
+                }
+            }
+
+        private:
+            void DetermineContiguousBlockExtents() {
+                // Continue traversing until we're not contiguous, or we have enough.
+                while (m_cur_size < m_remaining_size) {
+                    ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry),
+                                                            std::addressof(m_context)));
+
+                    // If we're not contiguous, we're done.
+                    if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
+                        break;
+                    }
+
+                    // Update our current size.
+                    m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
+                }
+            }
+        };
+
+        // Create ranges for both tables.
+        ContiguousRangeInfo src_range(src_page_table, src_address, size);
+        ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
+
+        // Validate the ranges: both traversals advance in lock-step, and any
+        // divergence in physical address or contiguous extent means the two
+        // virtual ranges are not backed by identical physical memory.
+        while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
+            R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion);
+            R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion);
+
+            src_range.ContinueTraversal();
+            dst_range.ContinueTraversal();
+        }
+    }
+
+    // We no longer need to hold our lock on the source page table.
+    lk.TryUnlockHalf(src_page_table.m_general_lock);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Unmap the memory.
+    const size_t num_pages = size / PageSize;
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties,
+                        OperationType::Unmap, false));
+
+    // Apply the memory block update.
+    m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+                                  KMemoryState::Free, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+                               size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid,
+                               const KPageProperties properties, OperationType operation,
+                               bool reuse_ll) {
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(num_pages > 0);
+    ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+    ASSERT(this->ContainsPages(virt_addr, num_pages));
+
+    // As we don't allocate page entries in guest memory, we don't need to
+    // allocate them from or free them to the page list, so both the page list
+    // and the page properties go unused.
+
+    switch (operation) {
+    case OperationType::Unmap: {
+        // Ensure that any pages we track are closed on exit.
+        KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
+        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+        // Make a page group representing the region to unmap.
+        this->MakePageGroup(pages_to_close, virt_addr, num_pages);
+
+        // Unmap.
+        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+
+        R_SUCCEED();
+    }
+    case OperationType::Map: {
+        ASSERT(virt_addr != 0);
+        ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+        m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr);
+
+        // Open references to pages, if we should.
+        if (this->IsHeapPhysicalAddress(phys_addr)) {
+            m_kernel.MemoryManager().Open(phys_addr, num_pages);
+        }
+
+        R_SUCCEED();
+    }
+    case OperationType::Separate: {
+        // TODO: Unimplemented.
+        R_SUCCEED();
+    }
+    case OperationType::ChangePermissions:
+    case OperationType::ChangePermissionsAndRefresh:
+    case OperationType::ChangePermissionsAndRefreshAndFlush:
+        // Nothing to do: host memory protections are not emulated here, so
+        // permission changes are tracked purely in the memory block state.
+        R_SUCCEED();
+    default:
+        UNREACHABLE();
+    }
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+                               size_t num_pages, const KPageGroup& page_group,
+                               const KPageProperties properties, OperationType operation,
+                               bool reuse_ll) {
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+    ASSERT(num_pages > 0);
+    ASSERT(num_pages == page_group.GetNumPages());
+
+    // As we don't allocate page entries in guest memory, we don't need to
+    // allocate them from the page list, so both the page list and the page
+    // properties go unused.
+
+    switch (operation) {
+    case OperationType::MapGroup:
+    case OperationType::MapFirstGroup: {
+        // We want to maintain a reference to every page in the group; for
+        // MapFirstGroup the pages already hold their initial references, so
+        // no new ones are opened.
+        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+
+        for (const auto& node : page_group) {
+            const size_t size{node.GetNumPages() * PageSize};
+
+            // Map the pages.
+            m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress());
+
+            virt_addr += size;
+        }
+
+        // We succeeded! We want to persist the reference to the pages.
+        spg.CancelClose();
+
+        R_SUCCEED();
+    }
+    default:
+        UNREACHABLE();
+    }
+}
+
+void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) {
+    while (page_list->Peek()) {
+        [[maybe_unused]] auto page = page_list->Pop();
+
+        // TODO: Free page entries once they are allocated in guest memory.
+        // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+        // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+        // this->GetPageTableManager().Free(page);
+    }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
new file mode 100644
index 0000000000..ee2c41e67b
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -0,0 +1,759 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "common/page_table.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+enum class DisableMergeAttribute : u8 {
+    None = (0U << 0),
+
+    DisableHead = (1U << 0),
+    DisableHeadAndBody = (1U << 1),
+    EnableHeadAndBody = (1U << 2),
+    DisableTail = (1U << 3),
+    EnableTail = (1U << 4),
+    EnableAndMergeHeadBodyTail = (1U << 5),
+
+    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
+
+struct KPageProperties {
+    KMemoryPermission perm;
+    bool io;
+    bool uncached;
+    DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
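+
+// Illustrative sketch (not itself part of this patch): permissions and merge
+// attributes combine when describing an operation, e.g.
+//
+//     const auto dma = DisableMergeAttribute::DisableHead | DisableMergeAttribute::DisableTail;
+//     const KPageProperties props{KMemoryPermission::UserReadWrite, false, false, dma};
+//
+// The static_asserts above keep the struct a trivial, u32-sized value.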
+
+class KResourceLimit;
+class KSystemResource;
+
+class KPageTableBase {
+    YUZU_NON_COPYABLE(KPageTableBase);
+    YUZU_NON_MOVEABLE(KPageTableBase);
+
+public:
+    using TraversalEntry = Common::PageTable::TraversalEntry;
+    using TraversalContext = Common::PageTable::TraversalContext;
+
+    class MemoryRange {
+    private:
+        KernelCore& m_kernel;
+        KPhysicalAddress m_address;
+        size_t m_size;
+        bool m_heap;
+
+    public:
+        explicit MemoryRange(KernelCore& kernel)
+            : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
+
+        void Set(KPhysicalAddress address, size_t size, bool heap) {
+            m_address = address;
+            m_size = size;
+            m_heap = heap;
+        }
+
+        KPhysicalAddress GetAddress() const {
+            return m_address;
+        }
+        size_t GetSize() const {
+            return m_size;
+        }
+        bool IsHeap() const {
+            return m_heap;
+        }
+
+        void Open();
+        void Close();
+    };
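+
+    // Note (an assumption from the m_heap flag; the definitions live in the
+    // .cpp): Open() and Close() take and release references on the underlying
+    // physical pages when the range is heap-backed, so a MemoryRange can keep
+    // pages alive across an unlock.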
+
+protected:
+    enum MemoryFillValue : u8 {
+        MemoryFillValue_Zero = 0,
+        MemoryFillValue_Stack = 'X',
+        MemoryFillValue_Ipc = 'Y',
+        MemoryFillValue_Heap = 'Z',
+    };
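+
+    // The MemoryFillValue patterns tag freshly cleared memory by origin
+    // (stack, IPC, heap), so reads of uninitialized guest memory stand out
+    // in a dump.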
+
+    enum class OperationType {
+        Map = 0,
+        MapGroup = 1,
+        MapFirstGroup = 2,
+        Unmap = 3,
+        ChangePermissions = 4,
+        ChangePermissionsAndRefresh = 5,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
+    };
+
+    static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
+    static constexpr size_t RegionAlignment = 2_MiB;
+    static_assert(RegionAlignment == KernelAslrAlignment);
+
+    struct PageLinkedList {
+    private:
+        struct Node {
+            Node* m_next;
+            std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+        };
+        static_assert(std::is_trivial_v<Node>);
+
+    private:
+        Node* m_root{};
+
+    public:
+        constexpr PageLinkedList() : m_root(nullptr) {}
+
+        void Push(Node* n) {
+            ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+            n->m_next = m_root;
+            m_root = n;
+        }
+
+        Node* Peek() const {
+            return m_root;
+        }
+
+        Node* Pop() {
+            Node* const r = m_root;
+
+            m_root = r->m_next;
+            r->m_next = nullptr;
+
+            return r;
+        }
+    };
+    static_assert(std::is_trivially_destructible_v<PageLinkedList>);
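+
+    // Illustrative note: each PageLinkedList node is exactly one page; the
+    // first sizeof(Node*) bytes hold the link and the rest is padding, so a
+    // page-aligned allocation can be threaded onto the list with no extra
+    // storage. FinalizeUpdate() drains the list via Peek()/Pop().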
+
+    static constexpr auto DefaultMemoryIgnoreAttr =
+        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
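+
+    // By default, state checks disregard IpcLocked and DeviceShared, since
+    // those attributes track transient IPC and DMA activity rather than the
+    // caller's own mapping state.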
+
+    static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
+        switch (static_cast<Svc::CreateProcessFlag>(as_type &
+                                                    Svc::CreateProcessFlag::AddressSpaceMask)) {
+        case Svc::CreateProcessFlag::AddressSpace64Bit:
+            return 39;
+        case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+            return 36;
+        case Svc::CreateProcessFlag::AddressSpace32Bit:
+        case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+            return 32;
+        default:
+            UNREACHABLE();
+        }
+    }
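+
+    // For example, GetAddressSpaceWidth() above yields a 39-bit (512 GiB)
+    // space for AddressSpace64Bit, a 36-bit (64 GiB) space for
+    // AddressSpace64BitDeprecated, and a 4 GiB space for the 32-bit types.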
+
+private:
+    class KScopedPageTableUpdater {
+    private:
+        KPageTableBase* m_pt;
+        PageLinkedList m_ll;
+
+    public:
+        explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
+        explicit KScopedPageTableUpdater(KPageTableBase& pt)
+            : KScopedPageTableUpdater(std::addressof(pt)) {}
+        ~KScopedPageTableUpdater() {
+            m_pt->FinalizeUpdate(this->GetPageList());
+        }
+
+        PageLinkedList* GetPageList() {
+            return std::addressof(m_ll);
+        }
+    };
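+
+    // Usage sketch, mirroring the call sites in the .cpp above: callers
+    // stack-allocate an updater, pass GetPageList() into Operate(), and the
+    // destructor routes any freed table pages through FinalizeUpdate():
+    //
+    //     KScopedPageTableUpdater updater(this);
+    //     R_TRY(this->Operate(updater.GetPageList(), ...));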
+
+private:
+    KernelCore& m_kernel;
+    Core::System& m_system;
+    KProcessAddress m_address_space_start{};
+    KProcessAddress m_address_space_end{};
+    KProcessAddress m_heap_region_start{};
+    KProcessAddress m_heap_region_end{};
+    KProcessAddress m_current_heap_end{};
+    KProcessAddress m_alias_region_start{};
+    KProcessAddress m_alias_region_end{};
+    KProcessAddress m_stack_region_start{};
+    KProcessAddress m_stack_region_end{};
+    KProcessAddress m_kernel_map_region_start{};
+    KProcessAddress m_kernel_map_region_end{};
+    KProcessAddress m_alias_code_region_start{};
+    KProcessAddress m_alias_code_region_end{};
+    KProcessAddress m_code_region_start{};
+    KProcessAddress m_code_region_end{};
+    size_t m_max_heap_size{};
+    size_t m_mapped_physical_memory_size{};
+    size_t m_mapped_unsafe_physical_memory{};
+    size_t m_mapped_insecure_memory{};
+    size_t m_mapped_ipc_server_memory{};
+    mutable KLightLock m_general_lock;
+    mutable KLightLock m_map_physical_memory_lock;
+    KLightLock m_device_map_lock;
+    std::unique_ptr<Common::PageTable> m_impl{};
+    Core::Memory::Memory* m_memory{};
+    KMemoryBlockManager m_memory_block_manager{};
+    u32 m_allocate_option{};
+    u32 m_address_space_width{};
+    bool m_is_kernel{};
+    bool m_enable_aslr{};
+    bool m_enable_device_address_space_merge{};
+    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+    KBlockInfoManager* m_block_info_manager{};
+    KResourceLimit* m_resource_limit{};
+    const KMemoryRegion* m_cached_physical_linear_region{};
+    const KMemoryRegion* m_cached_physical_heap_region{};
+    MemoryFillValue m_heap_fill_value{};
+    MemoryFillValue m_ipc_fill_value{};
+    MemoryFillValue m_stack_fill_value{};
+
+public:
+    explicit KPageTableBase(KernelCore& kernel);
+    ~KPageTableBase();
+
+    Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
+                               Core::Memory::Memory& memory);
+    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+                                bool enable_device_address_space_merge, bool from_back,
+                                KMemoryManager::Pool pool, KProcessAddress code_address,
+                                size_t code_size, KSystemResource* system_resource,
+                                KResourceLimit* resource_limit, Core::Memory::Memory& memory);
+
+    void Finalize();
+
+    bool IsKernel() const {
+        return m_is_kernel;
+    }
+    bool IsAslrEnabled() const {
+        return m_enable_aslr;
+    }
+
+    bool Contains(KProcessAddress addr) const {
+        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
+    }
+
+    bool Contains(KProcessAddress addr, size_t size) const {
+        return m_address_space_start <= addr && addr < addr + size &&
+               addr + size - 1 <= m_address_space_end - 1;
+    }
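+
+    // In Contains() above, the `addr < addr + size` term guards against
+    // wraparound: a range whose end would overflow the address type is
+    // rejected rather than appearing to fit.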
+
+    bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+        return this->Contains(addr, size) && m_alias_region_start <= addr &&
+               addr + size - 1 <= m_alias_region_end - 1;
+    }
+
+    bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+        return this->Contains(addr, size) && m_heap_region_start <= addr &&
+               addr + size - 1 <= m_heap_region_end - 1;
+    }
+
+    bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+        // Even though Unsafe physical memory is KMemoryState::Normal, it must be mapped
+        // inside the alias code region.
+        return this->CanContain(addr, size, Svc::MemoryState::AliasCode);
+    }
+
+    KScopedLightLock AcquireDeviceMapLock() {
+        return KScopedLightLock(m_device_map_lock);
+    }
+
+    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+    size_t GetRegionSize(Svc::MemoryState state) const;
+    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+    KProcessAddress GetRegionAddress(KMemoryState state) const {
+        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    size_t GetRegionSize(KMemoryState state) const {
+        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        return this->CanContain(addr, size,
+                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+
+public:
+    Core::Memory::Memory& GetMemory() {
+        return *m_memory;
+    }
+
+    Core::Memory::Memory& GetMemory() const {
+        return *m_memory;
+    }
+
+    Common::PageTable& GetImpl() {
+        return *m_impl;
+    }
+
+    Common::PageTable& GetImpl() const {
+        return *m_impl;
+    }
+
+    size_t GetNumGuardPages() const {
+        return this->IsKernel() ? 1 : 4;
+    }
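+
+    // Illustrative: with the 4 KiB PageSize, this is one guard page for the
+    // kernel table and 16 KiB of guard space for process tables.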
+
+protected:
+    // NOTE: These three functions (both Operate overloads and FinalizeUpdate) are
+    // virtual in Nintendo's kernel. We devirtualize them, since KPageTable is the
+    // only derived class and this avoids unnecessary virtual function calls.
+    Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+                   KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties,
+                   OperationType operation, bool reuse_ll);
+    Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+                   const KPageGroup& page_group, const KPageProperties properties,
+                   OperationType operation, bool reuse_ll);
+    void FinalizeUpdate(PageLinkedList* page_list);
+
+    bool IsLockedByCurrentThread() const {
+        return m_general_lock.IsLockedByCurrentThread();
+    }
+
+    bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+            m_cached_physical_linear_region, phys_addr);
+    }
+
+    bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+            m_cached_physical_linear_region, phys_addr, size);
+    }
+
+    bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+                                                             phys_addr);
+    }
+
+    bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+                                                             phys_addr, size);
+    }
+
+    bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+        ASSERT(!this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+                                                             phys_addr);
+    }
+
+    bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+        return (m_address_space_start <= addr) &&
+               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
+    }
+
+private:
+    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+                                 size_t num_pages, size_t alignment, size_t offset,
+                                 size_t guard_pages) const;
+
+    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+                                      KMemoryState state_mask, KMemoryState state,
+                                      KMemoryPermission perm_mask, KMemoryPermission perm,
+                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+    Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                      KMemoryState state, KMemoryPermission perm_mask,
+                                      KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                      KMemoryAttribute attr) const {
+        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+                                                  perm, attr_mask, attr));
+    }
+
+    Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                            KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+                            KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                            KProcessAddress addr, size_t size, KMemoryState state_mask,
+                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+    Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+                            KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+        R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+                                        state_mask, state, perm_mask, perm, attr_mask, attr,
+                                        ignore_attr));
+    }
+    Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+                                        attr_mask, attr, ignore_attr));
+    }
+
+    Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr,
+                             size_t size, KMemoryState state_mask, KMemoryState state,
+                             KMemoryPermission perm_mask, KMemoryPermission perm,
+                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                             KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+    Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+                        KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+                        KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                        KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+                        const KPageGroup* pg);
+
+    Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+                         KProcessAddress address) const;
+
+    Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+                            Svc::MemoryState state) const;
+
+    Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                   size_t num_pages, KMemoryPermission perm);
+    Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                            const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
+    void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                        const KPageGroup& pg);
+
+    Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+    bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+
+    Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size,
+                                             KMemoryState state_mask, KMemoryState state,
+                                             KMemoryPermission perm_mask, KMemoryPermission perm,
+                                             KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+
+    Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr,
+                     size_t size, KMemoryState state, KMemoryPermission perm);
+    Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size,
+                            KMemoryState state);
+    Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size,
+                             KMemoryState state);
+
+    Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+                             KProcessAddress address, size_t size, KMemoryPermission test_perm,
+                             KMemoryState dst_state);
+    Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
+                             KMemoryPermission test_perm, KMemoryState dst_state,
+                             KPageTableBase& src_page_table, bool send);
+    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
+                                                 size_t size, KMemoryPermission prot_perm);
+
+    size_t GetSize(KMemoryState state) const;
+
+    bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+        // Validate pre-conditions.
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return this->GetImpl().GetPhysicalAddress(out, virt_addr);
+    }
+
+public:
+    bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+        // Validate pre-conditions.
+        ASSERT(!this->IsLockedByCurrentThread());
+
+        // Acquire exclusive access to the table while doing address translation.
+        KScopedLightLock lk(m_general_lock);
+
+        return this->GetPhysicalAddressLocked(out, virt_addr);
+    }
+
+    KBlockInfoManager* GetBlockInfoManager() const {
+        return m_block_info_manager;
+    }
+
+    Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+    Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+                                      Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+                              KMemoryAttribute attr);
+    Result SetHeapSize(KProcessAddress* out, size_t size);
+    Result SetMaxHeapSize(size_t size);
+    Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+                     KProcessAddress addr) const;
+    Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const;
+    Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+        R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static));
+    }
+    Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+        R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io));
+    }
+    Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+    Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+                       Svc::MemoryMapping mapping, Svc::MemoryPermission perm);
+    Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+                         Svc::MemoryMapping mapping);
+    Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+    Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
+    Result MapInsecureMemory(KProcessAddress address, size_t size);
+    Result UnmapInsecureMemory(KProcessAddress address, size_t size);
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+                                region_num_pages, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+
+    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm);
+    Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+    Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                        KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+                        KMemoryPermission perm);
+    Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+                        KMemoryPermission perm);
+    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+
+    Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+                                KMemoryState state_mask, KMemoryState state,
+                                KMemoryPermission perm_mask, KMemoryPermission perm,
+                                KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+    Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
+    Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
+
+    Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+                             KMemoryState state);
+
+    Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+    Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+                              KMemoryState state);
+
+    Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+                                        KMemoryPermission perm, bool is_aligned, bool check_heap);
+    Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
+
+    Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
+    Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
+
+    Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+                                                   KProcessAddress address, size_t size,
+                                                   KMemoryPermission perm, bool is_aligned);
+    Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address,
+                                                     size_t size);
+
+    Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+    Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
+
+    Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+                                 KMemoryPermission perm);
+    Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+    Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size);
+    Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+
+    Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address,
+                                                   size_t size);
+
+    Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+                                      KProcessAddress src_addr, KMemoryState src_state_mask,
+                                      KMemoryState src_state, KMemoryPermission src_test_perm,
+                                      KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+    Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr,
+                                        KMemoryState src_state_mask, KMemoryState src_state,
+                                        KMemoryPermission src_test_perm,
+                                        KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+    Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+                                      KMemoryState dst_state_mask, KMemoryState dst_state,
+                                      KMemoryPermission dst_test_perm,
+                                      KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+                                      KProcessAddress src_addr);
+    Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+                                        KMemoryState dst_state_mask, KMemoryState dst_state,
+                                        KMemoryPermission dst_test_perm,
+                                        KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+                                        void* buffer);
+    Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr,
+                                    size_t size, KMemoryState dst_state_mask,
+                                    KMemoryState dst_state, KMemoryPermission dst_test_perm,
+                                    KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+                                    KProcessAddress src_addr, KMemoryState src_state_mask,
+                                    KMemoryState src_state, KMemoryPermission src_test_perm,
+                                    KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+    Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+        KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+        KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+        KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+        KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+        KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+
+    Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+                       KPageTableBase& src_page_table, KMemoryPermission test_perm,
+                       KMemoryState dst_state, bool send);
+    Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+    Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
+
+    Result MapPhysicalMemory(KProcessAddress address, size_t size);
+    Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
+
+    Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+    Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+
+    Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt,
+                              KProcessAddress src_address);
+
+public:
+    KProcessAddress GetAddressSpaceStart() const {
+        return m_address_space_start;
+    }
+    KProcessAddress GetHeapRegionStart() const {
+        return m_heap_region_start;
+    }
+    KProcessAddress GetAliasRegionStart() const {
+        return m_alias_region_start;
+    }
+    KProcessAddress GetStackRegionStart() const {
+        return m_stack_region_start;
+    }
+    KProcessAddress GetKernelMapRegionStart() const {
+        return m_kernel_map_region_start;
+    }
+    KProcessAddress GetCodeRegionStart() const {
+        return m_code_region_start;
+    }
+    KProcessAddress GetAliasCodeRegionStart() const {
+        return m_alias_code_region_start;
+    }
+
+    size_t GetAddressSpaceSize() const {
+        return m_address_space_end - m_address_space_start;
+    }
+    size_t GetHeapRegionSize() const {
+        return m_heap_region_end - m_heap_region_start;
+    }
+    size_t GetAliasRegionSize() const {
+        return m_alias_region_end - m_alias_region_start;
+    }
+    size_t GetStackRegionSize() const {
+        return m_stack_region_end - m_stack_region_start;
+    }
+    size_t GetKernelMapRegionSize() const {
+        return m_kernel_map_region_end - m_kernel_map_region_start;
+    }
+    size_t GetCodeRegionSize() const {
+        return m_code_region_end - m_code_region_start;
+    }
+    size_t GetAliasCodeRegionSize() const {
+        return m_alias_code_region_end - m_alias_code_region_start;
+    }
+
+    size_t GetNormalMemorySize() const {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
+    }
+
+    size_t GetCodeSize() const;
+    size_t GetCodeDataSize() const;
+    size_t GetAliasCodeSize() const;
+    size_t GetAliasCodeDataSize() const;
+
+    u32 GetAllocateOption() const {
+        return m_allocate_option;
+    }
+
+    u32 GetAddressSpaceWidth() const {
+        return m_address_space_width;
+    }
+
+public:
+    // Linear mapped
+    static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+        return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+    }
+
+    static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel,
+                                                           KVirtualAddress addr) {
+        return kernel.MemoryLayout().GetLinearPhysicalAddress(addr);
+    }
+
+    static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel,
+                                                         KPhysicalAddress addr) {
+        return kernel.MemoryLayout().GetLinearVirtualAddress(addr);
+    }
+
+    // Heap
+    static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+        return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+    }
+
+    static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) {
+        return GetLinearMappedPhysicalAddress(kernel, addr);
+    }
+
+    static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) {
+        return GetLinearMappedVirtualAddress(kernel, addr);
+    }
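+
+    // In this host-backed table the heap helpers coincide with the linear-map
+    // helpers above; Nintendo's kernel distinguishes the two translations, but
+    // here both resolve through the same memory layout.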
+
+    // Member heap
+    u8* GetHeapVirtualPointer(KPhysicalAddress addr) {
+        return GetHeapVirtualPointer(m_kernel, addr);
+    }
+
+    KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
+        return GetHeapPhysicalAddress(m_kernel, addr);
+    }
+
+    KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
+        return GetHeapVirtualAddress(m_kernel, addr);
+    }
+
+    // TODO: GetPageTableVirtualAddress
+    // TODO: GetPageTablePhysicalAddress
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 1f4b0755d6..3cfb414e5d 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
         const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
         const bool enable_das_merge =
             False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
-        R_TRY(m_page_table.InitializeForProcess(
-            as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address,
-            params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory()));
+        R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+                                      params.code_address, params.code_num_pages * PageSize,
+                                      m_system_resource, res_limit, this->GetMemory()));
     }
     ON_RESULT_FAILURE_2 {
         m_page_table.Finalize();
@@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
         const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
         const bool enable_das_merge =
             False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
-        R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
-                                                !enable_aslr, pool, params.code_address, code_size,
-                                                m_system_resource, res_limit, this->GetMemory()));
+        R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+                                      params.code_address, code_size, m_system_resource, res_limit,
+                                      this->GetMemory()));
     }
     ON_RESULT_FAILURE_2 {
         m_page_table.Finalize();
@@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_
 void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
 
 KProcess::KProcess(KernelCore& kernel)
-    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()},
-      m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()},
-      m_address_arbiter{kernel.System()}, m_handle_table{kernel} {}
+    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
+      m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
+      m_handle_table{kernel} {}
 KProcess::~KProcess() = default;
 
 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index f9f755afa2..8339465fd8 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -5,13 +5,14 @@
 
 #include <map>
 
+#include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/code_set.h"
 #include "core/hle/kernel/k_address_arbiter.h"
 #include "core/hle/kernel/k_capabilities.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/k_system_resource.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_local_page.h"
@@ -65,7 +66,7 @@ private:
     using TLPIterator = TLPTree::iterator;
 
 private:
-    KPageTable m_page_table;
+    KProcessPageTable m_page_table;
     std::atomic<size_t> m_used_kernel_memory_size{};
     TLPTree m_fully_used_tlp_tree{};
     TLPTree m_partially_used_tlp_tree{};
@@ -254,9 +255,8 @@ public:
         return m_is_hbl;
     }
 
-    Kernel::KMemoryManager::Direction GetAllocateOption() const {
-        // TODO: property of the KPageTableBase
-        return KMemoryManager::Direction::FromFront;
+    u32 GetAllocateOption() const {
+        return m_page_table.GetAllocateOption();
     }
 
     ThreadList& GetThreadList() {
@@ -295,10 +295,10 @@ public:
         return m_list_lock;
     }
 
-    KPageTable& GetPageTable() {
+    KProcessPageTable& GetPageTable() {
         return m_page_table;
     }
-    const KPageTable& GetPageTable() const {
+    const KProcessPageTable& GetPageTable() const {
         return m_page_table;
     }
 
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h
new file mode 100644
index 0000000000..b7ae5abd01
--- /dev/null
+++ b/src/core/hle/kernel/k_process_page_table.h
@@ -0,0 +1,480 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Core {
+class ARM_Interface;
+}
+
+namespace Kernel {
+
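+// Forwarding facade over KPageTable: KProcess owns a KProcessPageTable, and
+// every member below delegates to the wrapped table, keeping the process-facing
+// interface stable while KPageTableBase absorbs the implementation details.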
+class KProcessPageTable {
+private:
+    KPageTable m_page_table;
+
+public:
+    KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {}
+
+    Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge,
+                      bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address,
+                      size_t code_size, KSystemResource* system_resource,
+                      KResourceLimit* resource_limit, Core::Memory::Memory& memory) {
+        R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
+                                                   from_back, pool, code_address, code_size,
+                                                   system_resource, resource_limit, memory));
+    }
+
+    void Finalize() {
+        m_page_table.Finalize();
+    }
+
+    Core::Memory::Memory& GetMemory() {
+        return m_page_table.GetMemory();
+    }
+
+    Core::Memory::Memory& GetMemory() const {
+        return m_page_table.GetMemory();
+    }
+
+    Common::PageTable& GetImpl() {
+        return m_page_table.GetImpl();
+    }
+
+    Common::PageTable& GetImpl() const {
+        return m_page_table.GetImpl();
+    }
+
+    size_t GetNumGuardPages() const {
+        return m_page_table.GetNumGuardPages();
+    }
+
+    KScopedLightLock AcquireDeviceMapLock() {
+        return m_page_table.AcquireDeviceMapLock();
+    }
+
+    Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) {
+        R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
+    }
+
+    Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+                                      Svc::MemoryPermission perm) {
+        R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
+    }
+
+    Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+                              KMemoryAttribute attr) {
+        R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
+    }
+
+    Result SetHeapSize(KProcessAddress* out, size_t size) {
+        R_RETURN(m_page_table.SetHeapSize(out, size));
+    }
+
+    Result SetMaxHeapSize(size_t size) {
+        R_RETURN(m_page_table.SetMaxHeapSize(size));
+    }
+
+    Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+                     KProcessAddress addr) const {
+        R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
+    }
+
+    Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) {
+        R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
+    }
+
+    Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+        R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
+    }
+
+    Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+        R_RETURN(m_page_table.QueryIoMapping(out, address, size));
+    }
+
+    Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+        R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
+    }
+
+    Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+        R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
+    }
+
+    Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+        R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
+    }
+
+    Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+        R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
+    }
+
+    Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
+    }
+
+    Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+                       Svc::MemoryMapping mapping, Svc::MemoryPermission perm) {
+        R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
+    }
+
+    Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+                         Svc::MemoryMapping mapping) {
+        R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
+    }
+
+    Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
+    }
+
+    Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapRegion(region_type, perm));
+    }
+
+    Result MapInsecureMemory(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.MapInsecureMemory(address, size));
+    }
+
+    Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
+    }
+
+    Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+                        KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
+    }
+
+    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) {
+        R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
+    }
+
+    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm) {
+        R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
+    }
+
+    Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
+        R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
+    }
+
+    Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+                                KMemoryState state_mask, KMemoryState state,
+                                KMemoryPermission perm_mask, KMemoryPermission perm,
+                                KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+        R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state,
+                                                   perm_mask, perm, attr_mask, attr));
+    }
+
+    Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
+    }
+
+    Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+        R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size));
+    }
+
+    Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+                             KMemoryState state) {
+        R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state));
+    }
+
+    Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+        R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size));
+    }
+
+    Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+                              KMemoryState state) {
+        R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state));
+    }
+
+    Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+                                        KMemoryPermission perm, bool is_aligned, bool check_heap) {
+        R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm,
+                                                           is_aligned, check_heap));
+    }
+
+    Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
+        R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
+    }
+
+    Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
+    }
+
+    Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
+    }
+
+    Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+                                                   KProcessAddress address, size_t size,
+                                                   KMemoryPermission perm, bool is_aligned) {
+        R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm,
+                                                                      is_aligned));
+    }
+
+    Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+                                                     KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
+    }
+
+    Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
+    }
+
+    Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
+    }
+
+    Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+                                 KMemoryPermission perm) {
+        R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
+    }
+
+    Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+        R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
+    }
+
+    Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
+    }
+
+    Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+        R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
+    }
+
+    Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out,
+                                                   KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
+    }
+
+    Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+                                      KProcessAddress src_addr, KMemoryState src_state_mask,
+                                      KMemoryState src_state, KMemoryPermission src_test_perm,
+                                      KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+        R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask,
+                                                         src_state, src_test_perm, src_attr_mask,
+                                                         src_attr));
+    }
+
+    Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr,
+                                        KMemoryState src_state_mask, KMemoryState src_state,
+                                        KMemoryPermission src_test_perm,
+                                        KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+        R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask,
+                                                           src_state, src_test_perm, src_attr_mask,
+                                                           src_attr));
+    }
+
+    Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+                                      KMemoryState dst_state_mask, KMemoryState dst_state,
+                                      KMemoryPermission dst_test_perm,
+                                      KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+                                      KProcessAddress src_addr) {
+        R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state,
+                                                         dst_test_perm, dst_attr_mask, dst_attr,
+                                                         src_addr));
+    }
+
+    Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+                                        KMemoryState dst_state_mask, KMemoryState dst_state,
+                                        KMemoryPermission dst_test_perm,
+                                        KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+                                        void* src_addr) {
+        R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask,
+                                                           dst_state, dst_test_perm, dst_attr_mask,
+                                                           dst_attr, src_addr));
+    }
+
+    Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr,
+                                    size_t size, KMemoryState dst_state_mask,
+                                    KMemoryState dst_state, KMemoryPermission dst_test_perm,
+                                    KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+                                    KProcessAddress src_addr, KMemoryState src_state_mask,
+                                    KMemoryState src_state, KMemoryPermission src_test_perm,
+                                    KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+        R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(
+            dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+            dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+            src_attr_mask, src_attr));
+    }
+
+    Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+        KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size,
+        KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+        KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+        KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+        KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+        R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
+            dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+            dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+            src_attr_mask, src_attr));
+    }
+
+    Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+                       KProcessPageTable& src_page_table, KMemoryPermission test_perm,
+                       KMemoryState dst_state, bool send) {
+        R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table,
+                                          test_perm, dst_state, send));
+    }
+
+    Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
+        R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
+    }
+
+    Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
+        R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
+    }
+
+    Result MapPhysicalMemory(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.MapPhysicalMemory(address, size));
+    }
+
+    Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
+    }
+
+    Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
+    }
+
+    Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+        R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
+    }
+
+    Result UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+                              KProcessPageTable& src_page_table, KProcessAddress src_address) {
+        R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table,
+                                                 src_address));
+    }
+
+    bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) {
+        return m_page_table.GetPhysicalAddress(out, address);
+    }
+
+    bool Contains(KProcessAddress addr, size_t size) const {
+        return m_page_table.Contains(addr, size);
+    }
+
+    bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+        return m_page_table.IsInAliasRegion(addr, size);
+    }
+    bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+        return m_page_table.IsInHeapRegion(addr, size);
+    }
+    bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+        return m_page_table.IsInUnsafeAliasRegion(addr, size);
+    }
+
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        return m_page_table.CanContain(addr, size, state);
+    }
+
+    KProcessAddress GetAddressSpaceStart() const {
+        return m_page_table.GetAddressSpaceStart();
+    }
+    KProcessAddress GetHeapRegionStart() const {
+        return m_page_table.GetHeapRegionStart();
+    }
+    KProcessAddress GetAliasRegionStart() const {
+        return m_page_table.GetAliasRegionStart();
+    }
+    KProcessAddress GetStackRegionStart() const {
+        return m_page_table.GetStackRegionStart();
+    }
+    KProcessAddress GetKernelMapRegionStart() const {
+        return m_page_table.GetKernelMapRegionStart();
+    }
+    KProcessAddress GetCodeRegionStart() const {
+        return m_page_table.GetCodeRegionStart();
+    }
+    KProcessAddress GetAliasCodeRegionStart() const {
+        return m_page_table.GetAliasCodeRegionStart();
+    }
+
+    size_t GetAddressSpaceSize() const {
+        return m_page_table.GetAddressSpaceSize();
+    }
+    size_t GetHeapRegionSize() const {
+        return m_page_table.GetHeapRegionSize();
+    }
+    size_t GetAliasRegionSize() const {
+        return m_page_table.GetAliasRegionSize();
+    }
+    size_t GetStackRegionSize() const {
+        return m_page_table.GetStackRegionSize();
+    }
+    size_t GetKernelMapRegionSize() const {
+        return m_page_table.GetKernelMapRegionSize();
+    }
+    size_t GetCodeRegionSize() const {
+        return m_page_table.GetCodeRegionSize();
+    }
+    size_t GetAliasCodeRegionSize() const {
+        return m_page_table.GetAliasCodeRegionSize();
+    }
+
+    size_t GetNormalMemorySize() const {
+        return m_page_table.GetNormalMemorySize();
+    }
+
+    size_t GetCodeSize() const {
+        return m_page_table.GetCodeSize();
+    }
+    size_t GetCodeDataSize() const {
+        return m_page_table.GetCodeDataSize();
+    }
+
+    size_t GetAliasCodeSize() const {
+        return m_page_table.GetAliasCodeSize();
+    }
+    size_t GetAliasCodeDataSize() const {
+        return m_page_table.GetAliasCodeDataSize();
+    }
+
+    u32 GetAllocateOption() const {
+        return m_page_table.GetAllocateOption();
+    }
+
+    u32 GetAddressSpaceWidth() const {
+        return m_page_table.GetAddressSpaceWidth();
+    }
+
+    KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) {
+        return m_page_table.GetHeapPhysicalAddress(address);
+    }
+
+    u8* GetHeapVirtualPointer(KPhysicalAddress address) {
+        return m_page_table.GetHeapVirtualPointer(address);
+    }
+
+    KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) {
+        return m_page_table.GetHeapVirtualAddress(address);
+    }
+
+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_page_table.GetBlockInfoManager();
+    }
+
+    KPageTable& GetBasePageTable() {
+        return m_page_table;
+    }
+
+    const KPageTable& GetBasePageTable() const {
+        return m_page_table;
+    }
+};
+
+} // namespace Kernel
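
A note on the new header above: KProcessPageTable is a pure forwarding facade. It owns the concrete KPageTable by value, re-exposes only the process-facing surface so call sites stop naming KPageTable directly, and keeps GetBasePageTable() as an escape hatch. A minimal sketch of the same pattern, using illustrative names that are not part of this patch:

    #include <cstddef>
    #include <cstdint>

    namespace Sketch {

    class ConcreteTable {
    public:
        bool Contains(uint64_t addr, std::size_t size) const {
            // Overflow-safe containment check against the table's address space.
            return addr < m_end && size <= m_end - addr;
        }

    private:
        uint64_t m_end{uint64_t{1} << 39};
    };

    class TableFacade {
    public:
        // Forward the narrow caller-facing API to the owned table.
        bool Contains(uint64_t addr, std::size_t size) const {
            return m_table.Contains(addr, size);
        }

        // Escape hatch for callers that still need the concrete type,
        // mirroring GetBasePageTable() above.
        ConcreteTable& GetBaseTable() {
            return m_table;
        }

    private:
        ConcreteTable m_table{};
    };

    } // namespace Sketch
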
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index c64ceb5302..3ea653163b 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) {
         if (event != nullptr) {
             // // Get the client process/page table.
             // KProcess *client_process             = client_thread->GetOwnerProcess();
-            // KPageTable *client_page_table        = std::addressof(client_process->PageTable());
+            // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable());
 
             // // If we need to, reply with an async error.
             // if (R_FAILED(client_result)) {
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index 07e92aa804..b51941faf4 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l
 
     // Get resource pointer.
     KPhysicalAddress resource_paddr =
-        KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
+        KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address);
     auto* resource =
         m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
 
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 2c45b42324..a632d16349 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
 
 Result KThreadLocalPage::Finalize() {
     // Get the physical address of the page.
-    const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr);
-    ASSERT(phys_addr);
+    KPhysicalAddress phys_addr{};
+    ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr));
 
     // Unmap the page.
     R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
deleted file mode 100644
index 773319ad85..0000000000
--- a/src/core/hle/kernel/process_capability.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <bit>
-
-#include "common/bit_util.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-namespace {
-
-// clang-format off
-
-// Shift offsets for kernel capability types.
-enum : u32 {
-    CapabilityOffset_PriorityAndCoreNum = 3,
-    CapabilityOffset_Syscall            = 4,
-    CapabilityOffset_MapPhysical        = 6,
-    CapabilityOffset_MapIO              = 7,
-    CapabilityOffset_MapRegion          = 10,
-    CapabilityOffset_Interrupt          = 11,
-    CapabilityOffset_ProgramType        = 13,
-    CapabilityOffset_KernelVersion      = 14,
-    CapabilityOffset_HandleTableSize    = 15,
-    CapabilityOffset_Debug              = 16,
-};
-
-// Combined mask of all parameters that may be initialized only once.
-constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
-                                   (1U << CapabilityOffset_ProgramType) |
-                                   (1U << CapabilityOffset_KernelVersion) |
-                                   (1U << CapabilityOffset_HandleTableSize) |
-                                   (1U << CapabilityOffset_Debug);
-
-// Packed kernel version indicating 10.4.0
-constexpr u32 PackedKernelVersion = 0x520000;
-
-// Indicates possible types of capabilities that can be specified.
-enum class CapabilityType : u32 {
-    Unset              = 0U,
-    PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
-    Syscall            = (1U << CapabilityOffset_Syscall) - 1,
-    MapPhysical        = (1U << CapabilityOffset_MapPhysical) - 1,
-    MapIO              = (1U << CapabilityOffset_MapIO) - 1,
-    MapRegion          = (1U << CapabilityOffset_MapRegion) - 1,
-    Interrupt          = (1U << CapabilityOffset_Interrupt) - 1,
-    ProgramType        = (1U << CapabilityOffset_ProgramType) - 1,
-    KernelVersion      = (1U << CapabilityOffset_KernelVersion) - 1,
-    HandleTableSize    = (1U << CapabilityOffset_HandleTableSize) - 1,
-    Debug              = (1U << CapabilityOffset_Debug) - 1,
-    Ignorable          = 0xFFFFFFFFU,
-};
-
-// clang-format on
-
-constexpr CapabilityType GetCapabilityType(u32 value) {
-    return static_cast<CapabilityType>((~value & (value + 1)) - 1);
-}
-
-u32 GetFlagBitOffset(CapabilityType type) {
-    const auto value = static_cast<u32>(type);
-    return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
-}
-
-} // Anonymous namespace
-
-Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
-                                                       std::size_t num_capabilities,
-                                                       KPageTable& page_table) {
-    Clear();
-
-    // Allow all cores and priorities.
-    core_mask = 0xF;
-    priority_mask = 0xFFFFFFFFFFFFFFFF;
-    kernel_version = PackedKernelVersion;
-
-    return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
-                                                     std::size_t num_capabilities,
-                                                     KPageTable& page_table) {
-    Clear();
-
-    return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-void ProcessCapabilities::InitializeForMetadatalessProcess() {
-    // Allow all cores and priorities
-    core_mask = 0xF;
-    priority_mask = 0xFFFFFFFFFFFFFFFF;
-    kernel_version = PackedKernelVersion;
-
-    // Allow all system calls and interrupts.
-    svc_capabilities.set();
-    interrupt_capabilities.set();
-
-    // Allow using the maximum possible amount of handles
-    handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
-
-    // Allow all debugging capabilities.
-    is_debuggable = true;
-    can_force_debug = true;
-}
-
-Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
-                                              KPageTable& page_table) {
-    u32 set_flags = 0;
-    u32 set_svc_bits = 0;
-
-    for (std::size_t i = 0; i < num_capabilities; ++i) {
-        const u32 descriptor = capabilities[i];
-        const auto type = GetCapabilityType(descriptor);
-
-        if (type == CapabilityType::MapPhysical) {
-            i++;
-
-            // The MapPhysical type uses two descriptor flags for its parameters.
-            // If there's only one, then there's a problem.
-            if (i >= num_capabilities) {
-                LOG_ERROR(Kernel, "Invalid combination! i={}", i);
-                return ResultInvalidCombination;
-            }
-
-            const auto size_flags = capabilities[i];
-            if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
-                LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
-                return ResultInvalidCombination;
-            }
-
-            const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
-            if (result.IsError()) {
-                LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
-                          descriptor, size_flags);
-                return result;
-            }
-        } else {
-            const auto result =
-                ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
-            if (result.IsError()) {
-                LOG_ERROR(
-                    Kernel,
-                    "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
-                    set_flags, set_svc_bits, descriptor);
-                return result;
-            }
-        }
-    }
-
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-                                                      KPageTable& page_table) {
-    const auto type = GetCapabilityType(flag);
-
-    if (type == CapabilityType::Unset) {
-        return ResultInvalidArgument;
-    }
-
-    // Bail early on ignorable entries; as one would expect,
-    // ignorable descriptors can be ignored.
-    if (type == CapabilityType::Ignorable) {
-        return ResultSuccess;
-    }
-
-    // Ensure that the given flag hasn't already been initialized.
-    // If it has been, then bail.
-    const u32 flag_length = GetFlagBitOffset(type);
-    const u32 set_flag = 1U << flag_length;
-    if ((set_flag & set_flags & InitializeOnceMask) != 0) {
-        LOG_ERROR(Kernel,
-                  "Attempted to initialize flags that may only be initialized once. set_flags={}",
-                  set_flags);
-        return ResultInvalidCombination;
-    }
-    set_flags |= set_flag;
-
-    switch (type) {
-    case CapabilityType::PriorityAndCoreNum:
-        return HandlePriorityCoreNumFlags(flag);
-    case CapabilityType::Syscall:
-        return HandleSyscallFlags(set_svc_bits, flag);
-    case CapabilityType::MapIO:
-        return HandleMapIOFlags(flag, page_table);
-    case CapabilityType::MapRegion:
-        return HandleMapRegionFlags(flag, page_table);
-    case CapabilityType::Interrupt:
-        return HandleInterruptFlags(flag);
-    case CapabilityType::ProgramType:
-        return HandleProgramTypeFlags(flag);
-    case CapabilityType::KernelVersion:
-        return HandleKernelVersionFlags(flag);
-    case CapabilityType::HandleTableSize:
-        return HandleHandleTableFlags(flag);
-    case CapabilityType::Debug:
-        return HandleDebugFlags(flag);
-    default:
-        break;
-    }
-
-    LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
-    return ResultInvalidArgument;
-}
-
-void ProcessCapabilities::Clear() {
-    svc_capabilities.reset();
-    interrupt_capabilities.reset();
-
-    core_mask = 0;
-    priority_mask = 0;
-
-    handle_table_size = 0;
-    kernel_version = 0;
-
-    program_type = ProgramType::SysModule;
-
-    is_debuggable = false;
-    can_force_debug = false;
-}
-
-Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
-    if (priority_mask != 0 || core_mask != 0) {
-        LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
-                  priority_mask, core_mask);
-        return ResultInvalidArgument;
-    }
-
-    const u32 core_num_min = (flags >> 16) & 0xFF;
-    const u32 core_num_max = (flags >> 24) & 0xFF;
-    if (core_num_min > core_num_max) {
-        LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
-                  core_num_min, core_num_max);
-        return ResultInvalidCombination;
-    }
-
-    const u32 priority_min = (flags >> 10) & 0x3F;
-    const u32 priority_max = (flags >> 4) & 0x3F;
-    if (priority_min > priority_max) {
-        LOG_ERROR(Kernel,
-                  "Priority min is greater than priority max! priority_min={}, priority_max={}",
-                  priority_min, priority_max);
-        return ResultInvalidCombination;
-    }
-
-    // The switch only has 4 usable cores.
-    if (core_num_max >= 4) {
-        LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
-        return ResultInvalidCoreId;
-    }
-
-    const auto make_mask = [](u64 min, u64 max) {
-        const u64 range = max - min + 1;
-        const u64 mask = (1ULL << range) - 1;
-
-        return mask << min;
-    };
-
-    core_mask = make_mask(core_num_min, core_num_max);
-    priority_mask = make_mask(priority_min, priority_max);
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
-    const u32 index = flags >> 29;
-    const u32 svc_bit = 1U << index;
-
-    // If we've already set this svc before, bail.
-    if ((set_svc_bits & svc_bit) != 0) {
-        return ResultInvalidCombination;
-    }
-    set_svc_bits |= svc_bit;
-
-    const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
-    for (u32 i = 0; i < 24; ++i) {
-        const u32 svc_number = index * 24 + i;
-
-        if ((svc_mask & (1U << i)) == 0) {
-            continue;
-        }
-
-        svc_capabilities[svc_number] = true;
-    }
-
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
-                                                   KPageTable& page_table) {
-    // TODO(Lioncache): Implement once the memory manager can handle this.
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
-    // TODO(Lioncache): Implement once the memory manager can handle this.
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
-    // TODO(Lioncache): Implement once the memory manager can handle this.
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
-    constexpr u32 interrupt_ignore_value = 0x3FF;
-    const u32 interrupt0 = (flags >> 12) & 0x3FF;
-    const u32 interrupt1 = (flags >> 22) & 0x3FF;
-
-    for (u32 interrupt : {interrupt0, interrupt1}) {
-        if (interrupt == interrupt_ignore_value) {
-            continue;
-        }
-
-        // NOTE:
-        // This should be checking a generic interrupt controller value
-        // as part of the calculation, however, given we don't currently
-        // emulate that, it's sufficient to mark every interrupt as defined.
-
-        if (interrupt >= interrupt_capabilities.size()) {
-            LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
-                      interrupt);
-            return ResultOutOfRange;
-        }
-
-        interrupt_capabilities[interrupt] = true;
-    }
-
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
-    const u32 reserved = flags >> 17;
-    if (reserved != 0) {
-        LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
-        return ResultReservedUsed;
-    }
-
-    program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
-    // Yes, the internal member variable is checked in the actual kernel here.
-    // This might look odd for options that are only allowed to be initialized
-    // just once, however the kernel has a separate initialization function for
-    // kernel processes and userland processes. The kernel variant sets this
-    // member variable ahead of time.
-
-    const u32 major_version = kernel_version >> 19;
-
-    if (major_version != 0 || flags < 0x80000) {
-        LOG_ERROR(Kernel,
-                  "Kernel version is non zero or flags are too small! major_version={}, flags={}",
-                  major_version, flags);
-        return ResultInvalidArgument;
-    }
-
-    kernel_version = flags;
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
-    const u32 reserved = flags >> 26;
-    if (reserved != 0) {
-        LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
-        return ResultReservedUsed;
-    }
-
-    handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
-    return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
-    const u32 reserved = flags >> 19;
-    if (reserved != 0) {
-        LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
-        return ResultReservedUsed;
-    }
-
-    is_debuggable = (flags & 0x20000) != 0;
-    can_force_debug = (flags & 0x40000) != 0;
-    return ResultSuccess;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
deleted file mode 100644
index ff05dc5ff7..0000000000
--- a/src/core/hle/kernel/process_capability.h
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <bitset>
-
-#include "common/common_types.h"
-
-union Result;
-
-namespace Kernel {
-
-class KPageTable;
-
-/// The possible types of programs that may be indicated
-/// by the program type capability descriptor.
-enum class ProgramType {
-    SysModule,
-    Application,
-    Applet,
-};
-
-/// Handles kernel capability descriptors that are provided by
-/// application metadata. These descriptors provide information
-/// that alters certain parameters for the kernel process instance
-/// that will run said application (or applet).
-///
-/// Capabilities are a sequence of flag descriptors, that indicate various
-/// configurations and constraints for a particular process.
-///
-/// Flag types are indicated by a sequence of set low bits. E.g. the
-/// types are indicated with the low bits as follows (where x indicates "don't care"):
-///
-/// - Priority and core mask   : 0bxxxxxxxxxxxx0111
-/// - Allowed service call mask: 0bxxxxxxxxxxx01111
-/// - Map physical memory      : 0bxxxxxxxxx0111111
-/// - Map IO memory            : 0bxxxxxxxx01111111
-/// - Interrupts               : 0bxxxx011111111111
-/// - Application type         : 0bxx01111111111111
-/// - Kernel version           : 0bx011111111111111
-/// - Handle table size        : 0b0111111111111111
-/// - Debugger flags           : 0b1111111111111111
-///
-/// These are essentially a bit offset subtracted by 1 to create a mask.
-/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000)
-///      subtracted by one (7 -> 0b0111)
-///
-/// An example of a bit layout (using the map physical layout):
-/// <example>
-///   The MapPhysical type indicates a sequence entry pair of:
-///
-///   [initial, memory_flags], where:
-///
-///   initial:
-///     bits:
-///       7-24: Starting page to map memory at.
-///       25  : Indicates if the memory should be mapped as read only.
-///
-///   memory_flags:
-///     bits:
-///       7-20 : Number of pages to map
-///       21-25: Seems to be reserved (still checked against though)
-///       26   : Whether or not the memory being mapped is IO memory, or physical memory
-/// </example>
-///
-class ProcessCapabilities {
-public:
-    using InterruptCapabilities = std::bitset<1024>;
-    using SyscallCapabilities = std::bitset<192>;
-
-    ProcessCapabilities() = default;
-    ProcessCapabilities(const ProcessCapabilities&) = delete;
-    ProcessCapabilities(ProcessCapabilities&&) = default;
-
-    ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
-    ProcessCapabilities& operator=(ProcessCapabilities&&) = default;
-
-    /// Initializes this process capabilities instance for a kernel process.
-    ///
-    /// @param capabilities     The capabilities to parse
-    /// @param num_capabilities The number of capabilities to parse.
-    /// @param page_table       The memory manager to use for handling any mapping-related
-    ///                         operations (such as mapping IO memory, etc).
-    ///
-    /// @returns ResultSuccess if this capabilities instance was able to be initialized,
-    ///          otherwise, an error code upon failure.
-    ///
-    Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
-                                      KPageTable& page_table);
-
-    /// Initializes this process capabilities instance for a userland process.
-    ///
-    /// @param capabilities     The capabilities to parse.
-    /// @param num_capabilities The total number of capabilities to parse.
-    /// @param page_table       The memory manager to use for handling any mapping-related
-    ///                         operations (such as mapping IO memory, etc).
-    ///
-    /// @returns ResultSuccess if this capabilities instance was able to be initialized,
-    ///          otherwise, an error code upon failure.
-    ///
-    Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
-                                    KPageTable& page_table);
-
-    /// Initializes this process capabilities instance for a process that does not
-    /// have any metadata to parse.
-    ///
-    /// This is necessary, as we allow running raw executables, and the internal
-    /// kernel process capabilities also determine what CPU cores the process is
-    /// allowed to run on, and what priorities are allowed for  threads. It also
-    /// determines the max handle table size, what the program type is, whether or
-    /// not the process can be debugged, or whether it's possible for a process to
-    /// forcibly debug another process.
-    ///
-    /// Given the above, this essentially enables all capabilities across the board
-    /// for the process. It allows the process to:
-    ///
-    /// - Run on any core
-    /// - Use any thread priority
-    /// - Use the maximum amount of handles a process is allowed to.
-    /// - Be debuggable
-    /// - Forcibly debug other processes.
-    ///
-    /// Note that this is not a behavior that the kernel allows a process to do via
-    /// a single function like this. This is yuzu-specific behavior to handle
-    /// executables with no capability descriptors whatsoever to derive behavior from.
-    /// It being yuzu-specific is why this is also not the default behavior and not
-    /// done by default in the constructor.
-    ///
-    void InitializeForMetadatalessProcess();
-
-    /// Gets the allowable core mask
-    u64 GetCoreMask() const {
-        return core_mask;
-    }
-
-    /// Gets the allowable priority mask
-    u64 GetPriorityMask() const {
-        return priority_mask;
-    }
-
-    /// Gets the SVC access permission bits
-    const SyscallCapabilities& GetServiceCapabilities() const {
-        return svc_capabilities;
-    }
-
-    /// Gets the valid interrupt bits.
-    const InterruptCapabilities& GetInterruptCapabilities() const {
-        return interrupt_capabilities;
-    }
-
-    /// Gets the program type for this process.
-    ProgramType GetProgramType() const {
-        return program_type;
-    }
-
-    /// Gets the number of total allowable handles for the process' handle table.
-    s32 GetHandleTableSize() const {
-        return handle_table_size;
-    }
-
-    /// Gets the kernel version value.
-    u32 GetKernelVersion() const {
-        return kernel_version;
-    }
-
-    /// Whether or not this process can be debugged.
-    bool IsDebuggable() const {
-        return is_debuggable;
-    }
-
-    /// Whether or not this process can forcibly debug another
-    /// process, even if that process is not considered debuggable.
-    bool CanForceDebug() const {
-        return can_force_debug;
-    }
-
-private:
-    /// Attempts to parse a given sequence of capability descriptors.
-    ///
-    /// @param capabilities     The sequence of capability descriptors to parse.
-    /// @param num_capabilities The number of descriptors within the given sequence.
-    /// @param page_table       The memory manager that will perform any memory
-    ///                         mapping if necessary.
-    ///
-    /// @return ResultSuccess if no errors occur, otherwise an error code.
-    ///
-    Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
-                             KPageTable& page_table);
-
-    /// Attempts to parse a capability descriptor that is only represented by a
-    /// single flag set.
-    ///
-    /// @param set_flags    Running set of flags that are used to catch
-    ///                     flags being initialized more than once when they shouldn't be.
-    /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
-    /// @param flag         The flag to attempt to parse.
-    /// @param page_table   The memory manager that will perform any memory
-    ///                     mapping if necessary.
-    ///
-    /// @return ResultSuccess if no errors occurred, otherwise an error code.
-    ///
-    Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-                                     KPageTable& page_table);
-
-    /// Clears the internal state of this process capability instance. Necessary,
-    /// to have a sane starting point due to us allowing running executables without
-    /// configuration metadata. We assume a process is not going to have metadata,
-    /// and if it turns out that the process does, in fact, have metadata, then
-    /// we attempt to parse it. Thus, we need this to reset data members back to
-    /// a good state.
-    ///
-    /// DO NOT ever make this a public member function. This isn't an invariant
-    /// anything external should depend upon (and if anything comes to rely on it,
-    /// you should immediately be questioning the design of that thing, not this
-    /// class. If the kernel itself can run without depending on behavior like that,
-    /// then so can yuzu).
-    ///
-    void Clear();
-
-    /// Handles flags related to the priority and core number capability flags.
-    Result HandlePriorityCoreNumFlags(u32 flags);
-
-    /// Handles flags related to determining the allowable SVC mask.
-    Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
-
-    /// Handles flags related to mapping physical memory pages.
-    Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
-
-    /// Handles flags related to mapping IO pages.
-    Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
-
-    /// Handles flags related to mapping physical memory regions.
-    Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
-
-    /// Handles flags related to the interrupt capability flags.
-    Result HandleInterruptFlags(u32 flags);
-
-    /// Handles flags related to the program type.
-    Result HandleProgramTypeFlags(u32 flags);
-
-    /// Handles flags related to the handle table size.
-    Result HandleHandleTableFlags(u32 flags);
-
-    /// Handles flags related to the kernel version capability flags.
-    Result HandleKernelVersionFlags(u32 flags);
-
-    /// Handles flags related to debug-specific capabilities.
-    Result HandleDebugFlags(u32 flags);
-
-    SyscallCapabilities svc_capabilities;
-    InterruptCapabilities interrupt_capabilities;
-
-    u64 core_mask = 0;
-    u64 priority_mask = 0;
-
-    s32 handle_table_size = 0;
-    u32 kernel_version = 0;
-
-    ProgramType program_type = ProgramType::SysModule;
-
-    bool is_debuggable = false;
-    bool can_force_debug = false;
-};
-
-} // namespace Kernel
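
The deleted parser keyed its dispatch on GetCapabilityType's trailing-ones trick: (~v & (v + 1)) isolates the lowest clear bit of v, and subtracting 1 yields a mask of the trailing set bits, which is the descriptor's type field regardless of the payload in the upper bits. A small self-checking sketch (not part of the patch) of that computation:

    #include <cstdint>

    constexpr uint32_t TrailingOnesMask(uint32_t v) {
        // (~v & (v + 1)) is the lowest clear bit of v; subtracting 1 turns
        // it into a mask covering the run of set bits below it.
        return (~v & (v + 1u)) - 1u;
    }

    static_assert(TrailingOnesMask(0b1010'0111u) == 0b0111u);    // PriorityAndCoreNum
    static_assert(TrailingOnesMask(0b1100'1111u) == 0b1111u);    // Syscall
    static_assert(TrailingOnesMask(0xFFFFFFFFu) == 0xFFFFFFFFu); // Ignorable (wraps to all ones)
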
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 97f1210def..4ca62860d5 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) {
 // Helper function that performs the common sanity checks for svcMapMemory
 // and svcUnmapMemory. This is doable, as both functions perform their sanitizing
 // in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
+Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr,
+                                  u64 size) {
     if (!Common::Is4KBAligned(dst_addr)) {
         LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
         R_THROW(ResultInvalidAddress);
@@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
     R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
 
     // Set the memory attribute.
-    R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
+    R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
+                                           static_cast<KMemoryAttribute>(attr)));
 }
 
 /// Maps a memory range into a different range.
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index 99330d02a9..793e9f8d01 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
     R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
 
     // Set the heap size.
-    R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size));
+    KProcessAddress address{};
+    R_TRY(GetCurrentProcess(system.Kernel())
+              .GetPageTable()
+              .SetHeapSize(std::addressof(address), size));
+
+    // We succeeded.
+    *out_address = GetInteger(address);
+    R_SUCCEED();
 }
 
 /// Maps memory at a desired address
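
The SetHeapSize rework above also illustrates the convention the new API pushes onto SVC wrappers: receive into a local, and commit the caller-visible out parameter only after the inner call succeeds. A minimal sketch of that shape, where GetValue and ComputeValue are hypothetical names:

    // ComputeValue stands in for any callee using the out-pointer style.
    Result ComputeValue(u64* out);

    Result GetValue(u64* out_value) {
        u64 local{};
        R_TRY(ComputeValue(std::addressof(local)));

        // The output is written only on the success path.
        *out_value = local;
        R_SUCCEED();
    }
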
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index 07cd481755..e1427947b0 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
         R_THROW(ResultInvalidCurrentMemory);
     }
 
-    R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
-                                        KPageTable::ICacheInvalidationStrategy::InvalidateAll));
+    R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size));
 }
 
 Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 51af06e97d..816dcb8d0f 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
     }
 
     auto& current_memory{GetCurrentMemory(system.Kernel())};
-    const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()};
 
-    current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+    KMemoryInfo mem_info;
+    R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address));
 
-    //! This is supposed to be part of the QueryInfo call.
-    *out_page_info = {};
+    const auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+    current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info));
 
     R_SUCCEED();
 }
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index dd0b27f479..749f51f696 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
 
 /// Evaluates a boolean expression, and succeeds if that expression is true.
 #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
+
+#define R_TRY_CATCH(res_expr)                                                                      \
+    {                                                                                              \
+        const auto R_CURRENT_RESULT = (res_expr);                                                  \
+        if (R_FAILED(R_CURRENT_RESULT)) {                                                          \
+            if (false)
+
+#define R_END_TRY_CATCH                                                                            \
+    else if (R_FAILED(R_CURRENT_RESULT)) {                                                         \
+        R_THROW(R_CURRENT_RESULT);                                                                 \
+    }                                                                                              \
+    }                                                                                              \
+    }
+
+#define R_CATCH_ALL()                                                                              \
+    }                                                                                              \
+    else if (R_FAILED(R_CURRENT_RESULT)) {                                                         \
+        if (true)
+
+#define R_CATCH(res_expr)                                                                          \
+    }                                                                                              \
+    else if ((res_expr) == (R_CURRENT_RESULT)) {                                                   \
+        if (true)
+
+#define R_CONVERT(catch_type, convert_type)                                                        \
+    R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_CONVERT_ALL(convert_type)                                                                \
+    R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr))
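
These additions follow the Atmosphère-style try/catch pattern: R_TRY_CATCH evaluates the expression once, each R_CATCH arm matches one result code, R_CONVERT/R_CONVERT_ALL rethrow as a different code, and R_END_TRY_CATCH rethrows anything left unhandled. A hypothetical usage sketch (the specific result codes are illustrative stand-ins):

    Result TranslateMapResult(Result map_result) {
        R_TRY_CATCH(map_result) {
            // Remap one specific failure...
            R_CATCH(ResultInvalidAddress) {
                R_THROW(ResultInvalidCurrentMemory);
            }
            // ...and collapse every other failure into a single code.
            R_CONVERT_ALL(ResultInvalidState)
        } R_END_TRY_CATCH;

        // Reached only when map_result was a success.
        R_SUCCEED();
    }
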
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index c73035c77a..97b6a9385d 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -286,9 +286,14 @@ public:
         rb.Push(ResultSuccess);
     }
 
-    bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
+    bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start,
+                              std::size_t size) const {
         const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
-        const auto start_info{page_table.QueryInfo(start - 1)};
+
+        Kernel::KMemoryInfo start_info;
+        Kernel::Svc::PageInfo page_info;
+        R_ASSERT(
+            page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1));
 
         if (start_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
@@ -298,7 +303,9 @@ public:
             return {};
         }
 
-        const auto end_info{page_table.QueryInfo(start + size)};
+        Kernel::KMemoryInfo end_info;
+        R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info),
+                                      start + size));
 
         if (end_info.GetState() != Kernel::KMemoryState::Free) {
             return {};
@@ -307,7 +314,7 @@ public:
         return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
     }
 
-    Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
+    Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) {
         size = Common::AlignUp(size, Kernel::PageSize);
         size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;
 
@@ -391,12 +398,8 @@ public:
 
             if (bss_size) {
                 auto block_guard = detail::ScopeExit([&] {
-                    page_table.UnmapCodeMemory(
-                        addr + nro_size, bss_addr, bss_size,
-                        Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
-                    page_table.UnmapCodeMemory(
-                        addr, nro_addr, nro_size,
-                        Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
+                    page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
+                    page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
                 });
 
                 const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)};
@@ -578,21 +581,17 @@ public:
         auto& page_table{system.ApplicationProcess()->GetPageTable()};
 
         if (info.bss_size != 0) {
-            R_TRY(page_table.UnmapCodeMemory(
-                info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address,
-                info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+            R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size +
+                                                 info.data_size,
+                                             info.bss_address, info.bss_size));
         }
 
-        R_TRY(page_table.UnmapCodeMemory(
-            info.nro_address + info.text_size + info.ro_size,
-            info.src_addr + info.text_size + info.ro_size, info.data_size,
-            Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
-        R_TRY(page_table.UnmapCodeMemory(
-            info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size,
-            Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
-        R_TRY(page_table.UnmapCodeMemory(
-            info.nro_address, info.src_addr, info.text_size,
-            Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+        R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
+                                         info.src_addr + info.text_size + info.ro_size,
+                                         info.data_size));
+        R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
+                                         info.src_addr + info.text_size, info.ro_size));
+        R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
         return ResultSuccess;
     }
 
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fa52734025..84b60a928b 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -41,7 +41,7 @@ struct Memory::Impl {
     explicit Impl(Core::System& system_) : system{system_} {}
 
     void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
-        current_page_table = &process.GetPageTable().PageTableImpl();
+        current_page_table = &process.GetPageTable().GetImpl();
         current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
 
         const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth();
@@ -195,7 +195,7 @@ struct Memory::Impl {
 
     bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
                    auto on_memory, auto on_rasterizer, auto increment) {
-        const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl();
+        const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
         std::size_t remaining_size = size;
         std::size_t page_index = addr >> YUZU_PAGEBITS;
         std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b
 
 bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
     const Kernel::KProcess& process = *system.ApplicationProcess();
-    const auto& page_table = process.GetPageTable().PageTableImpl();
+    const auto& page_table = process.GetPageTable().GetImpl();
     const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;

From b16fefa106d0dcafb5d7520debe7d1d6438c3ced Mon Sep 17 00:00:00 2001
From: Liam <byteslice@airmail.cc>
Date: Tue, 7 Nov 2023 20:42:22 -0500
Subject: [PATCH 2/3] k_page_table: use more precise icache invalidates

---
 src/core/hle/kernel/k_page_table_base.cpp | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 1cc019c066..c97b4a6b70 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -72,6 +72,11 @@ void InvalidateEntireInstructionCache(Core::System& system) {
     system.InvalidateCpuInstructionCaches();
 }
 
+template <typename AddressType>
+void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) {
+    system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
+}
+
 template <typename AddressType>
 Result InvalidateDataCache(AddressType addr, u64 size) {
     R_SUCCEED();
@@ -1245,7 +1250,7 @@ Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddr
     bool reprotected_pages = false;
     SCOPE_EXIT({
         if (reprotected_pages && any_code_pages) {
-            InvalidateEntireInstructionCache(m_system);
+            InvalidateInstructionCache(m_system, dst_address, size);
         }
     });
 
@@ -1981,7 +1986,7 @@ Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t s
         for (const auto& block : pg) {
             StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
         }
-        InvalidateEntireInstructionCache(m_system);
+        InvalidateInstructionCache(m_system, addr, size);
     }
 
     R_SUCCEED();
@@ -3222,8 +3227,8 @@ Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAdd
     // Perform copy for the last block.
     R_TRY(PerformCopy());
 
-    // Invalidate the entire instruction cache, as this svc allows modifying executable pages.
-    InvalidateEntireInstructionCache(m_system);
+    // Invalidate the instruction cache, as this svc allows modifying executable pages.
+    InvalidateInstructionCache(m_system, dst_address, size);
 
     R_SUCCEED();
 }
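
The helper introduced here narrows invalidation to the written span via Core::System::InvalidateCpuInstructionCacheRange, while the whole-cache InvalidateCpuInstructionCaches remains for paths with no usable range. A sketch of choosing between the two, assuming a caller that sometimes lacks one:

    template <typename AddressType>
    void InvalidateForWrite(Core::System& system, AddressType addr, u64 size, bool range_known) {
        if (range_known) {
            // Precise: only the modified span must be refetched.
            system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
        } else {
            // Conservative fallback: flush everything.
            system.InvalidateCpuInstructionCaches();
        }
    }
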

From 875246f5b29d1a14e6c08a1631a2acc8c8eb3a19 Mon Sep 17 00:00:00 2001
From: Liam <byteslice@airmail.cc>
Date: Fri, 10 Nov 2023 12:01:32 -0500
Subject: [PATCH 3/3] k_page_table: fix shutdown

---
 src/core/hle/kernel/k_page_table_base.cpp | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index c97b4a6b70..6a57ad55c3 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -68,10 +68,6 @@ public:
     }
 };
 
-void InvalidateEntireInstructionCache(Core::System& system) {
-    system.InvalidateCpuInstructionCaches();
-}
-
 template <typename AddressType>
 void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) {
     system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
@@ -435,9 +431,6 @@ void KPageTableBase::Finalize() {
                                   m_mapped_ipc_server_memory);
     }
 
-    // Invalidate the entire instruction cache.
-    InvalidateEntireInstructionCache(m_system);
-
     // Close the backing page table, as the destructor is not called for guest objects.
     m_impl.reset();
 }