diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 9fffd816f6..75897eeae2 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -10,11 +10,65 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
-    const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                               u64 address) const {
+    // Set up invalid defaults.
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+    out_context->next_page = 0;
+
+    // Validate that we can read the actual entry.
+    const auto page = address / page_size;
+    if (page >= backing_addr.size()) {
+        return false;
+    }
+
+    // Validate that the entry is mapped.
+    const auto phys_addr = backing_addr[page];
+    if (phys_addr == 0) {
+        return false;
+    }
+
+    // Populate the results.
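+    // Note: backing_addr stores (physical address - virtual page base), so adding
+    // the full virtual address yields the physical address, offset included.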
+    out_entry->phys_addr = phys_addr + address;
+    out_context->next_page = page + 1;
+    out_context->next_offset = address + page_size;
+
+    return true;
+}
+
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
+    // Set up invalid defaults.
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+
+    // Validate that we can read the actual entry.
+    const auto page = context->next_page;
+    if (page >= backing_addr.size()) {
+        return false;
+    }
+
+    // Validate that the entry is mapped.
+    const auto phys_addr = backing_addr[page];
+    if (phys_addr == 0) {
+        return false;
+    }
+
+    // Populate the results.
+    out_entry->phys_addr = phys_addr + context->next_offset;
+    context->next_page = page + 1;
+    context->next_offset += page_size;
+
+    return true;
+}
+
+void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
+    const std::size_t num_page_table_entries{1ULL
+                                             << (address_space_width_in_bits - page_size_in_bits)};
     pointers.resize(num_page_table_entries);
     backing_addr.resize(num_page_table_entries);
     current_address_space_width_in_bits = address_space_width_in_bits;
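+    // Cache the page size; BeginTraversal and ContinueTraversal depend on it.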
+    page_size = 1ULL << page_size_in_bits;
 }
 
 } // namespace Common
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 8267e8b4d8..fe254d7ae4 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -27,6 +27,16 @@ enum class PageType : u8 {
  * mimics the way a real CPU page table works.
  */
 struct PageTable {
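+    /// A single traversal result: the physical address for the queried virtual
+    /// address, and the size of the block that address belongs to.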
+    struct TraversalEntry {
+        u64 phys_addr{};
+        std::size_t block_size{};
+    };
+
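+    /// State carried between BeginTraversal and ContinueTraversal calls.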
+    struct TraversalContext {
+        u64 next_page{};
+        u64 next_offset{};
+    };
+
     /// Number of bits reserved for attribute tagging.
     /// This can be at most the guaranteed alignment of the pointers in the page table.
     static constexpr int ATTRIBUTE_BITS = 2;
@@ -89,6 +99,10 @@ struct PageTable {
     PageTable(PageTable&&) noexcept = default;
     PageTable& operator=(PageTable&&) noexcept = default;
 
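+    /**
+     * Begins a traversal of the page table, yielding the entry (physical address
+     * and block size) for the page containing the given virtual address.
+     *
+     * Typical use (illustrative):
+     * @code
+     * Common::PageTable::TraversalEntry entry;
+     * Common::PageTable::TraversalContext ctx;
+     * for (bool ok = table.BeginTraversal(&entry, &ctx, addr); ok;
+     *      ok = table.ContinueTraversal(&entry, &ctx)) {
+     *     // entry.phys_addr covers one page of the mapping.
+     * }
+     * @endcode
+     *
+     * @param out_entry   Output entry for the first page.
+     * @param out_context Traversal state to pass to ContinueTraversal.
+     * @param address     Virtual address at which to begin the traversal.
+     *
+     * @returns True if the address is mapped, false otherwise.
+     */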
+    bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                        u64 address) const;
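+
+    /**
+     * Continues a traversal started by BeginTraversal, advancing to the next page.
+     *
+     * @param out_entry Output entry for the next page.
+     * @param context   Traversal state from the previous call.
+     *
+     * @returns True if the next page is mapped, false otherwise.
+     */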
+    bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
+
     /**
      * Resizes the page table to be able to accommodate enough pages within
      * a given address space.
@@ -96,9 +110,9 @@ struct PageTable {
      * @param address_space_width_in_bits The address size width in bits.
      * @param page_size_in_bits           The page size in bits.
      */
-    void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
+    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
 
-    size_t GetAddressSpaceBits() const {
+    std::size_t GetAddressSpaceBits() const {
         return current_address_space_width_in_bits;
     }
 
@@ -110,9 +124,11 @@ struct PageTable {
 
     VirtualBuffer<u64> backing_addr;
 
-    size_t current_address_space_width_in_bits;
+    std::size_t current_address_space_width_in_bits{};
 
-    u8* fastmem_arena;
+    u8* fastmem_arena{};
+
+    std::size_t page_size{};
 };
 
 } // namespace Common
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c2..28cea9ab31 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -400,148 +400,473 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
     return ResultSuccess;
 }
 
-ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
-    // Lock the table.
-    KScopedLightLock lk(general_lock);
+    // Calculate the last address for convenience.
+    const VAddr last_address = address + size - 1;
 
-    std::size_t mapped_size{};
-    const VAddr end_addr{addr + size};
+    // Define iteration variables.
+    VAddr cur_address;
+    std::size_t mapped_size;
 
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        }
-    });
+    // The entire mapping process can be retried.
+    while (true) {
+        // Check if the memory is already mapped.
+        {
+            // Lock the table.
+            KScopedLightLock lk(general_lock);
 
-    if (mapped_size == size) {
-        return ResultSuccess;
-    }
+            // Iterate over the memory.
+            cur_address = address;
+            mapped_size = 0;
 
-    const std::size_t remaining_size{size - mapped_size};
-    const std::size_t remaining_pages{remaining_size / PageSize};
+            auto it = block_manager->FindIterator(cur_address);
+            while (true) {
+                // Check that the iterator is valid.
+                ASSERT(it != block_manager->end());
 
-    // Reserve the memory from the process resource limit.
-    KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-        remaining_size);
-    if (!memory_reservation.Succeeded()) {
-        LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
-        return ResultLimitReached;
-    }
+                // Get the memory info.
+                const KMemoryInfo info = it->GetMemoryInfo();
 
-    KPageLinkedList page_linked_list;
+                // Check if we're done.
+                if (last_address <= info.GetLastAddress()) {
+                    if (info.GetState() != KMemoryState::Free) {
+                        mapped_size += (last_address + 1 - cur_address);
+                    }
+                    break;
+                }
 
-    CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
-                                                          memory_pool, allocation_option));
+                // Track the memory if it's mapped.
+                if (info.GetState() != KMemoryState::Free) {
+                    mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+                }
 
-    // We succeeded, so commit the memory reservation.
-    memory_reservation.Commit();
-
-    // Map the memory.
-    auto node{page_linked_list.Nodes().begin()};
-    PAddr map_addr{node->GetAddress()};
-    std::size_t src_num_pages{node->GetNumPages()};
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            return;
-        }
-
-        std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
-        VAddr dst_addr{GetAddressInRange(info, addr)};
-
-        while (dst_num_pages) {
-            if (!src_num_pages) {
-                node = std::next(node);
-                map_addr = node->GetAddress();
-                src_num_pages = node->GetNumPages();
+                // Advance.
+                cur_address = info.GetEndAddress();
+                ++it;
             }
 
-            const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
-            Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
-                    map_addr);
-
-            dst_addr += num_pages * PageSize;
-            map_addr += num_pages * PageSize;
-            src_num_pages -= num_pages;
-            dst_num_pages -= num_pages;
+            // If the size mapped is the size requested, we've nothing to do.
+            R_SUCCEED_IF(size == mapped_size);
         }
-    });
 
-    mapped_physical_memory_size += remaining_size;
+        // Allocate and map the memory.
+        {
+            // Reserve the memory from the process resource limit.
+            KScopedResourceReservation memory_reservation(
+                system.Kernel().CurrentProcess()->GetResourceLimit(),
+                LimitableResource::PhysicalMemory, size - mapped_size);
+            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
-    const std::size_t num_pages{size / PageSize};
-    block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None, KMemoryState::Normal,
-                          KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+            // Allocate pages for the new memory.
+            KPageLinkedList page_linked_list;
+            R_TRY(system.Kernel().MemoryManager().Allocate(
+                page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
 
-    return ResultSuccess;
+            // Map the memory.
+            {
+                // Lock the table.
+                KScopedLightLock lk(general_lock);
+
+                size_t num_allocator_blocks = 0;
+
+                // Verify that nobody has mapped memory since we first checked.
+                {
+                    // Iterate over the memory.
+                    size_t checked_mapped_size = 0;
+                    cur_address = address;
+
+                    auto it = block_manager->FindIterator(cur_address);
+                    while (true) {
+                        // Check that the iterator is valid.
+                        ASSERT(it != block_manager->end());
+
+                        // Get the memory info.
+                        const KMemoryInfo info = it->GetMemoryInfo();
+
+                        const bool is_free = info.GetState() == KMemoryState::Free;
+                        if (is_free) {
+                            if (info.GetAddress() < address) {
+                                ++num_allocator_blocks;
+                            }
+                            if (last_address < info.GetLastAddress()) {
+                                ++num_allocator_blocks;
+                            }
+                        }
+
+                        // Check if we're done.
+                        if (last_address <= info.GetLastAddress()) {
+                            if (!is_free) {
+                                checked_mapped_size += (last_address + 1 - cur_address);
+                            }
+                            break;
+                        }
+
+                        // Track the memory if it's mapped.
+                        if (!is_free) {
+                            checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+                        }
+
+                        // Advance.
+                        cur_address = info.GetEndAddress();
+                        ++it;
+                    }
+
+                    // If the size now isn't what it was before, somebody mapped or unmapped
+                    // concurrently. If this happened, retry.
+                    if (mapped_size != checked_mapped_size) {
+                        continue;
+                    }
+                }
+
+                // Reset the current tracking address, and make sure we clean up on failure.
+                cur_address = address;
+                auto unmap_guard = detail::ScopeExit([&] {
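+                    // If mapping fails partway through, walk back over [address, cur_address)
+                    // and unmap everything we mapped, so a failed call leaves the address
+                    // space untouched.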
+                    if (cur_address > address) {
+                        const VAddr last_unmap_address = cur_address - 1;
+
+                        // Iterate, unmapping the pages.
+                        cur_address = address;
+
+                        auto it = block_manager->FindIterator(cur_address);
+                        while (true) {
+                            // Check that the iterator is valid.
+                            ASSERT(it != block_manager->end());
+
+                            // Get the memory info.
+                            const KMemoryInfo info = it->GetMemoryInfo();
+
+                            // If the memory state is free, we mapped it and need to unmap it.
+                            if (info.GetState() == KMemoryState::Free) {
+                                // Determine the range to unmap.
+                                const size_t cur_pages =
+                                    std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                             last_unmap_address + 1 - cur_address) /
+                                    PageSize;
+
+                                // Unmap.
+                                ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
+                                               OperationType::Unmap)
+                                           .IsSuccess());
+                            }
+
+                            // Check if we're done.
+                            if (last_unmap_address <= info.GetLastAddress()) {
+                                break;
+                            }
+
+                            // Advance.
+                            cur_address = info.GetEndAddress();
+                            ++it;
+                        }
+                    }
+                });
+
+                // Iterate over the memory.
+                auto pg_it = page_linked_list.Nodes().begin();
+                PAddr pg_phys_addr = pg_it->GetAddress();
+                size_t pg_pages = pg_it->GetNumPages();
+
+                auto it = block_manager->FindIterator(cur_address);
+                while (true) {
+                    // Check that the iterator is valid.
+                    ASSERT(it != block_manager->end());
+
+                    // Get the memory info.
+                    const KMemoryInfo info = it->GetMemoryInfo();
+
+                    // If it's unmapped, we need to map it.
+                    if (info.GetState() == KMemoryState::Free) {
+                        // Determine the range to map.
+                        size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                                    last_address + 1 - cur_address) /
+                                           PageSize;
+
+                        // While we have pages to map, map them.
+                        while (map_pages > 0) {
+                            // Check if we're at the end of the physical block.
+                            if (pg_pages == 0) {
+                                // Ensure there are more pages to map.
+                                ASSERT(pg_it != page_linked_list.Nodes().end());
+
+                                // Advance our physical block.
+                                ++pg_it;
+                                pg_phys_addr = pg_it->GetAddress();
+                                pg_pages = pg_it->GetNumPages();
+                            }
+
+                            // Map whatever we can.
+                            const size_t cur_pages = std::min(pg_pages, map_pages);
+                            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
+                                          OperationType::Map, pg_phys_addr));
+
+                            // Advance.
+                            cur_address += cur_pages * PageSize;
+                            map_pages -= cur_pages;
+
+                            pg_phys_addr += cur_pages * PageSize;
+                            pg_pages -= cur_pages;
+                        }
+                    }
+
+                    // Check if we're done.
+                    if (last_address <= info.GetLastAddress()) {
+                        break;
+                    }
+
+                    // Advance.
+                    cur_address = info.GetEndAddress();
+                    ++it;
+                }
+
+                // We succeeded, so commit the memory reservation.
+                memory_reservation.Commit();
+
+                // Increase our tracked mapped size.
+                mapped_physical_memory_size += (size - mapped_size);
+
+                // Update the relevant memory blocks.
+                block_manager->Update(address, size / PageSize, KMemoryState::Free,
+                                      KMemoryPermission::None, KMemoryAttribute::None,
+                                      KMemoryState::Normal, KMemoryPermission::UserReadWrite,
+                                      KMemoryAttribute::None);
+
+                // Cancel our guard.
+                unmap_guard.Cancel();
+
+                return ResultSuccess;
+            }
+        }
+    }
 }
 
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
     // Lock the table.
     KScopedLightLock lk(general_lock);
 
-    const VAddr end_addr{addr + size};
-    ResultCode result{ResultSuccess};
-    std::size_t mapped_size{};
+    // Calculate the last address for convenience.
+    const VAddr last_address = address + size - 1;
 
-    // Verify that the region can be unmapped
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            if (info.attribute != KMemoryAttribute::None) {
-                result = ResultInvalidCurrentMemory;
-                return;
+    // Define iteration variables.
+    VAddr cur_address = 0;
+    std::size_t mapped_size = 0;
+    std::size_t num_allocator_blocks = 0;
+
+    // Check if the memory is mapped.
+    {
+        // Iterate over the memory.
+        cur_address = address;
+        mapped_size = 0;
+
+        auto it = block_manager->FindIterator(cur_address);
+        while (true) {
+            // Check that the iterator is valid.
+            ASSERT(it != block_manager->end());
+
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Verify the memory's state.
+            const bool is_normal = info.GetState() == KMemoryState::Normal &&
+                                   info.GetAttribute() == KMemoryAttribute::None;
+            const bool is_free = info.GetState() == KMemoryState::Free;
+            R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+            if (is_normal) {
+                if (info.GetAddress() < address) {
+                    ++num_allocator_blocks;
+                }
+                if (last_address < info.GetLastAddress()) {
+                    ++num_allocator_blocks;
+                }
             }
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        } else if (info.state != KMemoryState::Free) {
-            result = ResultInvalidCurrentMemory;
+
+            // Check if we're done.
+            if (last_address <= info.GetLastAddress()) {
+                if (is_normal) {
+                    mapped_size += (last_address + 1 - cur_address);
+                }
+                break;
+            }
+
+            // Track the memory if it's mapped.
+            if (is_normal) {
+                mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+            }
+
+            // Advance.
+            cur_address = info.GetEndAddress();
+            ++it;
         }
-    });
 
-    if (result.IsError()) {
-        return result;
+        // If there's nothing mapped, we've nothing to do.
+        R_SUCCEED_IF(mapped_size == 0);
     }
 
-    if (!mapped_size) {
-        return ResultSuccess;
+    // Make a page group for the unmap region.
+    KPageLinkedList pg;
+    {
+        auto& impl = this->PageTableImpl();
+
+        // Begin traversal.
+        Common::PageTable::TraversalContext context;
+        Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+        bool cur_valid = false;
+        Common::PageTable::TraversalEntry next_entry;
+        bool next_valid = false;
+        size_t tot_size = 0;
+
+        cur_address = address;
+        next_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address);
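+        // The traversal may begin partway into a block; subtract the offset of
+        // phys_addr within its block so block_size counts only the bytes from
+        // cur_address to the end of that block.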
+        next_entry.block_size =
+            (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
+
+        // Iterate, building the group.
+        while (true) {
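+            // Extend the current run when both entries are unmapped, or when the next
+            // entry is physically contiguous with it; otherwise emit the current run.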
+            if ((!next_valid && !cur_valid) ||
+                (next_valid && cur_valid &&
+                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+                cur_entry.block_size += next_entry.block_size;
+            } else {
+                if (cur_valid) {
+                    // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                    R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
+                }
+
+                // Update tracking variables.
+                tot_size += cur_entry.block_size;
+                cur_entry = next_entry;
+                cur_valid = next_valid;
+            }
+
+            if (cur_entry.block_size + tot_size >= size) {
+                break;
+            }
+
+            next_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+        }
+
+        // Add the last block.
+        if (cur_valid) {
+            // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+            R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
+        }
     }
+    ASSERT(pg.GetNumPages() == mapped_size / PageSize);
 
-    // Unmap each region within the range
-    KPageLinkedList page_linked_list;
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
-            const std::size_t block_num_pages{block_size / PageSize};
-            const VAddr block_addr{GetAddressInRange(info, addr)};
+    // Reset the current tracking address, and make sure we clean up on failure.
+    cur_address = address;
+    auto remap_guard = detail::ScopeExit([&] {
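+        // If unmapping fails partway through, re-map the pages we already unmapped
+        // so a failed call leaves the address space unchanged.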
+        if (cur_address > address) {
+            const VAddr last_map_address = cur_address - 1;
+            cur_address = address;
 
-            AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
+            // Iterate over the memory we unmapped.
+            auto it = block_manager->FindIterator(cur_address);
+            auto pg_it = pg.Nodes().begin();
+            PAddr pg_phys_addr = pg_it->GetAddress();
+            size_t pg_pages = pg_it->GetNumPages();
 
-            if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
-                                 OperationType::Unmap);
-                result.IsError()) {
-                return;
+            while (true) {
+                // Get the memory info for the pages we unmapped.
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                // If the memory is normal, we unmapped it and need to re-map it.
+                if (info.GetState() == KMemoryState::Normal) {
+                    // Determine the range to map.
+                    size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                                last_map_address + 1 - cur_address) /
+                                       PageSize;
+
+                    // While we have pages to map, map them.
+                    while (map_pages > 0) {
+                        // Check if we're at the end of the physical block.
+                        if (pg_pages == 0) {
+                            // Ensure there are more pages to map.
+                            ASSERT(pg_it != pg.Nodes().end());
+
+                            // Advance our physical block.
+                            ++pg_it;
+                            pg_phys_addr = pg_it->GetAddress();
+                            pg_pages = pg_it->GetNumPages();
+                        }
+
+                        // Map whatever we can.
+                        const size_t cur_pages = std::min(pg_pages, map_pages);
+                        ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
+                                             OperationType::Map, pg_phys_addr) == ResultSuccess);
+
+                        // Advance.
+                        cur_address += cur_pages * PageSize;
+                        map_pages -= cur_pages;
+
+                        pg_phys_addr += cur_pages * PageSize;
+                        pg_pages -= cur_pages;
+                    }
+                }
+
+                // Check if we're done.
+                if (last_map_address <= info.GetLastAddress()) {
+                    break;
+                }
+
+                // Advance, skipping past any blocks we did not unmap.
+                cur_address = info.GetEndAddress();
+                ++it;
             }
         }
     });
-    if (result.IsError()) {
-        return result;
+
+    // Iterate over the memory, unmapping as we go.
+    auto it = block_manager->FindIterator(cur_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != block_manager->end());
+
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
+
+        // If the memory state is normal, we need to unmap it.
+        if (info.GetState() == KMemoryState::Normal) {
+            // Determine the range to unmap.
+            const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                              last_address + 1 - cur_address) /
+                                     PageSize;
+
+            // Unmap.
+            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
+        }
+
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        cur_address = info.GetEndAddress();
+        ++it;
     }
 
-    const std::size_t num_pages{size / PageSize};
-    system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
-                                         allocation_option);
-
-    block_manager->Update(addr, num_pages, KMemoryState::Free);
-
+    // Release the memory resource.
+    mapped_physical_memory_size -= mapped_size;
     auto process{system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
-    mapped_physical_memory_size -= mapped_size;
+
+    // Update memory blocks.
+    system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
+    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+                          KMemoryAttribute::None);
+
+    // We succeeded.
+    remap_guard.Cancel();
 
     return ResultSuccess;
 }