Mirror of https://git.suyu.dev/suyu/suyu (synced 2025-11-04)

Commit: Revert "Revert "k_page_group: synchronize""
src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();

     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());

     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))

     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }

     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();

     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));

     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));

     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));

     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@
 #pragma once

+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }

 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
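The header change above is why the .cpp now uses emplace and operator*: KPageGroup's new constructor takes a KernelCore reference and a KBlockInfoManager pointer, neither of which is available when KCodeMemory's members are initialized, so construction is deferred inside a std::optional. A minimal sketch of that deferred-construction pattern, using hypothetical stand-in types:

    #include <optional>

    // Sketch: a resource whose constructor needs context that arrives later.
    class Resource {
    public:
        explicit Resource(int context) : m_context(context) {}

    private:
        int m_context;
    };

    class Holder {
    public:
        void Initialize(int context) {
            // Construct in place only once the context exists.
            m_resource.emplace(context);
        }
        void Finalize() {
            m_resource.reset(); // destroys the resource, if constructed
        }

    private:
        std::optional<Resource> m_resource{};
    };

    int main() {
        Holder h;
        h.Initialize(42); // context becomes available only at Initialize time
        h.Finalize();
    }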
src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
                                       m_has_optimized_process[static_cast<size_t>(pool)], true));

     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
src/core/hle/kernel/k_page_group.cpp (new file, +121 lines)
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
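The new implementation stores blocks in an intrusive singly-linked list of KBlockInfo nodes allocated from a KBlockInfoManager, coalescing each appended range into the tail block when it is physically contiguous. A minimal sketch of the same append-with-coalescing idea, using simplified stand-in types (plain new/delete instead of the slab manager):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for KBlockInfo and its intrusive list.
    struct Block {
        uint64_t address = 0;
        size_t num_pages = 0;
        Block* next = nullptr;
    };

    constexpr size_t kPageSize = 0x1000;

    // Append with tail coalescing, mirroring AddBlock/TryConcatenate.
    void AddBlock(Block*& first, Block*& last, uint64_t addr, size_t num_pages) {
        if (num_pages == 0) {
            return;
        }
        if (last != nullptr && last->address + last->num_pages * kPageSize == addr) {
            last->num_pages += num_pages; // contiguous: grow the tail block
            return;
        }
        Block* b = new Block{addr, num_pages, nullptr}; // stand-in for m_manager->Allocate()
        if (last != nullptr) {
            last->next = b;
        } else {
            first = b;
        }
        last = b;
    }

    int main() {
        Block* first = nullptr;
        Block* last = nullptr;
        AddBlock(first, last, 0x10000, 4);
        AddBlock(first, last, 0x14000, 2); // contiguous, coalesces into one block
        assert(first == last && first->num_pages == 6);
        for (Block* cur = first; cur != nullptr;) { // stand-in for Finalize()
            Block* next = cur->next;
            delete cur;
            cur = next;
        }
    }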
src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once
@@ -13,24 +13,23 @@
 namespace Kernel {

 class KBlockInfoManager;
+class KernelCore;
 class KPageGroup;

 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}

-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);

-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }

-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +38,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
@@ -62,8 +61,8 @@ public:
         return !(*this == rhs);
     }

-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();

         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +71,11 @@ public:
         return end < addr;
     }

-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }

-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +89,118 @@ private:
     }

 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);

-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }

-        constexpr u64 GetAddress() const {
-            return addr;
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
         }

-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
         }

-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
         }

     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
     };

 public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }

-    constexpr std::list<Node>& Nodes() {
-        return nodes;
+    void CloseAndReset();
+    void Finalize();
+
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
     }

-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
     }

-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
     }

-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
-
-        return this_node == nodes.end() && other_node == other.nodes.end();
-    }
-
-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
-        }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
-        }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
-    }
-
-    bool Empty() const {
-        return nodes.empty();
-    }
-
-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
 };

 class KScopedPageGroup {
 public:
     explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
         if (m_pg) {
             m_pg->Open();
         }
     }
     explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
     ~KScopedPageGroup() {
         if (m_pg) {
             m_pg->Close();
         }
     }

     void CancelClose() {
         m_pg = nullptr;
     }

 private:
     const KPageGroup* m_pg{};
 };

 } // namespace Kernel
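KScopedPageGroup (unchanged context above) is the scope guard the reworked Operate code relies on: it opens references on construction and closes them on destruction unless CancelClose() is called on the success path. A minimal sketch of that guard pattern with hypothetical stand-in types:

    #include <cstdio>

    // Hypothetical stand-in for a reference-counted page set.
    struct PageSet {
        void Open() const { std::puts("open refs"); }
        void Close() const { std::puts("close refs"); }
    };

    class ScopedPageSet {
    public:
        explicit ScopedPageSet(const PageSet& s) : m_set(&s) { m_set->Open(); }
        ~ScopedPageSet() {
            if (m_set) {
                m_set->Close(); // failure path: drop the references we took
            }
        }
        void CancelClose() { m_set = nullptr; } // success path: keep the references

    private:
        const PageSet* m_set{};
    };

    int main() {
        PageSet pages;
        {
            ScopedPageSet guard(pages);
            const bool map_succeeded = true; // result of the mapping operation
            if (map_succeeded) {
                guard.CancelClose(); // references persist with the mapping
            }
        } // Close() runs here only if the mapping failed
    }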
src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}

 KPageTable::~KPageTable() = default;
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                                  m_memory_block_slab_manager);

     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
         const size_t num_pages = size / PageSize;

         // Create page groups for the memory being mapped.
-        KPageGroup pg;
+        KPageGroup pg{m_kernel, m_block_info_manager};
         AddRegionToPages(src_address, num_pages, pg);

         // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;

     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);

     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }

-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());

     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();

     // Empty groups are necessarily invalid.
@@ -942,9 +941,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
-            // HACK: Manually close the pages.
-            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
-
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1020,9 +1016,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));

-        // HACK: Manually open the pages.
-        HACK_OpenPages(start_partial_page, 1);
-
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1051,9 +1044,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
             R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                           cur_block_addr));

-            // HACK: Manually open the pages.
-            HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
-
             // Update tracking extents.
             cur_mapped_addr += cur_block_size;
             cur_block_addr = next_entry.phys_addr;
@@ -1073,9 +1063,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));

-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1107,9 +1094,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-
-        // HACK: Manually open the pages.
-        HACK_OpenPages(end_partial_page, 1);
     }

     // Update memory blocks to reflect our changes
@@ -1211,9 +1195,6 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;

-    // HACK: Manually close the pages.
-    HACK_ClosePages(aligned_start, aligned_num_pages);
-
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
@@ -1501,17 +1482,6 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }

-void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
-    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
-}
-
-void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
-    for (size_t index = 0; index < num_pages; ++index) {
-        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
-        m_system.Kernel().MemoryManager().Close(paddr, 1);
-    }
-}
-
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1572,7 +1542,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

             // Allocate pages for the new memory.
-            KPageGroup pg;
+            KPageGroup pg{m_kernel, m_block_info_manager};
             R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
                 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
@@ -1650,7 +1620,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 KScopedPageTableUpdater updater(this);

                 // Prepare to iterate over the memory.
-                auto pg_it = pg.Nodes().begin();
+                auto pg_it = pg.begin();
                 PAddr pg_phys_addr = pg_it->GetAddress();
                 size_t pg_pages = pg_it->GetNumPages();
@@ -1680,9 +1650,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                              last_unmap_address + 1 - cur_address) /
                                     PageSize;

-                                // HACK: Manually close the pages.
-                                HACK_ClosePages(cur_address, cur_pages);
-
                                 // Unmap.
                                 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                                OperationType::Unmap)
@@ -1703,7 +1670,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                     // Release any remaining unmapped memory.
                     m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                     m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
                         m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                     pg_it->GetNumPages());
                         m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1698,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                             // Check if we're at the end of the physical block.
                             if (pg_pages == 0) {
                                 // Ensure there are more pages to map.
-                                ASSERT(pg_it != pg.Nodes().end());
+                                ASSERT(pg_it != pg.end());

                                 // Advance our physical block.
                                 ++pg_it;
@@ -1742,10 +1709,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                             // Map whatever we can.
                             const size_t cur_pages = std::min(pg_pages, map_pages);
                             R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                                          OperationType::Map, pg_phys_addr));
-
-                            // HACK: Manually open the pages.
-                            HACK_OpenPages(pg_phys_addr, cur_pages);
+                                          OperationType::MapFirst, pg_phys_addr));

                             // Advance.
                             cur_address += cur_pages * PageSize;
@@ -1888,9 +1852,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                               last_address + 1 - cur_address) /
                                      PageSize;

-            // HACK: Manually close the pages.
-            HACK_ClosePages(cur_address, cur_pages);
-
             // Unmap.
             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1955,7 +1916,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);

     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +1983,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);

-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};

     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);

-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);

     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2021,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
     VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2121,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
     VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2488,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

     // Allocate pages for the heap extension.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));

     // Clear all the newly allocated pages.
-    for (const auto& it : pg.Nodes()) {
+    for (const auto& it : pg) {
         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
@@ -2610,11 +2571,23 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
-        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
-            &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
-        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
+        // Create a page group to hold the pages we allocate.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
+            &pg, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
+
+        // Ensure that the page group is closed when we're done working with it.
+        SCOPE_EXIT({ pg.Close(); });
+
+        // Clear all pages.
+        for (const auto& it : pg) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
+                        m_heap_fill_value, it.GetSize());
+        }
+
+        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
     }

     // Update the blocks.
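SCOPE_EXIT in the new allocation path guarantees pg.Close() runs even when a later R_TRY returns early. A minimal sketch of that deferred-cleanup idiom as a small RAII helper (a stand-in, not yuzu's actual macro):

    #include <cstdio>
    #include <utility>

    // Minimal stand-in for yuzu's SCOPE_EXIT macro: runs a callable at scope end.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : m_f(std::forward<F>(f)) {}
        ~ScopeExit() { m_f(); }

    private:
        F m_f;
    };

    int main() {
        std::puts("open");
        {
            ScopeExit cleanup([] { std::puts("close"); }); // runs on any exit path
            std::puts("work");
        } // "close" printed here, even if "work" had returned or thrown
    }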
@@ -2795,19 +2768,28 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());

-    for (const auto& node : page_group.Nodes()) {
-        const size_t size{node.GetNumPages() * PageSize};
+    switch (operation) {
+    case OperationType::MapGroup: {
+        // We want to maintain a new reference to every page in the group.
+        KScopedPageGroup spg(page_group);

-        switch (operation) {
-        case OperationType::MapGroup:
+        for (const auto& node : page_group) {
+            const size_t size{node.GetNumPages() * PageSize};
+
+            // Map the pages.
             m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-            break;
-        default:
-            ASSERT(false);
-            break;
+
+            addr += size;
         }

-        addr += size;
+        // We succeeded! We want to persist the reference to the pages.
+        spg.CancelClose();
+
+        break;
+    }
+    default:
+        ASSERT(false);
+        break;
     }

     R_SUCCEED();
@@ -2822,13 +2804,29 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));

     switch (operation) {
-    case OperationType::Unmap:
+    case OperationType::Unmap: {
+        // Ensure that any pages we track close on exit.
+        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
+        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+        this->AddRegionToPages(addr, num_pages, pages_to_close);
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
+    }
+    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+
+        // Open references to pages, if we should.
+        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
+            if (operation == OperationType::MapFirst) {
+                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
+            } else {
+                m_kernel.MemoryManager().Open(map_addr, num_pages);
+            }
+        }
         break;
     }
     case OperationType::Separate: {
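Net effect of the two Operate hunks: opening and closing of physical-page references now happens inside the map and unmap operations themselves, which is what lets every HACK_OpenPages/HACK_ClosePages call site disappear. A toy sketch of that invariant with a hypothetical refcount table:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    // Toy physical-page refcount table standing in for KMemoryManager.
    static std::unordered_map<uint64_t, int> g_refcounts;

    // Map: take a reference on every page as part of the operation itself.
    void OperateMap(uint64_t first_page, size_t num_pages) {
        for (size_t i = 0; i < num_pages; ++i) {
            ++g_refcounts[first_page + i];
        }
    }

    // Unmap: drop the references as part of the operation, mirroring the
    // pages_to_close + CloseAndReset pattern above.
    void OperateUnmap(uint64_t first_page, size_t num_pages) {
        for (size_t i = 0; i < num_pages; ++i) {
            int& rc = g_refcounts[first_page + i];
            assert(rc > 0);
            --rc; // page becomes freeable when this reaches zero
        }
    }

    int main() {
        OperateMap(0x100, 4);
        OperateUnmap(0x100, 4); // callers never touch the refcounts directly
        assert(g_refcounts[0x100] == 0);
    }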
src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }

+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

 protected:
protected:
 | 
			
		||||
@@ -261,10 +265,6 @@ private:
 | 
			
		||||
    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
 | 
			
		||||
                                                 size_t size, KMemoryPermission prot_perm);
 | 
			
		||||
 | 
			
		||||
    // HACK: These will be removed once we automatically manage page reference counts.
 | 
			
		||||
    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
 | 
			
		||||
    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
 | 
			
		||||
 | 
			
		||||
    mutable KLightLock m_general_lock;
 | 
			
		||||
    mutable KLightLock m_map_physical_memory_lock;
 | 
			
		||||
 | 
			
		||||
@@ -488,6 +488,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;

     Core::System& m_system;
+    KernelCore& m_kernel;
 };

 } // namespace Kernel
src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@
 namespace Kernel {

 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}

-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;

 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);

     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);

     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;

     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
-
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
+    page_group->Close();
+    page_group->Finalize();

     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};

 using Page = std::array<u8, PageSize>;

+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);

     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,