Merge pull request #12579 from FernandoS27/smmu
Core: Implement Device Mapping & GPU SMMU
@@ -8,6 +8,7 @@
#include "audio_core/sink/sink_stream.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/guest_memory.h"
#include "core/memory.h"

#include "core/hle/kernel/k_process.h"

@@ -9,6 +9,7 @@
#include "common/fixed_point.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
#include "core/guest_memory.h"
#include "core/memory.h"

namespace AudioCore::Renderer {

@@ -45,6 +45,7 @@ using f32 = float;  ///< 32-bit floating point
using f64 = double; ///< 64-bit floating point

using VAddr = u64;    ///< Represents a pointer in the userspace virtual address space.
using DAddr = u64;    ///< Represents a pointer in the device specific virtual address space.
using PAddr = u64;    ///< Represents a pointer in the ARM11 physical address space.
using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space.

@@ -37,6 +37,8 @@ add_library(core STATIC
    debugger/gdbstub_arch.h
    debugger/gdbstub.cpp
    debugger/gdbstub.h
    device_memory_manager.h
    device_memory_manager.inc
    device_memory.cpp
    device_memory.h
    file_sys/fssystem/fs_i_storage.h
@@ -609,6 +611,8 @@ add_library(core STATIC
    hle/service/ns/pdm_qry.h
    hle/service/nvdrv/core/container.cpp
    hle/service/nvdrv/core/container.h
    hle/service/nvdrv/core/heap_mapper.cpp
    hle/service/nvdrv/core/heap_mapper.h
    hle/service/nvdrv/core/nvmap.cpp
    hle/service/nvdrv/core/nvmap.h
    hle/service/nvdrv/core/syncpoint_manager.cpp

@@ -28,6 +28,7 @@
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs_concat.h"
#include "core/file_sys/vfs_real.h"
#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -565,6 +566,9 @@ struct System::Impl {
    std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
    std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_cpu{};

    std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES>
        gpu_dirty_memory_managers;

    std::deque<std::vector<u8>> user_channel;
};

@@ -651,8 +655,14 @@ size_t System::GetCurrentHostThreadID() const {
    return impl->kernel.GetCurrentHostThreadID();
}

void System::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
    return this->ApplicationProcess()->GatherGPUDirtyMemory(callback);
std::span<GPUDirtyMemoryManager> System::GetGPUDirtyMemoryManager() {
    return impl->gpu_dirty_memory_managers;
}

void System::GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback) {
    for (auto& manager : impl->gpu_dirty_memory_managers) {
        manager.Gather(callback);
    }
}

PerfStatsResults System::GetAndResetPerfStats() {
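The dirty-memory callback is now keyed on PAddr rather than VAddr: System owns
one GPUDirtyMemoryManager per CPU core and drains all of them through
GatherGPUDirtyMemory. A minimal sketch of a consumer, assuming `system` is a
Core::System; the cache object and its InvalidateRegion hook are illustrative,
not part of this diff:

    // Drain every per-core dirty manager once, forwarding each dirty
    // physical range to a hypothetical cache-invalidation hook.
    std::function<void(PAddr, size_t)> on_dirty = [&](PAddr addr, size_t size) {
        gpu_caches.InvalidateRegion(addr, size); // illustrative consumer
    };
    system.GatherGPUDirtyMemory(on_dirty);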
@@ -8,6 +8,7 @@
#include <functional>
#include <memory>
#include <mutex>
#include <span>
#include <string>
#include <vector>

@@ -116,6 +117,7 @@ class CpuManager;
class Debugger;
class DeviceMemory;
class ExclusiveMonitor;
class GPUDirtyMemoryManager;
class PerfStats;
class Reporter;
class SpeedLimiter;
@@ -224,7 +226,9 @@ public:
    /// Prepare the core emulation for a reschedule
    void PrepareReschedule(u32 core_index);

    void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
    std::span<GPUDirtyMemoryManager> GetGPUDirtyMemoryManager();

    void GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback);

    [[nodiscard]] size_t GetCurrentHostThreadID() const;

@@ -31,6 +31,12 @@ public:
               DramMemoryMap::Base;
    }

    template <typename T>
    PAddr GetRawPhysicalAddr(const T* ptr) const {
        return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) -
                                  reinterpret_cast<uintptr_t>(buffer.BackingBasePointer()));
    }

    template <typename T>
    T* GetPointer(Common::PhysicalAddress addr) {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() +
@@ -43,6 +49,16 @@ public:
                                    (GetInteger(addr) - DramMemoryMap::Base));
    }

    template <typename T>
    T* GetPointerFromRaw(PAddr addr) {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr);
    }

    template <typename T>
    const T* GetPointerFromRaw(PAddr addr) const {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr);
    }

    Common::HostMemory buffer;
};
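The new raw accessors on DeviceMemory treat a PAddr as a plain byte offset
into the backing HostMemory buffer, so GetRawPhysicalAddr and
GetPointerFromRaw are exact inverses. A small sketch of the round trip,
assuming `device_memory` is a Core::DeviceMemory and the offset lies inside
the buffer:

    const PAddr raw = 0x1000;                                      // byte offset
    u8* host_ptr = device_memory.GetPointerFromRaw<u8>(raw);       // base + 0x1000
    const PAddr back = device_memory.GetRawPhysicalAddr(host_ptr); // ptr - base
    // back == raw: a "raw" physical address is just an offset from
    // buffer.BackingBasePointer().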
src/core/device_memory_manager.h (new file, 211 lines)
@@ -0,0 +1,211 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <atomic>
#include <deque>
#include <memory>
#include <mutex>

#include "common/common_types.h"
#include "common/scratch_buffer.h"
#include "common/virtual_buffer.h"

namespace Core {

constexpr size_t DEVICE_PAGEBITS = 12ULL;
constexpr size_t DEVICE_PAGESIZE = 1ULL << DEVICE_PAGEBITS;
constexpr size_t DEVICE_PAGEMASK = DEVICE_PAGESIZE - 1ULL;

class DeviceMemory;

namespace Memory {
class Memory;
}

template <typename DTraits>
struct DeviceMemoryManagerAllocator;

struct Asid {
    size_t id;
};

template <typename Traits>
class DeviceMemoryManager {
    using DeviceInterface = typename Traits::DeviceInterface;
    using DeviceMethods = typename Traits::DeviceMethods;

public:
    DeviceMemoryManager(const DeviceMemory& device_memory);
    ~DeviceMemoryManager();

    void BindInterface(DeviceInterface* device_inter);

    DAddr Allocate(size_t size);
    void AllocateFixed(DAddr start, size_t size);
    void Free(DAddr start, size_t size);

    void Map(DAddr address, VAddr virtual_address, size_t size, Asid asid, bool track = false);

    void Unmap(DAddr address, size_t size);

    void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, Asid asid);
    void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, Asid asid) {
        std::scoped_lock lk(mapping_guard);
        TrackContinuityImpl(address, virtual_address, size, asid);
    }

    // Write / Read
    template <typename T>
    T* GetPointer(DAddr address);

    template <typename T>
    const T* GetPointer(DAddr address) const;

    template <typename Func>
    void ApplyOpOnPAddr(PAddr address, Common::ScratchBuffer<u32>& buffer, Func&& operation) {
        DAddr subbits = static_cast<DAddr>(address & page_mask);
        const u32 base = compressed_device_addr[(address >> page_bits)];
        if ((base >> MULTI_FLAG_BITS) == 0) [[likely]] {
            const DAddr d_address = (static_cast<DAddr>(base) << page_bits) + subbits;
            operation(d_address);
            return;
        }
        InnerGatherDeviceAddresses(buffer, address);
        for (u32 value : buffer) {
            operation((static_cast<DAddr>(value) << page_bits) + subbits);
        }
    }

    template <typename Func>
    void ApplyOpOnPointer(const u8* p, Common::ScratchBuffer<u32>& buffer, Func&& operation) {
        PAddr address = GetRawPhysicalAddr<u8>(p);
        ApplyOpOnPAddr(address, buffer, operation);
    }

    PAddr GetPhysicalRawAddressFromDAddr(DAddr address) const {
        PAddr subbits = static_cast<PAddr>(address & page_mask);
        auto paddr = compressed_physical_ptr[(address >> page_bits)];
        if (paddr == 0) {
            return 0;
        }
        return (static_cast<PAddr>(paddr - 1) << page_bits) + subbits;
    }

    template <typename T>
    void Write(DAddr address, T value);

    template <typename T>
    T Read(DAddr address) const;

    u8* GetSpan(const DAddr src_addr, const std::size_t size);
    const u8* GetSpan(const DAddr src_addr, const std::size_t size) const;

    void ReadBlock(DAddr address, void* dest_pointer, size_t size);
    void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size);
    void WriteBlock(DAddr address, const void* src_pointer, size_t size);
    void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size);

    Asid RegisterProcess(Memory::Memory* memory);
    void UnregisterProcess(Asid id);

    void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta);

    static constexpr size_t AS_BITS = Traits::device_virtual_bits;

private:
    static constexpr size_t device_virtual_bits = Traits::device_virtual_bits;
    static constexpr size_t device_as_size = 1ULL << device_virtual_bits;
    static constexpr size_t physical_min_bits = 32;
    static constexpr size_t physical_max_bits = 33;
    static constexpr size_t page_bits = 12;
    static constexpr size_t page_size = 1ULL << page_bits;
    static constexpr size_t page_mask = page_size - 1ULL;
    static constexpr u32 physical_address_base = 1U << page_bits;
    static constexpr u32 MULTI_FLAG_BITS = 31;
    static constexpr u32 MULTI_FLAG = 1U << MULTI_FLAG_BITS;
    static constexpr u32 MULTI_MASK = ~MULTI_FLAG;

    template <typename T>
    T* GetPointerFromRaw(PAddr addr) {
        return reinterpret_cast<T*>(physical_base + addr);
    }

    template <typename T>
    const T* GetPointerFromRaw(PAddr addr) const {
        return reinterpret_cast<T*>(physical_base + addr);
    }

    template <typename T>
    PAddr GetRawPhysicalAddr(const T* ptr) const {
        return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) - physical_base);
    }

    void WalkBlock(const DAddr addr, const std::size_t size, auto on_unmapped, auto on_memory,
                   auto increment);

    void InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer, PAddr address);

    std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl;

    const uintptr_t physical_base;
    DeviceInterface* device_inter;
    Common::VirtualBuffer<u32> compressed_physical_ptr;
    Common::VirtualBuffer<u32> compressed_device_addr;
    Common::VirtualBuffer<u32> continuity_tracker;

    // Process memory interfaces

    std::deque<size_t> id_pool;
    std::deque<Memory::Memory*> registered_processes;

    // Memory protection management

    static constexpr size_t guest_max_as_bits = 39;
    static constexpr size_t guest_as_size = 1ULL << guest_max_as_bits;
    static constexpr size_t guest_mask = guest_as_size - 1ULL;
    static constexpr size_t asid_start_bit = guest_max_as_bits;

    std::pair<Asid, VAddr> ExtractCPUBacking(size_t page_index) {
        auto content = cpu_backing_address[page_index];
        const VAddr address = content & guest_mask;
        const Asid asid{static_cast<size_t>(content >> asid_start_bit)};
        return std::make_pair(asid, address);
    }

    void InsertCPUBacking(size_t page_index, VAddr address, Asid asid) {
        cpu_backing_address[page_index] = address | (asid.id << asid_start_bit);
    }

    Common::VirtualBuffer<VAddr> cpu_backing_address;
    static constexpr size_t subentries = 8 / sizeof(u8);
    static constexpr size_t subentries_mask = subentries - 1;
    class CounterEntry final {
    public:
        CounterEntry() = default;

        std::atomic_uint8_t& Count(std::size_t page) {
            return values[page & subentries_mask];
        }

        const std::atomic_uint8_t& Count(std::size_t page) const {
            return values[page & subentries_mask];
        }

    private:
        std::array<std::atomic_uint8_t, subentries> values{};
    };
    static_assert(sizeof(CounterEntry) == subentries * sizeof(u8),
                  "CounterEntry should be 8 bytes!");

    static constexpr size_t num_counter_entries =
        (1ULL << (device_virtual_bits - page_bits)) / subentries;
    using CachedPages = std::array<CounterEntry, num_counter_entries>;
    std::unique_ptr<CachedPages> cached_pages;
    std::mutex counter_guard;
    std::mutex mapping_guard;
};

} // namespace Core
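DeviceMemoryManager is parameterized by a Traits type that supplies the width
of the device address space plus the interfaces used for cache maintenance. A
minimal usage sketch; the trait members, sizes, and object names below are
illustrative (the concrete traits for the GPU live in the video_core side of
this PR):

    struct ExampleTraits {
        static constexpr size_t device_virtual_bits = 34;    // illustrative width
        using DeviceInterface = ExampleDevice;  // offers FlushRegion/InvalidateRegion
        using DeviceMethods = ExampleMethods;   // offers MarkRegionCaching
    };

    Core::DeviceMemoryManager<ExampleTraits> manager{device_memory};
    manager.BindInterface(&device);
    const Core::Asid asid = manager.RegisterProcess(&process_memory);

    const DAddr daddr = manager.Allocate(0x10000);   // carve out device VA space
    manager.Map(daddr, guest_vaddr, 0x10000, asid);  // back it with CPU pages
    const u32 word = manager.Read<u32>(daddr);       // translated access
    manager.Unmap(daddr, 0x10000);
    manager.Free(daddr, 0x10000);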
src/core/device_memory_manager.inc (new file, 582 lines)
@@ -0,0 +1,582 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <atomic>
#include <limits>
#include <memory>
#include <type_traits>

#include "common/address_space.h"
#include "common/address_space.inc"
#include "common/alignment.h"
#include "common/assert.h"
#include "common/div_ceil.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/device_memory.h"
#include "core/device_memory_manager.h"
#include "core/memory.h"

namespace Core {

namespace {

class MultiAddressContainer {
public:
    MultiAddressContainer() = default;
    ~MultiAddressContainer() = default;

    void GatherValues(u32 start_entry, Common::ScratchBuffer<u32>& buffer) {
        buffer.resize(8);
        buffer.resize(0);
        size_t index = 0;
        const auto add_value = [&](u32 value) {
            buffer[index] = value;
            index++;
            buffer.resize(index);
        };

        u32 iter_entry = start_entry;
        Entry* current = &storage[iter_entry - 1];
        add_value(current->value);
        while (current->next_entry != 0) {
            iter_entry = current->next_entry;
            current = &storage[iter_entry - 1];
            add_value(current->value);
        }
    }

    u32 Register(u32 value) {
        return RegisterImplementation(value);
    }

    void Register(u32 value, u32 start_entry) {
        auto entry_id = RegisterImplementation(value);
        u32 iter_entry = start_entry;
        Entry* current = &storage[iter_entry - 1];
        while (current->next_entry != 0) {
            iter_entry = current->next_entry;
            current = &storage[iter_entry - 1];
        }
        current->next_entry = entry_id;
    }

    std::pair<bool, u32> Unregister(u32 value, u32 start_entry) {
        u32 iter_entry = start_entry;
        Entry* previous{};
        Entry* current = &storage[iter_entry - 1];
        Entry* next{};
        bool more_than_one_remaining = false;
        u32 result_start{start_entry};
        size_t count = 0;
        while (current->value != value) {
            count++;
            previous = current;
            iter_entry = current->next_entry;
            current = &storage[iter_entry - 1];
        }
        // Find next
        u32 next_entry = current->next_entry;
        if (next_entry != 0) {
            next = &storage[next_entry - 1];
            more_than_one_remaining = next->next_entry != 0 || previous != nullptr;
        }
        if (previous) {
            previous->next_entry = next_entry;
        } else {
            result_start = next_entry;
        }
        free_entries.emplace_back(iter_entry);
        return std::make_pair(more_than_one_remaining || count > 1, result_start);
    }

    u32 ReleaseEntry(u32 start_entry) {
        Entry* current = &storage[start_entry - 1];
        free_entries.emplace_back(start_entry);
        return current->value;
    }

private:
    u32 RegisterImplementation(u32 value) {
        auto entry_id = GetNewEntry();
        auto& entry = storage[entry_id - 1];
        entry.next_entry = 0;
        entry.value = value;
        return entry_id;
    }
    u32 GetNewEntry() {
        if (!free_entries.empty()) {
            u32 result = free_entries.front();
            free_entries.pop_front();
            return result;
        }
        storage.emplace_back();
        u32 new_entry = static_cast<u32>(storage.size());
        return new_entry;
    }

    struct Entry {
        u32 next_entry{};
        u32 value{};
    };

    std::deque<Entry> storage;
    std::deque<u32> free_entries;
};
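MultiAddressContainer is a free-list-backed set of singly linked chains whose
entry ids are 1-based so that 0 can mean "no next entry". A short sketch of
chaining two device pages that alias the same physical page:

    MultiAddressContainer chains;
    u32 head = chains.Register(5);      // chain {5}
    chains.Register(9, head);           // chain {5, 9}

    Common::ScratchBuffer<u32> pages;
    chains.GatherValues(head, pages);   // pages now holds {5, 9}

    // Unregister reports whether MORE than one value remains afterwards;
    // with only {5} left the caller is expected to collapse the chain back
    // to an inline value, which is exactly what Unmap does below:
    auto [more_than_one, start] = chains.Unregister(9, head); // {false, head}
    if (!more_than_one) {
        const u32 last = chains.ReleaseEntry(start); // 5; chain storage freed
    }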

struct EmptyAllocator {
    EmptyAllocator([[maybe_unused]] DAddr address) {}
};

} // namespace

template <typename DTraits>
struct DeviceMemoryManagerAllocator {
    static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits;
    static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS;
    static constexpr DAddr max_device_area = 1ULL << device_virtual_bits;

    DeviceMemoryManagerAllocator() : main_allocator(first_address) {}

    Common::FlatAllocator<DAddr, 0, device_virtual_bits> main_allocator;
    MultiAddressContainer multi_dev_address;

    /// Returns true when vaddr -> vaddr+size is fully contained in the buffer
    template <bool pin_area>
    [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
        return addr >= 0 && addr + size <= max_device_area;
    }

    DAddr Allocate(size_t size) {
        return main_allocator.Allocate(size);
    }

    void AllocateFixed(DAddr b_address, size_t b_size) {
        main_allocator.AllocateFixed(b_address, b_size);
    }

    void Free(DAddr b_address, size_t b_size) {
        main_allocator.Free(b_address, b_size);
    }
};

template <typename Traits>
DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
    : physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())},
      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
      compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                               Settings::MemoryLayout::Memory_4Gb
                                           ? physical_min_bits
                                           : physical_max_bits) -
                                      Memory::YUZU_PAGEBITS)),
      continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
      cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
    impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
    cached_pages = std::make_unique<CachedPages>();

    const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS;
    for (size_t i = 0; i < total_virtual; i++) {
        compressed_physical_ptr[i] = 0;
        continuity_tracker[i] = 1;
        cpu_backing_address[i] = 0;
    }
    const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                                Settings::MemoryLayout::Memory_4Gb
                                            ? physical_min_bits
                                            : physical_max_bits) -
                                       Memory::YUZU_PAGEBITS);
    for (size_t i = 0; i < total_phys; i++) {
        compressed_device_addr[i] = 0;
    }
}

template <typename Traits>
DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default;

template <typename Traits>
void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* device_inter_) {
    device_inter = device_inter_;
}

template <typename Traits>
DAddr DeviceMemoryManager<Traits>::Allocate(size_t size) {
    return impl->Allocate(size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::AllocateFixed(DAddr start, size_t size) {
    return impl->AllocateFixed(start, size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::Free(DAddr start, size_t size) {
    impl->Free(start, size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size,
                                      Asid asid, bool track) {
    Core::Memory::Memory* process_memory = registered_processes[asid.id];
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    std::scoped_lock lk(mapping_guard);
    for (size_t i = 0; i < num_pages; i++) {
        const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE;
        auto* ptr = process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress));
        if (ptr == nullptr) [[unlikely]] {
            compressed_physical_ptr[start_page_d + i] = 0;
            continue;
        }
        auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U;
        compressed_physical_ptr[start_page_d + i] = phys_addr;
        InsertCPUBacking(start_page_d + i, new_vaddress, asid);
        const u32 base_dev = compressed_device_addr[phys_addr - 1U];
        const u32 new_dev = static_cast<u32>(start_page_d + i);
        if (base_dev == 0) [[likely]] {
            compressed_device_addr[phys_addr - 1U] = new_dev;
            continue;
        }
        u32 start_id = base_dev & MULTI_MASK;
        if ((base_dev >> MULTI_FLAG_BITS) == 0) {
            start_id = impl->multi_dev_address.Register(base_dev);
            compressed_device_addr[phys_addr - 1U] = MULTI_FLAG | start_id;
        }
        impl->multi_dev_address.Register(new_dev, start_id);
    }
    if (track) {
        TrackContinuityImpl(address, virtual_address, size, asid);
    }
}

template <typename Traits>
void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    device_inter->InvalidateRegion(address, size);
    std::scoped_lock lk(mapping_guard);
    for (size_t i = 0; i < num_pages; i++) {
        auto phys_addr = compressed_physical_ptr[start_page_d + i];
        compressed_physical_ptr[start_page_d + i] = 0;
        cpu_backing_address[start_page_d + i] = 0;
        if (phys_addr != 0) [[likely]] {
            const u32 base_dev = compressed_device_addr[phys_addr - 1U];
            if ((base_dev >> MULTI_FLAG_BITS) == 0) [[likely]] {
                compressed_device_addr[phys_addr - 1] = 0;
                continue;
            }
            const auto [more_entries, new_start] = impl->multi_dev_address.Unregister(
                static_cast<u32>(start_page_d + i), base_dev & MULTI_MASK);
            if (!more_entries) {
                compressed_device_addr[phys_addr - 1] =
                    impl->multi_dev_address.ReleaseEntry(new_start);
                continue;
            }
            compressed_device_addr[phys_addr - 1] = new_start | MULTI_FLAG;
        }
    }
}
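Both translation tables hold 32-bit entries: compressed_physical_ptr stores
(physical page number + 1) with 0 reserved for "unmapped", and
compressed_device_addr stores either a device page number directly or, once
bit 31 (MULTI_FLAG) is set, the head of a MultiAddressContainer chain. A
worked example of the encoding used by Map and Unmap above, with 4 KiB pages:

    constexpr DAddr daddr = 0x2000;              // device address to map
    constexpr PAddr raw_phys = 0x5000;           // offset into DeviceMemory
    constexpr size_t page = daddr >> 12;         // = 2, device page number
    constexpr u32 stored = (raw_phys >> 12) + 1; // = 6; 0 means "unmapped"
    // Map writes: compressed_physical_ptr[2] = 6, compressed_device_addr[5] = 2
    // (the device table is indexed by stored - 1). A second device alias of
    // the same physical page converts entry 5 into a chain:
    //     compressed_device_addr[5] = MULTI_FLAG | chain_head;
    // GetPointer inverts the forward mapping:
    //     host = physical_base + ((stored - 1) << 12) + (daddr & page_mask);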

template <typename Traits>
void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address,
                                                      size_t size, Asid asid) {
    Core::Memory::Memory* process_memory = registered_processes[asid.id];
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    uintptr_t last_ptr = 0;
    size_t page_count = 1;
    for (size_t i = num_pages; i > 0; i--) {
        size_t index = i - 1;
        const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE;
        const uintptr_t new_ptr = reinterpret_cast<uintptr_t>(
            process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)));
        if (new_ptr + page_size == last_ptr) {
            page_count++;
        } else {
            page_count = 1;
        }
        last_ptr = new_ptr;
        continuity_tracker[start_page_d + index] = static_cast<u32>(page_count);
    }
}

template <typename Traits>
u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) {
    size_t page_index = src_addr >> page_bits;
    size_t subbits = src_addr & page_mask;
    if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) {
        return GetPointer<u8>(src_addr);
    }
    return nullptr;
}

template <typename Traits>
const u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) const {
    size_t page_index = src_addr >> page_bits;
    size_t subbits = src_addr & page_mask;
    if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) {
        return GetPointer<u8>(src_addr);
    }
    return nullptr;
}

template <typename Traits>
void DeviceMemoryManager<Traits>::InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer,
                                                             PAddr address) {
    size_t phys_addr = address >> page_bits;
    std::scoped_lock lk(mapping_guard);
    u32 backing = compressed_device_addr[phys_addr];
    if ((backing >> MULTI_FLAG_BITS) != 0) {
        impl->multi_dev_address.GatherValues(backing & MULTI_MASK, buffer);
        return;
    }
    buffer.resize(1);
    buffer[0] = backing;
}

template <typename Traits>
template <typename T>
T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) {
    const size_t index = address >> Memory::YUZU_PAGEBITS;
    const size_t offset = address & Memory::YUZU_PAGEMASK;
    auto phys_addr = compressed_physical_ptr[index];
    if (phys_addr == 0) [[unlikely]] {
        return nullptr;
    }
    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
                                offset);
}

template <typename Traits>
template <typename T>
const T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) const {
    const size_t index = address >> Memory::YUZU_PAGEBITS;
    const size_t offset = address & Memory::YUZU_PAGEMASK;
    auto phys_addr = compressed_physical_ptr[index];
    if (phys_addr == 0) [[unlikely]] {
        return nullptr;
    }
    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
                                offset);
}

template <typename Traits>
template <typename T>
void DeviceMemoryManager<Traits>::Write(DAddr address, T value) {
    T* ptr = GetPointer<T>(address);
    if (!ptr) [[unlikely]] {
        return;
    }
    std::memcpy(ptr, &value, sizeof(T));
}

template <typename Traits>
template <typename T>
T DeviceMemoryManager<Traits>::Read(DAddr address) const {
    const T* ptr = GetPointer<T>(address);
    T result{};
    if (!ptr) [[unlikely]] {
        return result;
    }
    std::memcpy(&result, ptr, sizeof(T));
    return result;
}

template <typename Traits>
void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto on_unmapped,
                                            auto on_memory, auto increment) {
    std::size_t remaining_size = size;
    std::size_t page_index = addr >> Memory::YUZU_PAGEBITS;
    std::size_t page_offset = addr & Memory::YUZU_PAGEMASK;

    while (remaining_size) {
        const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]);
        const std::size_t copy_amount =
            std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
        const auto current_vaddr =
            static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
        SCOPE_EXIT({
            page_index += next_pages;
            page_offset = 0;
            increment(copy_amount);
            remaining_size -= copy_amount;
        });

        auto phys_addr = compressed_physical_ptr[page_index];
        if (phys_addr == 0) {
            on_unmapped(copy_amount, current_vaddr);
            continue;
        }
        auto* mem_ptr = GetPointerFromRaw<u8>(
            (static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset);
        on_memory(copy_amount, mem_ptr);
    }
}

template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
    device_inter->FlushRegion(address, size);
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
            std::memset(dest_pointer, 0, copy_amount);
        },
        [&](size_t copy_amount, const u8* const src_ptr) {
            std::memcpy(dest_pointer, src_ptr, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
        });
}

template <typename Traits>
void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_pointer, size_t size) {
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
        },
        [&](size_t copy_amount, u8* const dst_ptr) {
            std::memcpy(dst_ptr, src_pointer, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
        });
    device_inter->InvalidateRegion(address, size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size) {
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
            std::memset(dest_pointer, 0, copy_amount);
        },
        [&](size_t copy_amount, const u8* const src_ptr) {
            std::memcpy(dest_pointer, src_ptr, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
        });
}

template <typename Traits>
void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* src_pointer,
                                                   size_t size) {
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
        },
        [&](size_t copy_amount, u8* const dst_ptr) {
            std::memcpy(dst_ptr, src_pointer, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
        });
}

template <typename Traits>
Asid DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_device_inter) {
    size_t new_id{};
    if (!id_pool.empty()) {
        new_id = id_pool.front();
        id_pool.pop_front();
        registered_processes[new_id] = memory_device_inter;
    } else {
        registered_processes.emplace_back(memory_device_inter);
        new_id = registered_processes.size() - 1U;
    }
    return Asid{new_id};
}

template <typename Traits>
void DeviceMemoryManager<Traits>::UnregisterProcess(Asid asid) {
    registered_processes[asid.id] = nullptr;
    id_pool.push_front(asid.id);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) {
    std::unique_lock<std::mutex> lk(counter_guard, std::defer_lock);
    const auto Lock = [&] {
        if (!lk) {
            lk.lock();
        }
    };
    u64 uncache_begin = 0;
    u64 cache_begin = 0;
    u64 uncache_bytes = 0;
    u64 cache_bytes = 0;
    const auto MarkRegionCaching = &DeviceMemoryManager<Traits>::DeviceMethods::MarkRegionCaching;

    std::atomic_thread_fence(std::memory_order_acquire);
    const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE);
    size_t page = addr >> Memory::YUZU_PAGEBITS;
    auto [asid, base_vaddress] = ExtractCPUBacking(page);
    size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS;
    auto* memory_device_inter = registered_processes[asid.id];
    for (; page != page_end; ++page) {
        std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page);

        if (delta > 0) {
            ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits<u8>::max(),
                       "Count may overflow!");
        } else if (delta < 0) {
            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
        } else {
            ASSERT_MSG(false, "Delta must be non-zero!");
        }

        // Adds or subtracts 1, as count is an unsigned 8-bit value
        count.fetch_add(static_cast<u8>(delta), std::memory_order_release);

        // Assume delta is either -1 or 1
        if (count.load(std::memory_order::relaxed) == 0) {
            if (uncache_bytes == 0) {
                uncache_begin = vpage;
            }
            uncache_bytes += Memory::YUZU_PAGESIZE;
        } else if (uncache_bytes > 0) {
            Lock();
            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
                              uncache_bytes, false);
            uncache_bytes = 0;
        }
        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
            if (cache_bytes == 0) {
                cache_begin = vpage;
            }
            cache_bytes += Memory::YUZU_PAGESIZE;
        } else if (cache_bytes > 0) {
            Lock();
            MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                              true);
            cache_bytes = 0;
        }
        vpage++;
    }
    if (uncache_bytes > 0) {
        Lock();
        MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
                          false);
    }
    if (cache_bytes > 0) {
        Lock();
        MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                          true);
    }
}

} // namespace Core
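UpdatePagesCachedCount keeps one 8-bit pin counter per device page (packed
eight to a CounterEntry) and batches the 0 -> 1 and 1 -> 0 transitions into
contiguous runs before MarkRegionCaching toggles protection on the backing
guest memory. A sketch of the intended call pattern; the cache shown is
illustrative:

    // A GPU cache pins the pages backing a buffer while it keeps a copy:
    manager.UpdatePagesCachedCount(daddr, size, 1);  // 0 -> 1 marks pages cached
    // ... CPU writes to the region are now tracked via the protection hook ...
    manager.UpdatePagesCachedCount(daddr, size, -1); // 1 -> 0 restores access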
@@ -10,7 +10,7 @@
#include <utility>
#include <vector>

#include "core/memory.h"
#include "core/device_memory_manager.h"

namespace Core {

@@ -23,7 +23,7 @@ public:

    ~GPUDirtyMemoryManager() = default;

    void Collect(VAddr address, size_t size) {
    void Collect(PAddr address, size_t size) {
        TransformAddress t = BuildTransform(address, size);
        TransformAddress tmp, original;
        do {
@@ -47,7 +47,7 @@ public:
                                                std::memory_order_relaxed));
    }

    void Gather(std::function<void(VAddr, size_t)>& callback) {
    void Gather(std::function<void(PAddr, size_t)>& callback) {
        {
            std::scoped_lock lk(guard);
            TransformAddress t = current.exchange(default_transform, std::memory_order_relaxed);
@@ -65,7 +65,7 @@ public:
                mask = mask >> empty_bits;

                const size_t continuous_bits = std::countr_one(mask);
                callback((static_cast<VAddr>(transform.address) << page_bits) + offset,
                callback((static_cast<PAddr>(transform.address) << page_bits) + offset,
                         continuous_bits << align_bits);
                mask = continuous_bits < align_size ? (mask >> continuous_bits) : 0;
                offset += continuous_bits << align_bits;
@@ -80,7 +80,7 @@ private:
        u32 mask;
    };

    constexpr static size_t page_bits = Memory::YUZU_PAGEBITS - 1;
    constexpr static size_t page_bits = DEVICE_PAGEBITS - 1;
    constexpr static size_t page_size = 1ULL << page_bits;
    constexpr static size_t page_mask = page_size - 1;

@@ -89,7 +89,7 @@ private:
    constexpr static size_t align_mask = align_size - 1;
    constexpr static TransformAddress default_transform = {.address = ~0U, .mask = 0U};

    bool IsValid(VAddr address) {
    bool IsValid(PAddr address) {
        return address < (1ULL << 39);
    }

@@ -103,7 +103,7 @@ private:
        return mask;
    }

    TransformAddress BuildTransform(VAddr address, size_t size) {
    TransformAddress BuildTransform(PAddr address, size_t size) {
        const size_t minor_address = address & page_mask;
        const size_t minor_bit = minor_address >> align_bits;
        const size_t top_bit = (minor_address + size + align_mask) >> align_bits;
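With page_bits = DEVICE_PAGEBITS - 1 = 11, one TransformAddress covers a
2 KiB window, and its 32-bit mask then tracks dirtiness at a 64-byte
granularity (2048 / 32, which implies align_bits = 6; that value is inferred
from the u32 mask width and is not shown in these hunks). A worked example of
the transform arithmetic under that reading:

    // Collect(0x1234, 0x80):
    //   transform.address = 0x1234 >> 11 = 2          (which 2 KiB window)
    //   minor_address     = 0x1234 & 0x7FF = 0x234
    //   minor_bit         = 0x234 >> 6 = 8
    //   top_bit           = (0x234 + 0x80 + 0x3F) >> 6 = 11
    //   mask sets bits [8, 11): three 64-byte granules marked dirty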
src/core/guest_memory.h (new file, 214 lines)
@@ -0,0 +1,214 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <iterator>
#include <memory>
#include <optional>
#include <span>
#include <vector>

#include "common/assert.h"
#include "common/scratch_buffer.h"

namespace Core::Memory {

enum GuestMemoryFlags : u32 {
    Read = 1 << 0,
    Write = 1 << 1,
    Safe = 1 << 2,
    Cached = 1 << 3,

    SafeRead = Read | Safe,
    SafeWrite = Write | Safe,
    SafeReadWrite = SafeRead | SafeWrite,
    SafeReadCachedWrite = SafeReadWrite | Cached,

    UnsafeRead = Read,
    UnsafeWrite = Write,
    UnsafeReadWrite = UnsafeRead | UnsafeWrite,
    UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
};
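These flag bits compose into the read/write policies that instantiate the
GuestMemory class defined below: Safe routes through the tracked
ReadBlock/WriteBlock paths (flushing first), Unsafe skips the tracking, and
Cached selects WriteBlockCached on writeback. A hedged instantiation sketch,
with Core::Memory::Memory as the M backend and all object names illustrative:

    // Read 16 guest words; a copy is staged only when the range is not
    // host-contiguous (TrySetSpan fails).
    GuestMemory<Core::Memory::Memory, u32, GuestMemoryFlags::SafeRead>
        words(memory, guest_addr, 16);
    const u32 first = words[0];

    // Scoped read-modify-write: because Write is set, the destructor writes
    // the data back, but only when a staged copy or an address change
    // occurred; direct spans write through immediately.
    {
        GuestMemoryScoped<Core::Memory::Memory, u32, GuestMemoryFlags::SafeReadWrite>
            rw(memory, guest_addr, 16);
        rw[0] = first + 1;
    }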

namespace {
template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemory {
    using iterator = T*;
    using const_iterator = const T*;
    using value_type = T;
    using element_type = T;
    using iterator_category = std::contiguous_iterator_tag;

public:
    GuestMemory() = delete;
    explicit GuestMemory(M& memory, u64 addr, std::size_t size,
                         Common::ScratchBuffer<T>* backup = nullptr)
        : m_memory{memory}, m_addr{addr}, m_size{size} {
        static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
        if constexpr (FLAGS & GuestMemoryFlags::Read) {
            Read(addr, size, backup);
        }
    }

    ~GuestMemory() = default;

    T* data() noexcept {
        return m_data_span.data();
    }

    const T* data() const noexcept {
        return m_data_span.data();
    }

    size_t size() const noexcept {
        return m_size;
    }

    size_t size_bytes() const noexcept {
        return this->size() * sizeof(T);
    }

    [[nodiscard]] T* begin() noexcept {
        return this->data();
    }

    [[nodiscard]] const T* begin() const noexcept {
        return this->data();
    }

    [[nodiscard]] T* end() noexcept {
        return this->data() + this->size();
    }

    [[nodiscard]] const T* end() const noexcept {
        return this->data() + this->size();
    }

    T& operator[](size_t index) noexcept {
        return m_data_span[index];
    }

    const T& operator[](size_t index) const noexcept {
        return m_data_span[index];
    }

    void SetAddressAndSize(u64 addr, std::size_t size) noexcept {
        m_addr = addr;
        m_size = size;
        m_addr_changed = true;
    }

    std::span<T> Read(u64 addr, std::size_t size,
                      Common::ScratchBuffer<T>* backup = nullptr) noexcept {
        m_addr = addr;
        m_size = size;
        if (m_size == 0) {
            m_is_data_copy = true;
            return {};
        }

        if (this->TrySetSpan()) {
            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                m_memory.FlushRegion(m_addr, this->size_bytes());
            }
        } else {
            if (backup) {
                backup->resize_destructive(this->size());
                m_data_span = *backup;
            } else {
                m_data_copy.resize(this->size());
                m_data_span = std::span(m_data_copy);
            }
            m_is_data_copy = true;
            m_span_valid = true;
            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
            } else {
                m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
            }
        }
        return m_data_span;
    }

    void Write(std::span<T> write_data) noexcept {
        if constexpr (FLAGS & GuestMemoryFlags::Cached) {
            m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
        } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
            m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
        } else {
            m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
        }
    }

    bool TrySetSpan() noexcept {
        if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
            m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
            m_span_valid = true;
            return true;
        }
        return false;
    }

protected:
    bool IsDataCopy() const noexcept {
        return m_is_data_copy;
    }

    bool AddressChanged() const noexcept {
        return m_addr_changed;
    }

    M& m_memory;
    u64 m_addr{};
    size_t m_size{};
    std::span<T> m_data_span{};
    std::vector<T> m_data_copy{};
    bool m_span_valid{false};
    bool m_is_data_copy{false};
    bool m_addr_changed{false};
};

template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
public:
    GuestMemoryScoped() = delete;
    explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
                               Common::ScratchBuffer<T>* backup = nullptr)
        : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
        if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
            if (!this->TrySetSpan()) {
                if (backup) {
                    this->m_data_span = *backup;
                    this->m_span_valid = true;
                    this->m_is_data_copy = true;
                }
            }
        }
    }

    ~GuestMemoryScoped() {
        if constexpr (FLAGS & GuestMemoryFlags::Write) {
            if (this->size() == 0) [[unlikely]] {
                return;
            }

            if (this->AddressChanged() || this->IsDataCopy()) {
                ASSERT(this->m_span_valid);
                if constexpr (FLAGS & GuestMemoryFlags::Cached) {
                    this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
 | 
			
		||||
                } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
 | 
			
		||||
                    this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
 | 
			
		||||
                } else {
 | 
			
		||||
                    this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
 | 
			
		||||
                }
 | 
			
		||||
            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
 | 
			
		||||
                                 (FLAGS & GuestMemoryFlags::Cached)) {
 | 
			
		||||
                this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
};
 | 
			
		||||
} // namespace
 | 
			
		||||
 | 
			
		||||
} // namespace Core::Memory
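
For context: a minimal usage sketch of the scoped wrapper, not part of the commit. It assumes the `CpuGuestMemoryScoped` alias and the `SafeReadWrite` flag combination defined elsewhere in this header; the helper name is illustrative.

// Read a small array of guest words, mutate them, and let the destructor
// write the data back (a write-back only happens when the span was a copy).
void IncrementGuestCounters(Core::Memory::Memory& memory, u64 guest_addr, size_t count) {
    Core::Memory::CpuGuestMemoryScoped<u32, Core::Memory::GuestMemoryFlags::SafeReadWrite>
        counters(memory, guest_addr, count);
    for (u32& value : counters) {
        value += 1; // writes through directly when GetSpan() succeeded
    }
} // ~GuestMemoryScoped flushes or invalidates according to FLAGS
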

@@ -5,6 +5,7 @@
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
@@ -320,7 +321,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa

    // Ensure our memory is initialized.
    m_memory.SetCurrentPageTable(*this);
    m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
    m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());

    // Ensure we can insert the code region.
    R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
@@ -417,7 +418,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,

    // Ensure our memory is initialized.
    m_memory.SetCurrentPageTable(*this);
    m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
    m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());

    // Ensure we can insert the code region.
    R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
@@ -1141,8 +1142,7 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
KProcess::KProcess(KernelCore& kernel)
    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
      m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
      m_handle_table{kernel}, m_dirty_memory_managers{},
      m_exclusive_monitor{}, m_memory{kernel.System()} {}
      m_handle_table{kernel}, m_exclusive_monitor{}, m_memory{kernel.System()} {}
KProcess::~KProcess() = default;

Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
@@ -1324,10 +1324,4 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT
    return true;
}

void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
    for (auto& manager : m_dirty_memory_managers) {
        manager.Gather(callback);
    }
}

} // namespace Kernel
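
For context: the gather path is driven from the GPU side with a callback over dirty CPU ranges; a minimal sketch, not part of the commit, assuming a hypothetical FlushRange helper on the consumer side.

// The callback signature matches the declarations above; each invocation
// reports one contiguous range the CPU wrote since the last gather.
std::function<void(VAddr, size_t)> gather = [&](VAddr start, size_t size) {
    rasterizer.FlushRange(start, size); // FlushRange is illustrative
};
system.GatherGPUDirtyMemory(gather);
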

@@ -7,7 +7,6 @@

#include "core/arm/arm_interface.h"
#include "core/file_sys/program_metadata.h"
#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_capabilities.h"
@@ -128,7 +127,6 @@ private:
#ifdef HAS_NCE
    std::unordered_map<u64, u64> m_post_handlers{};
#endif
    std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers;
    std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor;
    Core::Memory::Memory m_memory;

@@ -511,8 +509,6 @@ public:
        return m_memory;
    }

    void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);

    Core::ExclusiveMonitor& GetExclusiveMonitor() const {
        return *m_exclusive_monitor;
    }
@@ -12,6 +12,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
#include "core/guest_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
@@ -23,19 +24,6 @@
#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"

namespace {
static thread_local std::array read_buffer_data_a{
    Common::ScratchBuffer<u8>(),
    Common::ScratchBuffer<u8>(),
    Common::ScratchBuffer<u8>(),
};
static thread_local std::array read_buffer_data_x{
    Common::ScratchBuffer<u8>(),
    Common::ScratchBuffer<u8>(),
    Common::ScratchBuffer<u8>(),
};
} // Anonymous namespace

namespace Service {

SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -343,48 +331,27 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
}

std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
    static thread_local std::array read_buffer_a{
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
    };
    Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);

    ASSERT_OR_EXECUTE_MSG(
        BufferDescriptorA().size() > buffer_index, { return {}; },
        "BufferDescriptorA invalid buffer_index {}", buffer_index);
    auto& read_buffer = read_buffer_a[buffer_index];
    return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
                            BufferDescriptorA()[buffer_index].Size(),
                            &read_buffer_data_a[buffer_index]);
    return gm.Read(BufferDescriptorA()[buffer_index].Address(),
                   BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]);
}

std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
    static thread_local std::array read_buffer_x{
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
    };
    Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);

    ASSERT_OR_EXECUTE_MSG(
        BufferDescriptorX().size() > buffer_index, { return {}; },
        "BufferDescriptorX invalid buffer_index {}", buffer_index);
    auto& read_buffer = read_buffer_x[buffer_index];
    return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
                            BufferDescriptorX()[buffer_index].Size(),
                            &read_buffer_data_x[buffer_index]);
    return gm.Read(BufferDescriptorX()[buffer_index].Address(),
                   BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]);
}

std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
    static thread_local std::array read_buffer_a{
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
    };
    static thread_local std::array read_buffer_x{
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
    };
    Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);

    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                           BufferDescriptorA()[buffer_index].Size()};
@@ -401,18 +368,14 @@ std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) cons
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorA().size() > buffer_index, { return {}; },
            "BufferDescriptorA invalid buffer_index {}", buffer_index);
        auto& read_buffer = read_buffer_a[buffer_index];
        return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
                                BufferDescriptorA()[buffer_index].Size(),
                                &read_buffer_data_a[buffer_index]);
        return gm.Read(BufferDescriptorA()[buffer_index].Address(),
                       BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]);
    } else {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorX().size() > buffer_index, { return {}; },
            "BufferDescriptorX invalid buffer_index {}", buffer_index);
        auto& read_buffer = read_buffer_x[buffer_index];
        return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
                                BufferDescriptorX()[buffer_index].Size(),
                                &read_buffer_data_x[buffer_index]);
        return gm.Read(BufferDescriptorX()[buffer_index].Address(),
                       BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]);
    }
}
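
For context: a sketch of how a service handler consumes these buffers. The service name and reply shape are illustrative and not part of the commit; the ReadBuffer/WriteBuffer calls mirror the declarations above.

// Hypothetical HLE handler: copy the request buffer straight back to the reply.
void IExampleService::Echo(HLERequestContext& ctx) {
    const std::span<const u8> input = ctx.ReadBuffer(); // descriptor A or X
    ctx.WriteBuffer(input.data(), input.size_bytes());  // descriptor B or C
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}
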

@@ -41,6 +41,8 @@ class KernelCore;
class KHandleTable;
class KProcess;
class KServerSession;
template <typename T>
class KScopedAutoObject;
class KThread;
} // namespace Kernel

@@ -424,6 +426,9 @@ private:

    Kernel::KernelCore& kernel;
    Core::Memory::Memory& memory;

    mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_a{};
    mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_x{};
};

} // namespace Service

@@ -2,27 +2,135 @@
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later

#include <atomic>
#include <deque>
#include <mutex>

#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/heap_mapper.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/memory.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::NvCore {

Session::Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_)
    : id{id_}, process{process_}, asid{asid_}, has_preallocated_area{}, mapper{}, is_active{} {}

Session::~Session() = default;

struct ContainerImpl {
    explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
        : file{host1x_}, manager{host1x_}, device_file_data{} {}
    explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
        : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
    Tegra::Host1x::Host1x& host1x;
    NvMap file;
    SyncpointManager manager;
    Container::Host1xDeviceFileData device_file_data;
    std::deque<Session> sessions;
    size_t new_ids{};
    std::deque<size_t> id_pool;
    std::mutex session_guard;
};

Container::Container(Tegra::Host1x::Host1x& host1x_) {
    impl = std::make_unique<ContainerImpl>(host1x_);
    impl = std::make_unique<ContainerImpl>(*this, host1x_);
}

Container::~Container() = default;

SessionId Container::OpenSession(Kernel::KProcess* process) {
    using namespace Common::Literals;

    std::scoped_lock lk(impl->session_guard);
    for (auto& session : impl->sessions) {
        if (!session.is_active) {
            continue;
        }
        if (session.process == process) {
            return session.id;
        }
    }
    size_t new_id{};
    auto* memory_interface = &process->GetMemory();
    auto& smmu = impl->host1x.MemoryManager();
    auto asid = smmu.RegisterProcess(memory_interface);
    if (!impl->id_pool.empty()) {
        new_id = impl->id_pool.front();
        impl->id_pool.pop_front();
        impl->sessions[new_id] = Session{SessionId{new_id}, process, asid};
    } else {
        new_id = impl->new_ids++;
        impl->sessions.emplace_back(SessionId{new_id}, process, asid);
    }
    auto& session = impl->sessions[new_id];
    session.is_active = true;
    // Optimization
    if (process->IsApplication()) {
        auto& page_table = process->GetPageTable().GetBasePageTable();
        auto heap_start = page_table.GetHeapRegionStart();

        Kernel::KProcessAddress cur_addr = heap_start;
        size_t region_size = 0;
        VAddr region_start = 0;
        while (true) {
            Kernel::KMemoryInfo mem_info{};
            Kernel::Svc::PageInfo page_info{};
            R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
                                          cur_addr));
            auto svc_mem_info = mem_info.GetSvcMemoryInfo();

            // Check if this memory block is heap.
            if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) {
                if (svc_mem_info.size > region_size) {
                    region_size = svc_mem_info.size;
                    region_start = svc_mem_info.base_address;
                }
            }

            // Check if we're done.
            const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
            if (next_address <= GetInteger(cur_addr)) {
                break;
            }

            cur_addr = next_address;
        }
        session.has_preallocated_area = false;
        auto start_region = region_size >= 32_MiB ? smmu.Allocate(region_size) : 0;
        if (start_region != 0) {
            session.mapper = std::make_unique<HeapMapper>(region_start, start_region, region_size,
                                                          asid, impl->host1x);
            smmu.TrackContinuity(start_region, region_start, region_size, asid);
            session.has_preallocated_area = true;
            LOG_DEBUG(Debug, "Preallocation created!");
        }
    }
    return SessionId{new_id};
}

void Container::CloseSession(SessionId session_id) {
    std::scoped_lock lk(impl->session_guard);
    auto& session = impl->sessions[session_id.id];
    auto& smmu = impl->host1x.MemoryManager();
    if (session.has_preallocated_area) {
        const DAddr region_start = session.mapper->GetRegionStart();
        const size_t region_size = session.mapper->GetRegionSize();
        session.mapper.reset();
        smmu.Free(region_start, region_size);
        session.has_preallocated_area = false;
    }
    session.is_active = false;
    smmu.UnregisterProcess(impl->sessions[session_id.id].asid);
    impl->id_pool.emplace_front(session_id.id);
}

Session* Container::GetSession(SessionId session_id) {
    std::atomic_thread_fence(std::memory_order_acquire);
    return &impl->sessions[session_id.id];
}

NvMap& Container::GetNvMapFile() {
    return impl->file;
}
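
For context: the intended session lifecycle, sketched from the nvdrv side using only the Container API shown above (the surrounding service code is not part of this diff):

// On nvdrv service open: bind the caller's process to an SMMU ASID.
NvCore::SessionId session_id = container.OpenSession(process);
// Devices later resolve the session to reach its mapper and ASID.
NvCore::Session* session = container.GetSession(session_id);
// On service close: tear down the preallocated heap window and free the ASID.
container.CloseSession(session_id);
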

@@ -8,24 +8,56 @@
#include <memory>
#include <unordered_map>

#include "core/device_memory_manager.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Kernel {
class KProcess;
}

namespace Tegra::Host1x {
class Host1x;
} // namespace Tegra::Host1x

namespace Service::Nvidia::NvCore {

class HeapMapper;
class NvMap;
class SyncpointManager;

struct ContainerImpl;

struct SessionId {
    size_t id;
};

struct Session {
    Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_);
    ~Session();

    Session(const Session&) = delete;
    Session& operator=(const Session&) = delete;
    Session(Session&&) = default;
    Session& operator=(Session&&) = default;

    SessionId id;
    Kernel::KProcess* process;
    Core::Asid asid;
    bool has_preallocated_area{};
    std::unique_ptr<HeapMapper> mapper{};
    bool is_active{};
};

class Container {
public:
    explicit Container(Tegra::Host1x::Host1x& host1x);
    ~Container();

    SessionId OpenSession(Kernel::KProcess* process);
    void CloseSession(SessionId id);

    Session* GetSession(SessionId id);

    NvMap& GetNvMapFile();

    const NvMap& GetNvMapFile() const;

src/core/hle/service/nvdrv/core/heap_mapper.cpp (new file, 175 lines)
@@ -0,0 +1,175 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include <mutex>

#include <boost/container/small_vector.hpp>
#define BOOST_NO_MT
#include <boost/pool/detail/mutex.hpp>
#undef BOOST_NO_MT
#include <boost/icl/interval.hpp>
#include <boost/icl/interval_base_set.hpp>
#include <boost/icl/interval_set.hpp>
#include <boost/icl/split_interval_map.hpp>
#include <boost/pool/pool.hpp>
#include <boost/pool/pool_alloc.hpp>
#include <boost/pool/poolfwd.hpp>

#include "core/hle/service/nvdrv/core/heap_mapper.h"
#include "video_core/host1x/host1x.h"

namespace boost {
template <typename T>
class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
}

namespace Service::Nvidia::NvCore {

using IntervalCompare = std::less<DAddr>;
using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
using IntervalSet = boost::icl::interval_set<DAddr>;
using IntervalType = typename IntervalSet::interval_type;

template <typename Type>
struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
    // types
    typedef counter_add_functor<Type> type;
    typedef boost::icl::identity_based_inplace_combine<Type> base_type;

    // public member functions
    void operator()(Type& current, const Type& added) const {
        current += added;
        if (current < base_type::identity_element()) {
            current = base_type::identity_element();
        }
    }

    // public static functions
    static void version(Type&){};
};

using OverlapCombine = counter_add_functor<int>;
using OverlapSection = boost::icl::inter_section<int>;
using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;

struct HeapMapper::HeapMapperInternal {
    HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {}
    ~HeapMapperInternal() = default;

    template <typename Func>
    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
                                 Func&& func) {
        const DAddr start_address = cpu_addr;
        const DAddr end_address = start_address + size;
        const IntervalType search_interval{start_address, end_address};
        auto it = current_range.lower_bound(search_interval);
        if (it == current_range.end()) {
            return;
        }
        auto end_it = current_range.upper_bound(search_interval);
        for (; it != end_it; it++) {
            auto& inter = it->first;
            DAddr inter_addr_end = inter.upper();
            DAddr inter_addr = inter.lower();
            if (inter_addr_end > end_address) {
                inter_addr_end = end_address;
            }
            if (inter_addr < start_address) {
                inter_addr = start_address;
            }
            func(inter_addr, inter_addr_end, it->second);
        }
    }

    void RemoveEachInOverlapCounter(OverlapCounter& current_range,
                                    const IntervalType search_interval, int subtract_value) {
        bool any_removals = false;
        current_range.add(std::make_pair(search_interval, subtract_value));
        do {
            any_removals = false;
            auto it = current_range.lower_bound(search_interval);
            if (it == current_range.end()) {
                return;
            }
            auto end_it = current_range.upper_bound(search_interval);
            for (; it != end_it; it++) {
                if (it->second <= 0) {
                    any_removals = true;
                    current_range.erase(it);
                    break;
                }
            }
        } while (any_removals);
    }

    IntervalSet base_set;
    OverlapCounter mapping_overlaps;
    Tegra::MaxwellDeviceMemoryManager& device_memory;
    std::mutex guard;
};

HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
                       Tegra::Host1x::Host1x& host1x)
    : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_asid{asid} {
    m_internal = std::make_unique<HeapMapperInternal>(host1x);
}

HeapMapper::~HeapMapper() {
    m_internal->device_memory.Unmap(m_daddress, m_size);
}

DAddr HeapMapper::Map(VAddr start, size_t size) {
    std::scoped_lock lk(m_internal->guard);
    m_internal->base_set.clear();
    const IntervalType interval{start, start + size};
    m_internal->base_set.insert(interval);
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
                                        [this](VAddr start_addr, VAddr end_addr, int) {
                                            const IntervalType other{start_addr, end_addr};
                                            m_internal->base_set.subtract(other);
                                        });
    if (!m_internal->base_set.empty()) {
        auto it = m_internal->base_set.begin();
        auto end_it = m_internal->base_set.end();
        for (; it != end_it; it++) {
            const VAddr inter_addr_end = it->upper();
            const VAddr inter_addr = it->lower();
            const size_t offset = inter_addr - m_vaddress;
            const size_t sub_size = inter_addr_end - inter_addr;
            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
                                          m_asid);
        }
    }
    m_internal->mapping_overlaps += std::make_pair(interval, 1);
    m_internal->base_set.clear();
    return m_daddress + (start - m_vaddress);
}

void HeapMapper::Unmap(VAddr start, size_t size) {
    std::scoped_lock lk(m_internal->guard);
    m_internal->base_set.clear();
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
                                        [this](VAddr start_addr, VAddr end_addr, int value) {
                                            if (value <= 1) {
                                                const IntervalType other{start_addr, end_addr};
                                                m_internal->base_set.insert(other);
                                            }
                                        });
    if (!m_internal->base_set.empty()) {
        auto it = m_internal->base_set.begin();
        auto end_it = m_internal->base_set.end();
        for (; it != end_it; it++) {
            const VAddr inter_addr_end = it->upper();
            const VAddr inter_addr = it->lower();
            const size_t offset = inter_addr - m_vaddress;
            const size_t sub_size = inter_addr_end - inter_addr;
            m_internal->device_memory.Unmap(m_daddress + offset, sub_size);
        }
    }
    const IntervalType to_remove{start, start + size};
    m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1);
    m_internal->base_set.clear();
}

} // namespace Service::Nvidia::NvCore
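
For context: the split_interval_map above is what makes Map/Unmap reference-counted per sub-range. A standalone sketch of that bookkeeping (plain integers instead of DAddr; not part of the commit):

#include <iostream>
#include <boost/icl/split_interval_map.hpp>

int main() {
    using boost::icl::interval;
    boost::icl::split_interval_map<unsigned long long, int> overlaps;
    // Two overlapping "Map" calls over [0x1000,0x3000) and [0x2000,0x4000).
    overlaps += std::make_pair(interval<unsigned long long>::right_open(0x1000, 0x3000), 1);
    overlaps += std::make_pair(interval<unsigned long long>::right_open(0x2000, 0x4000), 1);
    // Prints three sub-ranges with counts 1, 2, 1; only transitions from
    // count 0 would actually be forwarded to the device memory manager.
    for (const auto& [range, count] : overlaps) {
        std::cout << std::hex << range.lower() << "-" << range.upper() << ": " << count << '\n';
    }
}
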

src/core/hle/service/nvdrv/core/heap_mapper.h (new file, 49 lines)
@@ -0,0 +1,49 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include <memory>

#include "common/common_types.h"
#include "core/device_memory_manager.h"

namespace Tegra::Host1x {
class Host1x;
} // namespace Tegra::Host1x

namespace Service::Nvidia::NvCore {

class HeapMapper {
public:
    HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
               Tegra::Host1x::Host1x& host1x);
    ~HeapMapper();

    bool IsInBounds(VAddr start, size_t size) const {
        VAddr end = start + size;
        return start >= m_vaddress && end <= (m_vaddress + m_size);
    }

    DAddr Map(VAddr start, size_t size);

    void Unmap(VAddr start, size_t size);

    DAddr GetRegionStart() const {
        return m_daddress;
    }

    size_t GetRegionSize() const {
        return m_size;
    }

private:
    struct HeapMapperInternal;
    VAddr m_vaddress;
    DAddr m_daddress;
    size_t m_size;
    Core::Asid m_asid;
    std::unique_ptr<HeapMapperInternal> m_internal;
};

} // namespace Service::Nvidia::NvCore
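
For context: a minimal usage sketch, assuming a heap window has already been carved out by Container::OpenSession; the variable names are illustrative and this is not part of the commit.

// One mapper spans the preallocated window [heap_vaddr, heap_vaddr + heap_size).
Service::Nvidia::NvCore::HeapMapper mapper(heap_vaddr, smmu_daddr, heap_size, asid, host1x);
if (mapper.IsInBounds(buffer_vaddr, buffer_size)) {
    const DAddr daddr = mapper.Map(buffer_vaddr, buffer_size); // refcounted per sub-range
    // ... hand daddr to the GPU ...
    mapper.Unmap(buffer_vaddr, buffer_size); // device unmap only when the count hits zero
}
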
@@ -2,14 +2,19 @@
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later

#include <functional>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/heap_mapper.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/memory.h"
#include "video_core/host1x/host1x.h"

using Core::Memory::YUZU_PAGESIZE;
constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;

namespace Service::Nvidia::NvCore {
NvMap::Handle::Handle(u64 size_, Id id_)
@@ -17,9 +22,9 @@ NvMap::Handle::Handle(u64 size_, Id id_)
    flags.raw = 0;
}

NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
                              NvCore::SessionId pSessionId) {
    std::scoped_lock lock(mutex);

    // Handles cannot be allocated twice
    if (allocated) {
        return NvResult::AccessDenied;
@@ -28,6 +33,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
    flags = pFlags;
    kind = pKind;
    align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
    session_id = pSessionId;

    // This flag is only applicable for handles with an address passed
    if (pAddress) {
@@ -63,7 +69,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
    return NvResult::Success;
}

NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {}

void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
    std::scoped_lock lock(handles_lock);
@@ -78,12 +84,30 @@ void NvMap::UnmapHandle(Handle& handle_description) {
        handle_description.unmap_queue_entry.reset();
    }

    // Free and unmap the handle from Host1x GMMU
    if (handle_description.pin_virt_address) {
        host1x.GMMU().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
                            handle_description.aligned_size);
        host1x.Allocator().Free(handle_description.pin_virt_address,
                                static_cast<u32>(handle_description.aligned_size));
        handle_description.pin_virt_address = 0;
    }

    // Free and unmap the handle from the SMMU
    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
                                 handle_description.aligned_size);
    host1x.Allocator().Free(handle_description.pin_virt_address,
                            static_cast<u32>(handle_description.aligned_size));
    handle_description.pin_virt_address = 0;
    const size_t map_size = handle_description.aligned_size;
    if (!handle_description.in_heap) {
        auto& smmu = host1x.MemoryManager();
        size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
        smmu.Unmap(handle_description.d_address, map_size);
        smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
        handle_description.d_address = 0;
        return;
    }
    const VAddr vaddress = handle_description.address;
    auto* session = core.GetSession(handle_description.session_id);
    session->mapper->Unmap(vaddress, map_size);
    handle_description.d_address = 0;
    handle_description.in_heap = false;
}

bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -124,22 +148,33 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
    }
}

VAddr NvMap::GetHandleAddress(Handle::Id handle) {
DAddr NvMap::GetHandleAddress(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    try {
        return handles.at(handle)->address;
        return handles.at(handle)->d_address;
    } catch (std::out_of_range&) {
        return 0;
    }
}

u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
    auto handle_description{GetHandle(handle)};
    if (!handle_description) [[unlikely]] {
        return 0;
    }

    std::scoped_lock lock(handle_description->mutex);
    const auto map_low_area = [&] {
        if (handle_description->pin_virt_address == 0) {
            auto& gmmu_allocator = host1x.Allocator();
            auto& gmmu = host1x.GMMU();
            u32 address =
                gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size));
            gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address,
                     handle_description->aligned_size);
            handle_description->pin_virt_address = address;
        }
    };
    if (!handle_description->pins) {
        // If we're in the unmap queue we can just remove ourselves and return since we're already
        // mapped
@@ -151,37 +186,58 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
                unmap_queue.erase(*handle_description->unmap_queue_entry);
                handle_description->unmap_queue_entry.reset();

                if (low_area_pin) {
                    map_low_area();
                    handle_description->pins++;
                    return static_cast<DAddr>(handle_description->pin_virt_address);
                }

                handle_description->pins++;
                return handle_description->pin_virt_address;
                return handle_description->d_address;
            }
        }

        using namespace std::placeholders;
        // If not then allocate some space and map it
        u32 address{};
        auto& smmu_allocator = host1x.Allocator();
        auto& smmu_memory_manager = host1x.MemoryManager();
        while ((address = smmu_allocator.Allocate(
                    static_cast<u32>(handle_description->aligned_size))) == 0) {
            // Free handles until the allocation succeeds
            std::scoped_lock queueLock(unmap_queue_lock);
            if (auto freeHandleDesc{unmap_queue.front()}) {
                // Handles in the unmap queue are guaranteed not to be pinned so don't bother
                // checking if they are before unmapping
                std::scoped_lock freeLock(freeHandleDesc->mutex);
                if (handle_description->pin_virt_address)
                    UnmapHandle(*freeHandleDesc);
            } else {
                LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
        DAddr address{};
        auto& smmu = host1x.MemoryManager();
        auto* session = core.GetSession(handle_description->session_id);
        const VAddr vaddress = handle_description->address;
        const size_t map_size = handle_description->aligned_size;
        if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) {
            handle_description->d_address = session->mapper->Map(vaddress, map_size);
            handle_description->in_heap = true;
        } else {
            size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
            while ((address = smmu.Allocate(aligned_up)) == 0) {
                // Free handles until the allocation succeeds
                std::scoped_lock queueLock(unmap_queue_lock);
                if (auto freeHandleDesc{unmap_queue.front()}) {
                    // Handles in the unmap queue are guaranteed not to be pinned so don't bother
                    // checking if they are before unmapping
                    std::scoped_lock freeLock(freeHandleDesc->mutex);
                    if (handle_description->d_address)
                        UnmapHandle(*freeHandleDesc);
                } else {
                    LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
                }
            }
        }

        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
                                handle_description->aligned_size);
        handle_description->pin_virt_address = address;
            handle_description->d_address = address;
            smmu.Map(address, vaddress, map_size, session->asid, true);
            handle_description->in_heap = false;
        }
    }

    if (low_area_pin) {
        map_low_area();
    }

    handle_description->pins++;
    return handle_description->pin_virt_address;
    if (low_area_pin) {
        return static_cast<DAddr>(handle_description->pin_virt_address);
    }
    return handle_description->d_address;
}
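
For context: pinning is reference-counted; a sketch of the expected call pattern from a device implementation, using only the PinHandle/UnpinHandle API shown in this diff (the handle id is illustrative):

const DAddr daddr = nvmap.PinHandle(handle_id, /*low_area_pin=*/false);
if (daddr != 0) {
    // ... program DMA with daddr ...
    nvmap.UnpinHandle(handle_id); // moves the handle to the unmap queue at zero pins
}
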

void NvMap::UnpinHandle(Handle::Id handle) {
@@ -232,7 +288,7 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
                LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
            } else if (handle_description->dupes == 0) {
                // Force unmap the handle
                if (handle_description->pin_virt_address) {
                if (handle_description->d_address) {
                    std::scoped_lock queueLock(unmap_queue_lock);
                    UnmapHandle(*handle_description);
                }

@@ -14,6 +14,7 @@

#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Tegra {
@@ -25,6 +26,8 @@ class Host1x;
} // namespace Tegra

namespace Service::Nvidia::NvCore {

class Container;
/**
 * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
 * handles
@@ -48,7 +51,7 @@ public:
        using Id = u32;
        Id id; //!< A globally unique identifier for this handle

        s32 pins{};
        s64 pins{};
        u32 pin_virt_address{};
        std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};

@@ -61,15 +64,18 @@
        } flags{};
        static_assert(sizeof(Flags) == sizeof(u32));

        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
                       //!< this can also be in the nvdrv tmem
        VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
                         //!< this can also be in the nvdrv tmem
        bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                     //!< call

        u8 kind{};        //!< Used for memory compression
        bool allocated{}; //!< If the handle has been allocated with `Alloc`
        bool in_heap{};
        NvCore::SessionId session_id{};

        u64 dma_map_addr{}; //! remove me after implementing pinning.
        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
                           //!< to, this can also be in the nvdrv tmem

        Handle(u64 size, Id id);

@@ -77,7 +83,8 @@ public:
         * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
         * if a 0 address is passed
         */
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
                                     NvCore::SessionId pSessionId);

        /**
         * @brief Increases the dupe counter of the handle for the given session
@@ -108,7 +115,7 @@
        bool can_unlock;   //!< If the address region is ready to be unlocked
    };

    explicit NvMap(Tegra::Host1x::Host1x& host1x);
    explicit NvMap(Container& core, Tegra::Host1x::Host1x& host1x);

    /**
     * @brief Creates an unallocated handle of the given size
@@ -117,7 +124,7 @@

    std::shared_ptr<Handle> GetHandle(Handle::Id handle);

    VAddr GetHandleAddress(Handle::Id handle);
    DAddr GetHandleAddress(Handle::Id handle);

    /**
     * @brief Maps a handle into the SMMU address space
@@ -125,7 +132,7 @@
     * number of calls to `UnpinHandle`
     * @return The SMMU virtual address that the handle has been mapped to
     */
    u32 PinHandle(Handle::Id handle);
    DAddr PinHandle(Handle::Id handle, bool low_area_pin);

    /**
     * @brief When this has been called an equal number of times to `PinHandle` for the supplied
@@ -172,5 +179,7 @@ private:
     * @return If the handle was removed from the map
     */
    bool TryRemoveHandle(const Handle& handle_description);

    Container& core;
};
} // namespace Service::Nvidia::NvCore

@@ -7,6 +7,7 @@
#include <vector>

#include "common/common_types.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Core {
@@ -62,7 +63,7 @@
     * Called once a device is opened
     * @param fd The device fd
     */
    virtual void OnOpen(DeviceFD fd) = 0;
    virtual void OnOpen(NvCore::SessionId session_id, DeviceFD fd) = 0;

    /**
     * Called once a device is closed

@@ -35,14 +35,14 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
    return NvResult::NotImplemented;
}

void nvdisp_disp0::OnOpen(DeviceFD fd) {}
void nvdisp_disp0::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvdisp_disp0::OnClose(DeviceFD fd) {}

void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
                        u32 height, u32 stride, android::BufferTransformFlags transform,
                        const Common::Rectangle<int>& crop_rect,
                        std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
    const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
    const DAddr addr = nvmap.GetHandleAddress(buffer_handle);
    LOG_TRACE(Service,
              "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
              addr, offset, width, height, stride, format);

@@ -32,7 +32,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

    /// Performs a screen flip, drawing the buffer pointed to by the handle.

@@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
    return NvResult::NotImplemented;
}

void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
void nvhost_as_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_as_gpu::OnClose(DeviceFD fd) {}

NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
@@ -206,6 +206,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
 | 
			
		||||
                       static_cast<u32>(aligned_size >> page_size_bits));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    nvmap.UnpinHandle(mapping->handle);
 | 
			
		||||
 | 
			
		||||
    // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
 | 
			
		||||
    // Only FreeSpace can unmap them fully
 | 
			
		||||
    if (mapping->sparse_alloc) {
 | 
			
		||||
@@ -293,12 +295,12 @@ NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
 | 
			
		||||
                return NvResult::BadValue;
 | 
			
		||||
            }
 | 
			
		||||
 | 
			
		||||
            VAddr cpu_address{static_cast<VAddr>(
 | 
			
		||||
                handle->address +
 | 
			
		||||
                (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
 | 
			
		||||
            DAddr base = nvmap.PinHandle(entry.handle, false);
 | 
			
		||||
            DAddr device_address{static_cast<DAddr>(
 | 
			
		||||
                base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
 | 
			
		||||
 | 
			
		||||
            gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
 | 
			
		||||
                      use_big_pages);
 | 
			
		||||
            gmmu->Map(virtual_address, device_address, size,
 | 
			
		||||
                      static_cast<Tegra::PTEKind>(entry.kind), use_big_pages);
 | 
			
		||||
        }
 | 
			
		||||
    }
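
Remap now resolves each entry by pinning its handle first, so the base it offsets from is a device address in the SMMU rather than the handle's guest address; gmmu->Map correspondingly consumes a DAddr. Per entry, the new logic reduces to:

// Condensed restatement of the per-entry logic above.
DAddr base = nvmap.PinHandle(entry.handle, false);
DAddr device_address =
    base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits);
gmmu->Map(virtual_address, device_address, size,
          static_cast<Tegra::PTEKind>(entry.kind), use_big_pages);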

@@ -331,9 +333,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
            }

            u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
            VAddr cpu_address{mapping->ptr + params.buffer_offset};
            VAddr device_address{mapping->ptr + params.buffer_offset};

            gmmu->Map(gpu_address, cpu_address, params.mapping_size,
            gmmu->Map(gpu_address, device_address, params.mapping_size,
                      static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);

            return NvResult::Success;
@@ -349,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
        return NvResult::BadValue;
    }

    VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
    DAddr device_address{
        static_cast<DAddr>(nvmap.PinHandle(params.handle, false) + params.buffer_offset)};
    u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};

    bool big_page{[&]() {
@@ -373,15 +376,14 @@
        }

        const bool use_big_pages = alloc->second.big_pages && big_page;
        gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
        gmmu->Map(params.offset, device_address, size, static_cast<Tegra::PTEKind>(params.kind),
                  use_big_pages);

        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
                                               use_big_pages, alloc->second.sparse)};
        auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
                                               true, use_big_pages, alloc->second.sparse)};
        alloc->second.mappings.push_back(mapping);
        mapping_map[params.offset] = mapping;
    } else {

        auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
        u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
        u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
@@ -394,11 +396,11 @@
            return NvResult::InsufficientMemory;
        }

        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
        gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size),
                  static_cast<Tegra::PTEKind>(params.kind), big_page);

        auto mapping{
            std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
        auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
                                               false, big_page, false)};
        mapping_map[params.offset] = mapping;
    }

@@ -433,6 +435,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
            gmmu->Unmap(params.offset, mapping->size);
        }

        nvmap.UnpinHandle(mapping->handle);

        mapping_map.erase(params.offset);
    } catch (const std::out_of_range&) {
        LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
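
Both teardown paths above (FreeMappingLocked and UnmapBuffer) now call nvmap.UnpinHandle with the mapping's handle, balancing the PinHandle taken when the buffer was mapped; without it, every map/unmap cycle would leak a pin and keep the SMMU range alive. Reduced to its skeleton (a sketch, using the members of the surrounding class):

// Map: pin once, record the handle next to the mapping.
DAddr device_address = nvmap.PinHandle(params.handle, false);
gmmu->Map(params.offset, device_address, size, kind, big_page);
// Unmap: undo in reverse order.
gmmu->Unmap(params.offset, mapping->size);
nvmap.UnpinHandle(mapping->handle); // releases the pin taken at map time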

@@ -55,7 +55,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

    Kernel::KEvent* QueryEvent(u32 event_id) override;
@@ -159,16 +159,18 @@ private:
    NvCore::NvMap& nvmap;

    struct Mapping {
        VAddr ptr;
        NvCore::NvMap::Handle::Id handle;
        DAddr ptr;
        u64 offset;
        u64 size;
        bool fixed;
        bool big_page; // Only valid if fixed == false
        bool sparse_alloc;

        Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
            : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
              sparse_alloc(sparse_alloc_) {}
        Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_,
                bool big_page_, bool sparse_alloc_)
            : handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_),
              big_page(big_page_), sparse_alloc(sparse_alloc_) {}
    };

    struct Allocation {
@@ -212,9 +214,6 @@ private:
        bool initialised{};
    } vm;
    std::shared_ptr<Tegra::MemoryManager> gmmu;

    // s32 channel{};
    // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
};

} // namespace Service::Nvidia::Devices
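
Storing the NvMap handle id inside Mapping is what makes the unpin above possible: the unmap paths are keyed only by GPU virtual address, so the handle has to travel with the mapping it created. A sketch of the round trip (names as declared in this header):

// Creation: the handle id is stored alongside the device address.
auto mapping = std::make_shared<Mapping>(params.handle, device_address, params.offset,
                                         size, /*fixed=*/true, use_big_pages, sparse);
mapping_map[params.offset] = mapping;
// Teardown: look the mapping up by GPU VA, then unpin via the stored id.
nvmap.UnpinHandle(mapping_map[params.offset]->handle);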

@@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inp
    return NvResult::NotImplemented;
}

void nvhost_ctrl::OnOpen(DeviceFD fd) {}
void nvhost_ctrl::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}

void nvhost_ctrl::OnClose(DeviceFD fd) {}


@@ -32,7 +32,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

    Kernel::KEvent* QueryEvent(u32 event_id) override;

@@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
    return NvResult::NotImplemented;
}

void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
void nvhost_ctrl_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}

NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {

@@ -28,7 +28,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

    Kernel::KEvent* QueryEvent(u32 event_id) override;

@@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
    return NvResult::NotImplemented;
}

void nvhost_gpu::OnOpen(DeviceFD fd) {}
void nvhost_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_gpu::OnClose(DeviceFD fd) {}

NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {

@@ -47,7 +47,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

    Kernel::KEvent* QueryEvent(u32 event_id) override;

@@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
        case 0x7:
            return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
        case 0x9:
            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd);
        case 0xa:
            return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
        default:
@@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
    return NvResult::NotImplemented;
}

void nvhost_nvdec::OnOpen(DeviceFD fd) {
void nvhost_nvdec::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
    LOG_INFO(Service_NVDRV, "NVDEC video stream started");
    system.SetNVDECActive(true);
    sessions[fd] = session_id;
}

void nvhost_nvdec::OnClose(DeviceFD fd) {
@@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) {
        system.GPU().ClearCdmaInstance(iter->second);
    }
    system.SetNVDECActive(false);
    auto it = sessions.find(fd);
    if (it != sessions.end()) {
        sessions.erase(it);
    }
}

} // namespace Service::Nvidia::Devices
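
OnOpen now receives the SessionId of the process that opened the device, and nvhost_nvdec records it per fd so later ioctls on the same fd can recover the owning session. Several devices below repeat the same bookkeeping, which amounts to (sketch, using the types from the surrounding headers):

// Per-fd session bookkeeping shared by the nv* devices.
std::unordered_map<DeviceFD, NvCore::SessionId> sessions;

void OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
    sessions[fd] = session_id; // remember which session opened this fd
}

void OnClose(DeviceFD fd) {
    sessions.erase(fd); // drop the association when the fd goes away
}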

@@ -20,7 +20,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;
};


@@ -8,6 +8,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
@@ -95,6 +96,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
    offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);

    auto& gpu = system.GPU();
    auto* session = core.GetSession(sessions[fd]);

    if (gpu.UseNvdec()) {
        for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
            const SyncptIncr& syncpt_incr = syncpt_increments[i];
@@ -106,8 +109,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
        const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
        ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
        Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
        system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                                             cmdlist.size() * sizeof(u32));
        session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                                                cmdlist.size() * sizeof(u32));
        gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
    }
    // Some games expect command_buffers to be written back
@@ -133,10 +136,12 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
    return NvResult::Success;
}

NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries,
                                        DeviceFD fd) {
    const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
    for (size_t i = 0; i < num_entries; i++) {
        entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
        DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, true);
        entries[i].map_address = static_cast<u32>(pin_address);
    }

    return NvResult::Success;
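
MapBuffer pins with low_area_pin set to true because the multimedia engines address their buffers through 32-bit registers, and the ioctl reply's map_address field is itself only a u32. The narrowing cast is therefore only safe under the assumption that a low-area pin always lands below 4 GiB in the device address space:

// The reply field is 32 bits wide, hence the low-area pin.
DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, /*low_area_pin=*/true);
entries[i].map_address = static_cast<u32>(pin_address); // assumes pin_address < 4 GiB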

@@ -4,7 +4,9 @@
#pragma once

#include <deque>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
@@ -111,7 +113,7 @@ protected:
    NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
    NvResult GetSyncpoint(IoctlGetSyncpoint& params);
    NvResult GetWaitbase(IoctlGetWaitbase& params);
    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd);
    NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
    NvResult SetSubmitTimeout(u32 timeout);

@@ -125,6 +127,7 @@ protected:
    NvCore::NvMap& nvmap;
    NvCore::ChannelType channel_type;
    std::array<u32, MaxSyncPoints> device_syncpoints{};
    std::unordered_map<DeviceFD, NvCore::SessionId> sessions;
};
} // namespace Devices
} // namespace Service::Nvidia

@@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
    return NvResult::NotImplemented;
}

void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
void nvhost_nvjpg::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_nvjpg::OnClose(DeviceFD fd) {}

NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {

@@ -22,7 +22,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

private:

@@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
        case 0x3:
            return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
        case 0x9:
            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd);
        case 0xa:
            return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
        default:
@@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
    return NvResult::NotImplemented;
}

void nvhost_vic::OnOpen(DeviceFD fd) {}
void nvhost_vic::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
    sessions[fd] = session_id;
}

void nvhost_vic::OnClose(DeviceFD fd) {
    auto& host1x_file = core.Host1xDeviceFile();
@@ -76,6 +78,7 @@ void nvhost_vic::OnClose(DeviceFD fd) {
    if (iter != host1x_file.fd_to_id.end()) {
        system.GPU().ClearCdmaInstance(iter->second);
    }
    sessions.erase(fd);
}

} // namespace Service::Nvidia::Devices

@@ -19,7 +19,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;
};
} // namespace Service::Nvidia::Devices

@@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
        case 0x3:
            return WrapFixed(this, &nvmap::IocFromId, input, output);
        case 0x4:
            return WrapFixed(this, &nvmap::IocAlloc, input, output);
            return WrapFixed(this, &nvmap::IocAlloc, input, output, fd);
        case 0x5:
            return WrapFixed(this, &nvmap::IocFree, input, output);
            return WrapFixed(this, &nvmap::IocFree, input, output, fd);
        case 0x9:
            return WrapFixed(this, &nvmap::IocParam, input, output);
        case 0xe:
@@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
    return NvResult::NotImplemented;
}

void nvmap::OnOpen(DeviceFD fd) {}
void nvmap::OnClose(DeviceFD fd) {}
void nvmap::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
    sessions[fd] = session_id;
}
void nvmap::OnClose(DeviceFD fd) {
    auto it = sessions.find(fd);
    if (it != sessions.end()) {
        sessions.erase(it);
    }
}

NvResult nvmap::IocCreate(IocCreateParams& params) {
    LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
@@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
    return NvResult::Success;
}

NvResult nvmap::IocAlloc(IocAllocParams& params) {
NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);

    if (!params.handle) {
@@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) {
        return NvResult::InsufficientMemory;
    }

    const auto result =
        handle_description->Alloc(params.flags, params.align, params.kind, params.address);
    const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
                                                  params.address, sessions[fd]);
    if (result != NvResult::Success) {
        LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
        return result;
    }
    bool is_out_io{};
    ASSERT(system.ApplicationProcess()
               ->GetPageTable()
    auto process = container.GetSession(sessions[fd])->process;
    ASSERT(process->GetPageTable()
               .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
                                             handle_description->size,
                                             Kernel::KMemoryPermission::None, true, false)
@@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) {
    return NvResult::Success;
}

NvResult nvmap::IocFree(IocFreeParams& params) {
NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) {
    LOG_DEBUG(Service_NVDRV, "called");

    if (!params.handle) {
@@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params) {
    }

    if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
        auto process = container.GetSession(sessions[fd])->process;
        if (freeInfo->can_unlock) {
            ASSERT(system.ApplicationProcess()
                       ->GetPageTable()
            ASSERT(process->GetPageTable()
                       .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                       .IsSuccess());
        }
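
IocAlloc and IocFree now resolve the owning process through the session recorded at open time rather than assuming the application process, so the device-address-space lock and its matching unlock are applied to the page table of whichever process actually issued the ioctl. The essential pairing (sketch; error handling as in the code above):

// Alloc: lock the caller's pages for device mapping.
auto process = container.GetSession(sessions[fd])->process;
process->GetPageTable().LockForMapDeviceAddressSpace(
    &is_out_io, handle_description->address, handle_description->size,
    Kernel::KMemoryPermission::None, true, false);
// Free: the matching unlock against the same process.
process->GetPageTable().UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size);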

@@ -33,7 +33,7 @@ public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override;

    void OnOpen(DeviceFD fd) override;
    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

    enum class HandleParameterType : u32_le {
@@ -100,11 +100,11 @@ public:
    static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

    NvResult IocCreate(IocCreateParams& params);
    NvResult IocAlloc(IocAllocParams& params);
    NvResult IocAlloc(IocAllocParams& params, DeviceFD fd);
    NvResult IocGetId(IocGetIdParams& params);
    NvResult IocFromId(IocFromIdParams& params);
    NvResult IocParam(IocParamParams& params);
    NvResult IocFree(IocFreeParams& params);
    NvResult IocFree(IocFreeParams& params, DeviceFD fd);

private:
    /// Id to use for the next handle that is created.
@@ -115,6 +115,7 @@ private:

    NvCore::Container& container;
    NvCore::NvMap& file;
    std::unordered_map<DeviceFD, NvCore::SessionId> sessions;
};

} // namespace Service::Nvidia::Devices

@@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) {
void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) {
    auto server_manager = std::make_unique<ServerManager>(system);
    auto module = std::make_shared<Module>(system);
    server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
    server_manager->RegisterNamedService("nvdrv:a",
                                         std::make_shared<NVDRV>(system, module, "nvdrv:a"));
    server_manager->RegisterNamedService("nvdrv:s",
                                         std::make_shared<NVDRV>(system, module, "nvdrv:s"));
    server_manager->RegisterNamedService("nvdrv:t",
                                         std::make_shared<NVDRV>(system, module, "nvdrv:t"));
    const auto NvdrvInterfaceFactoryForApplication = [&, module] {
        return std::make_shared<NVDRV>(system, module, "nvdrv");
    };
    const auto NvdrvInterfaceFactoryForApplets = [&, module] {
        return std::make_shared<NVDRV>(system, module, "nvdrv:a");
    };
    const auto NvdrvInterfaceFactoryForSysmodules = [&, module] {
        return std::make_shared<NVDRV>(system, module, "nvdrv:s");
    };
    const auto NvdrvInterfaceFactoryForTesting = [&, module] {
        return std::make_shared<NVDRV>(system, module, "nvdrv:t");
    };
    server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication);
    server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets);
    server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules);
    server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactoryForTesting);
    server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system));
    nvnflinger.SetNVDrvInstance(module);
    ServerManager::RunServer(std::move(server_manager));
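
Registering the nvdrv services through factory lambdas means every client connection can get its own NVDRV interface instance (all sharing the single Module), which is presumably what lets each client carry its own SessionId. The shape of the change, for one service name:

// Before: one shared interface object served every client.
server_manager->RegisterNamedService(
    "nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
// After: a factory, invoked per connecting client.
server_manager->RegisterNamedService("nvdrv", [&, module] {
    return std::make_shared<NVDRV>(system, module, "nvdrv");
});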
@@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
    return NvResult::Success;
}

DeviceFD Module::Open(const std::string& device_name) {
DeviceFD Module::Open(const std::string& device_name, NvCore::SessionId session_id) {
    auto it = builders.find(device_name);
    if (it == builders.end()) {
        LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
@@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name) {
    auto& builder = it->second;
    auto device = builder(fd)->second;

    device->OnOpen(fd);
    device->OnOpen(session_id, fd);

    return fd;
}

@@ -77,7 +77,7 @@ public:
    NvResult VerifyFD(DeviceFD fd) const;

    /// Opens a device node and returns a file descriptor to it.
    DeviceFD Open(const std::string& device_name);
    DeviceFD Open(const std::string& device_name, NvCore::SessionId session_id);

    /// Sends an ioctl command to the specified file descriptor.
    NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output);
@@ -93,6 +93,10 @@ public:

    NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);

    NvCore::Container& GetContainer() {
        return container;
    }

private:
    friend class EventInterface;
    friend class Service::Nvnflinger::Nvnflinger;

@@ -3,8 +3,10 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/hle/service/nvdrv/nvdata.h"
@@ -37,7 +39,7 @@ void NVDRV::Open(HLERequestContext& ctx) {
        return;
    }

    DeviceFD fd = nvdrv->Open(device_name);
    DeviceFD fd = nvdrv->Open(device_name, session_id);

    rb.Push<DeviceFD>(fd);
    rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);
@@ -150,12 +152,29 @@ void NVDRV::Close(HLERequestContext& ctx) {

void NVDRV::Initialize(HLERequestContext& ctx) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");
    IPC::ResponseBuilder rb{ctx, 3};
    SCOPE_EXIT({
        rb.Push(ResultSuccess);
        rb.PushEnum(NvResult::Success);
    });

    if (is_initialized) {
        // No need to initialize again
        return;
    }

    IPC::RequestParser rp{ctx};
    const auto process_handle{ctx.GetCopyHandle(0)};
    // The transfer memory is lent to nvdrv as a work buffer since nvdrv is
    // unable to allocate as much memory on its own. For HLE it's unnecessary to handle it
    [[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)};
    [[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>();

    auto& container = nvdrv->GetContainer();
    auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle);
    session_id = container.OpenSession(process.GetPointerUnsafe());

    is_initialized = true;

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(ResultSuccess);
    rb.PushEnum(NvResult::Success);
}
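
Initialize now does real work: it reads the process handle the guest copies in, opens a device-memory session for that process on the container, and stores the resulting SessionId for later Open calls; the SCOPE_EXIT keeps the IPC reply identical on every path, including the early return when already initialized. The session's lifetime is closed out by the destructor change below. In outline (sketch):

auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle);
session_id = nvdrv->GetContainer().OpenSession(process.GetPointerUnsafe());
// ... subsequent nvdrv->Open(device_name, session_id) calls reuse it ...
// and NVDRV::~NVDRV() balances it:
nvdrv->GetContainer().CloseSession(session_id);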

void NVDRV::QueryEvent(HLERequestContext& ctx) {
@@ -242,6 +261,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char*
    RegisterHandlers(functions);
}

NVDRV::~NVDRV() = default;
NVDRV::~NVDRV() {
    auto& container = nvdrv->GetContainer();
    container.CloseSession(session_id);
}

} // namespace Service::Nvidia

@@ -35,6 +35,7 @@ private:

    u64 pid{};
    bool is_initialized{};
    NvCore::SessionId session_id{};
    Common::ScratchBuffer<u8> output_buffer;
    Common::ScratchBuffer<u8> inline_output_buffer;
};

@@ -87,19 +87,20 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
    R_SUCCEED();
}

Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) {
    // Free the handle.
    Nvidia::Devices::nvmap::IocFreeParams free_params{
        .handle = handle,
    };
    R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
             VI::ResultOperationFailed);

    // We succeeded.
    R_SUCCEED();
}

Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
                        u32 size) {
                        u32 size, Nvidia::DeviceFD nvmap_fd) {
    // Assign the allocated memory to the handle.
    Nvidia::Devices::nvmap::IocAllocParams alloc_params{
        .handle = handle,
@@ -109,16 +110,16 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
        .kind = 0,
        .address = GetInteger(buffer),
    };
    R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
             VI::ResultOperationFailed);

    // We succeeded.
    R_SUCCEED();
}

Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd,
                               Common::ProcessAddress buffer, u32 size) {
    // Get the nvmap device.
    auto nvmap_fd = nvdrv.Open("/dev/nvmap");
    auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd);
    ASSERT(nvmap != nullptr);

@@ -127,11 +128,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,

    // Ensure we maintain a clean state on failure.
    ON_RESULT_FAILURE {
        ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle)));
        ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd)));
    };

    // Assign the allocated memory to the handle.
    R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size));
    R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd));
}

constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888;
@@ -197,9 +198,13 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
                                           std::addressof(m_buffer_page_group), m_system,
                                           SharedBufferSize));

    auto& container = m_nvdrv->GetContainer();
    m_session_id = container.OpenSession(m_system.ApplicationProcess());
    m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);

    // Create an nvmap handle for the buffer and assign the memory to it.
    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address,
                                  SharedBufferSize));
    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
                                  map_address, SharedBufferSize));

    // Record the display id.
    m_display_id = display_id;

@@ -4,6 +4,8 @@
#pragma once

#include "common/math_util.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvnflinger/nvnflinger.h"
#include "core/hle/service/nvnflinger/ui/fence.h"

@@ -53,7 +55,8 @@ private:
    u64 m_layer_id = 0;
    u32 m_buffer_nvmap_handle = 0;
    SharedMemoryPoolLayout m_pool_layout = {};

    Nvidia::DeviceFD m_nvmap_fd = {};
    Nvidia::NvCore::SessionId m_session_id = {};
    std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group;

    std::mutex m_guard;

@@ -124,7 +124,7 @@ void Nvnflinger::ShutdownLayers() {

void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
    nvdrv = std::move(instance);
    disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
    disp_fd = nvdrv->Open("/dev/nvdisp_disp0", {});
}

std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) {

@@ -22,11 +22,13 @@ GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
    : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) {
    if (this->BufferId() > 0) {
        m_nvmap->DuplicateHandle(this->BufferId(), true);
        m_nvmap->PinHandle(this->BufferId(), false);
    }
}

GraphicBuffer::~GraphicBuffer() {
    if (m_nvmap != nullptr && this->BufferId() > 0) {
        m_nvmap->UnpinHandle(this->BufferId());
        m_nvmap->FreeHandle(this->BufferId(), true);
    }
}
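
GraphicBuffer now keeps its backing nvmap handle pinned for as long as the buffer object lives: the constructor pairs the existing DuplicateHandle with a PinHandle, and the new destructor releases both in reverse order. Condensed (statements as in the code above):

// Acquire in the constructor...
m_nvmap->DuplicateHandle(this->BufferId(), true); // take a handle reference
m_nvmap->PinHandle(this->BufferId(), false);      // hold a stable device address
// ...release in reverse order in the destructor.
m_nvmap->UnpinHandle(this->BufferId());
m_nvmap->FreeHandle(this->BufferId(), true);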

@@ -24,6 +24,8 @@
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
#include "video_core/gpu.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/host1x/host1x.h"
#include "video_core/rasterizer_download_area.h"

namespace Core::Memory {
@@ -637,17 +639,6 @@ struct Memory::Impl {
        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
                  base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);

        // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
            auto& gpu = system.GPU();
            for (u64 i = 0; i < size; i++) {
                const auto page = base + i;
                if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                }
            }
        }

        const auto end = base + size;
        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                   base + page_table.pointers.size());
@@ -811,21 +802,33 @@
        return true;
    }

    void HandleRasterizerDownload(VAddr address, size_t size) {
    void HandleRasterizerDownload(VAddr v_address, size_t size) {
        const auto* p = GetPointerImpl(
            v_address, []() {}, []() {});
        if (!gpu_device_memory) [[unlikely]] {
            gpu_device_memory = &system.Host1x().MemoryManager();
        }
        const size_t core = system.GetCurrentHostThreadID();
        auto& current_area = rasterizer_read_areas[core];
        const VAddr end_address = address + size;
        if (current_area.start_address <= address && end_address <= current_area.end_address)
            [[likely]] {
            return;
        }
        current_area = system.GPU().OnCPURead(address, size);
        gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
            const DAddr end_address = address + size;
            if (current_area.start_address <= address && end_address <= current_area.end_address)
                [[likely]] {
                return;
            }
            current_area = system.GPU().OnCPURead(address, size);
        });
    }

    void HandleRasterizerWrite(VAddr address, size_t size) {
    void HandleRasterizerWrite(VAddr v_address, size_t size) {
        const auto* p = GetPointerImpl(
            v_address, []() {}, []() {});
        constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
        const size_t core = std::min(system.GetCurrentHostThreadID(),
                                     sys_core); // any other calls threads go to syscore.
        if (!gpu_device_memory) [[unlikely]] {
            gpu_device_memory = &system.Host1x().MemoryManager();
        }
        // Guard on sys_core;
        if (core == sys_core) [[unlikely]] {
            sys_core_guard.lock();
@@ -835,36 +838,53 @@
                sys_core_guard.unlock();
            }
        });
        auto& current_area = rasterizer_write_areas[core];
        VAddr subaddress = address >> YUZU_PAGEBITS;
        bool do_collection = current_area.last_address == subaddress;
        if (!do_collection) [[unlikely]] {
            do_collection = system.GPU().OnCPUWrite(address, size);
            if (!do_collection) {
                return;
        gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
            auto& current_area = rasterizer_write_areas[core];
            PAddr subaddress = address >> YUZU_PAGEBITS;
            bool do_collection = current_area.last_address == subaddress;
            if (!do_collection) [[unlikely]] {
                do_collection = system.GPU().OnCPUWrite(address, size);
                if (!do_collection) {
                    return;
                }
                current_area.last_address = subaddress;
            }
            current_area.last_address = subaddress;
        }
        gpu_dirty_managers[core].Collect(address, size);
            gpu_dirty_managers[core].Collect(address, size);
        });
    }

    struct GPUDirtyState {
        VAddr last_address;
        PAddr last_address;
    };

    void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
        system.GPU().InvalidateRegion(GetInteger(dest_addr), size);
    }

    void FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
        system.GPU().FlushRegion(GetInteger(dest_addr), size);
    void InvalidateGPUMemory(u8* p, size_t size) {
        constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
        const size_t core = std::min(system.GetCurrentHostThreadID(),
                                     sys_core); // any other calls threads go to syscore.
        if (!gpu_device_memory) [[unlikely]] {
            gpu_device_memory = &system.Host1x().MemoryManager();
        }
        // Guard on sys_core;
        if (core == sys_core) [[unlikely]] {
            sys_core_guard.lock();
        }
        SCOPE_EXIT({
            if (core == sys_core) [[unlikely]] {
                sys_core_guard.unlock();
            }
        });
        auto& gpu = system.GPU();
        gpu_device_memory->ApplyOpOnPointer(
            p, scratch_buffers[core], [&](DAddr address) { gpu.InvalidateRegion(address, size); });
    }

    Core::System& system;
    Tegra::MaxwellDeviceMemoryManager* gpu_device_memory{};
    Common::PageTable* current_page_table = nullptr;
    std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES>
        rasterizer_read_areas{};
    std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
    std::array<Common::ScratchBuffer<u32>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{};
    std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
    std::mutex sys_core_guard;
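
The rasterizer paths above no longer hand guest virtual addresses straight to the GPU: they resolve the VAddr to a host pointer, then ask the device memory manager to translate that pointer back into the DAddr ranges the GPU caches are keyed on, invoking the callback on the corresponding device range (the per-core scratch buffer appears to be there to absorb ranges that split). The shared pattern, assuming the members declared above:

// Pattern shared by HandleRasterizerDownload/Write and InvalidateGPUMemory.
const auto* p = GetPointerImpl(v_address, []() {}, []() {});
gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
    // 'address' is a device address: GPU-side flush/invalidate/dirty
    // tracking now all operate in DAddr space.
    system.GPU().InvalidateRegion(address, size);
});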
 | 
			
		||||
 | 
			
		||||
@@ -1059,14 +1079,6 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug)
 | 
			
		||||
    impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
 | 
			
		||||
    impl->InvalidateRegion(dest_addr, size);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
 | 
			
		||||
    impl->FlushRegion(dest_addr, size);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
 | 
			
		||||
    [[maybe_unused]] bool mapped = true;
 | 
			
		||||
    [[maybe_unused]] bool rasterizer = false;
 | 
			
		||||
@@ -1078,10 +1090,10 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
 | 
			
		||||
                      GetInteger(vaddr));
 | 
			
		||||
            mapped = false;
 | 
			
		||||
        },
 | 
			
		||||
        [&] {
 | 
			
		||||
            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
 | 
			
		||||
            rasterizer = true;
 | 
			
		||||
        });
 | 
			
		||||
        [&] { rasterizer = true; });
 | 
			
		||||
    if (rasterizer) {
 | 
			
		||||
        impl->InvalidateGPUMemory(ptr, size);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
#ifdef __linux__
 | 
			
		||||
    if (!rasterizer && mapped) {
 | 
			
		||||
 
 | 
			
		||||
@@ -12,6 +12,7 @@
 | 
			
		||||
 | 
			
		||||
#include "common/scratch_buffer.h"
 | 
			
		||||
#include "common/typed_address.h"
 | 
			
		||||
#include "core/guest_memory.h"
 | 
			
		||||
#include "core/hle/result.h"
 | 
			
		||||
 | 
			
		||||
namespace Common {
 | 
			
		||||
@@ -486,10 +487,10 @@ public:
 | 
			
		||||
    void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);
 | 
			
		||||
 | 
			
		||||
    void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
 | 
			
		||||
    void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
 | 
			
		||||
 | 
			
		||||
    bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
 | 
			
		||||
 | 
			
		||||
    bool InvalidateSeparateHeap(void* fault_address);
 | 
			
		||||
    void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
 | 
			
		||||
 | 
			
		||||
private:
 | 
			
		||||
    Core::System& system;
 | 
			
		||||
@@ -498,209 +499,9 @@ private:
 | 
			
		||||
    std::unique_ptr<Impl> impl;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
enum GuestMemoryFlags : u32 {
 | 
			
		||||
    Read = 1 << 0,
 | 
			
		||||
    Write = 1 << 1,
 | 
			
		||||
    Safe = 1 << 2,
 | 
			
		||||
    Cached = 1 << 3,
 | 
			
		||||
 | 
			
		||||
    SafeRead = Read | Safe,
 | 
			
		||||
    SafeWrite = Write | Safe,
 | 
			
		||||
    SafeReadWrite = SafeRead | SafeWrite,
 | 
			
		||||
    SafeReadCachedWrite = SafeReadWrite | Cached,
 | 
			
		||||
 | 
			
		||||
    UnsafeRead = Read,
 | 
			
		||||
    UnsafeWrite = Write,
 | 
			
		||||
    UnsafeReadWrite = UnsafeRead | UnsafeWrite,
 | 
			
		||||
    UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
};

namespace {
template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemory {
    using iterator = T*;
    using const_iterator = const T*;
    using value_type = T;
    using element_type = T;
    using iterator_category = std::contiguous_iterator_tag;

public:
    GuestMemory() = delete;
    explicit GuestMemory(M& memory, u64 addr, std::size_t size,
                         Common::ScratchBuffer<T>* backup = nullptr)
        : m_memory{memory}, m_addr{addr}, m_size{size} {
        static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
        if constexpr (FLAGS & GuestMemoryFlags::Read) {
            Read(addr, size, backup);
        }
    }

    ~GuestMemory() = default;

    T* data() noexcept {
        return m_data_span.data();
    }

    const T* data() const noexcept {
        return m_data_span.data();
    }

    size_t size() const noexcept {
        return m_size;
    }

    size_t size_bytes() const noexcept {
        return this->size() * sizeof(T);
    }

    [[nodiscard]] T* begin() noexcept {
        return this->data();
    }

    [[nodiscard]] const T* begin() const noexcept {
        return this->data();
    }

    [[nodiscard]] T* end() noexcept {
        return this->data() + this->size();
    }

    [[nodiscard]] const T* end() const noexcept {
        return this->data() + this->size();
    }

    T& operator[](size_t index) noexcept {
        return m_data_span[index];
    }

    const T& operator[](size_t index) const noexcept {
        return m_data_span[index];
    }

    void SetAddressAndSize(u64 addr, std::size_t size) noexcept {
        m_addr = addr;
        m_size = size;
        m_addr_changed = true;
    }

    std::span<T> Read(u64 addr, std::size_t size,
                      Common::ScratchBuffer<T>* backup = nullptr) noexcept {
        m_addr = addr;
        m_size = size;
        if (m_size == 0) {
            m_is_data_copy = true;
            return {};
        }

        if (this->TrySetSpan()) {
            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                m_memory.FlushRegion(m_addr, this->size_bytes());
            }
        } else {
            if (backup) {
                backup->resize_destructive(this->size());
                m_data_span = *backup;
            } else {
                m_data_copy.resize(this->size());
                m_data_span = std::span(m_data_copy);
            }
            m_is_data_copy = true;
            m_span_valid = true;
            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
            } else {
                m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
            }
        }
        return m_data_span;
    }

    void Write(std::span<T> write_data) noexcept {
        if constexpr (FLAGS & GuestMemoryFlags::Cached) {
            m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
        } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
            m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
        } else {
            m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
        }
    }

    bool TrySetSpan() noexcept {
        if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
            m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
            m_span_valid = true;
            return true;
        }
        return false;
    }

protected:
    bool IsDataCopy() const noexcept {
        return m_is_data_copy;
    }

    bool AddressChanged() const noexcept {
        return m_addr_changed;
    }

    M& m_memory;
    u64 m_addr{};
    size_t m_size{};
    std::span<T> m_data_span{};
    std::vector<T> m_data_copy{};
    bool m_span_valid{false};
    bool m_is_data_copy{false};
    bool m_addr_changed{false};
};
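
Read() above prefers a zero-copy view over guest memory (TrySetSpan) and only falls back to a bounce buffer, optionally backed by the caller's scratch buffer, when no contiguous host mapping exists. A minimal self-contained sketch of that decision; FlatMemory is a hypothetical stand-in for the M interface and models only GetSpan/ReadBlock:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

struct FlatMemory {
    std::vector<uint8_t> backing = std::vector<uint8_t>(0x1000);
    uint8_t* GetSpan(uint64_t addr, std::size_t size) { // contiguous host mapping?
        return addr + size <= backing.size() ? backing.data() + addr : nullptr;
    }
    void ReadBlock(uint64_t addr, void* dest, std::size_t size) {
        std::memcpy(dest, backing.data() + addr, size);
    }
};

int main() {
    FlatMemory mem;
    const uint64_t addr = 0x100;
    const std::size_t size = 16;
    std::vector<uint8_t> bounce;
    std::span<uint8_t> view;
    if (uint8_t* ptr = mem.GetSpan(addr, size)) {
        view = {ptr, size}; // zero-copy: the span aliases guest memory directly
    } else {
        bounce.resize(size); // fallback: read into a copy, write back later
        mem.ReadBlock(addr, bounce.data(), size);
        view = bounce;
    }
    return view.size() == size ? 0 : 1;
}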

template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
public:
    GuestMemoryScoped() = delete;
    explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
                               Common::ScratchBuffer<T>* backup = nullptr)
        : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
        if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
            if (!this->TrySetSpan()) {
                if (backup) {
                    this->m_data_span = *backup;
                    this->m_span_valid = true;
                    this->m_is_data_copy = true;
                }
            }
        }
    }

    ~GuestMemoryScoped() {
        if constexpr (FLAGS & GuestMemoryFlags::Write) {
            if (this->size() == 0) [[unlikely]] {
                return;
            }

            if (this->AddressChanged() || this->IsDataCopy()) {
                ASSERT(this->m_span_valid);
                if constexpr (FLAGS & GuestMemoryFlags::Cached) {
                    this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
                } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                    this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
                } else {
                    this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
                }
            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
                                 (FLAGS & GuestMemoryFlags::Cached)) {
                this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
            }
        }
    }
};
} // namespace

template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemory = GuestMemory<Memory, T, FLAGS>;
using CpuGuestMemory = GuestMemory<Core::Memory::Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemoryScoped = GuestMemoryScoped<Memory, T, FLAGS>;
using CpuGuestMemoryScoped = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemory = GuestMemory<Tegra::MemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemoryScoped = GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>;

} // namespace Core::Memory
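
Taken together, GuestMemory and GuestMemoryScoped give RAII semantics over guest memory: the constructor reads, mutations happen through the span, and the scoped variant's destructor writes back only when the data lives in a bounce copy or was retargeted via SetAddressAndSize; otherwise it just invalidates the region. A short usage sketch, assuming yuzu's Tegra types; it mirrors the MaxwellDMA call sites later in this diff and is illustrative only:

// Illustrative only: CopySixteenBytes is a hypothetical helper, not part of this PR.
void CopySixteenBytes(Tegra::MemoryManager& memory_manager, GPUVAddr src, GPUVAddr dst,
                      Common::ScratchBuffer<u8>& scratch) {
    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
        tmp(memory_manager, src, 16, &scratch); // constructor reads 16 bytes at src
    tmp.SetAddressAndSize(dst, 16);             // retarget; marks the address as changed
}                                               // destructor writes the bytes back to dst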

@@ -24,9 +24,8 @@ constexpr VAddr c = 16 * HIGH_PAGE_SIZE;
class RasterizerInterface {
public:
    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
        const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
        const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
                           Core::Memory::YUZU_PAGEBITS};
        const u64 page_start{addr >> Core::DEVICE_PAGEBITS};
        const u64 page_end{(addr + size + Core::DEVICE_PAGESIZE - 1) >> Core::DEVICE_PAGEBITS};
        for (u64 page = page_start; page < page_end; ++page) {
            int& value = page_table[page];
            value += delta;
@@ -40,7 +39,7 @@ public:
    }

    [[nodiscard]] int Count(VAddr addr) const noexcept {
        const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS);
        const auto it = page_table.find(addr >> Core::DEVICE_PAGEBITS);
        return it == page_table.end() ? 0 : it->second;
    }
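
The mock above now counts device pages rather than CPU pages. The rounding is the usual floor/ceil pair: page_start truncates, page_end rounds the end of the range up, so touching a single byte of a page pins the whole page. A worked example, assuming DEVICE_PAGEBITS == 12 (the constant is defined in core/device_memory_manager.h, outside this diff):

#include <cassert>
#include <cstdint>

int main() {
    // Assumption: 4 KiB device pages (DEVICE_PAGEBITS == 12).
    constexpr std::uint64_t bits = 12;
    constexpr std::uint64_t page = 1ULL << bits;
    const std::uint64_t addr = 0x1234;
    const std::uint64_t size = 0x2000;
    const std::uint64_t page_start = addr >> bits;                   // floor -> 1
    const std::uint64_t page_end = (addr + size + page - 1) >> bits; // ceil  -> 4
    assert(page_end - page_start == 3); // the range touches pages 1, 2 and 3
    return 0;
}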

@@ -71,6 +71,8 @@ add_library(video_core STATIC
    host1x/ffmpeg/ffmpeg.h
    host1x/control.cpp
    host1x/control.h
    host1x/gpu_device_memory_manager.cpp
    host1x/gpu_device_memory_manager.h
    host1x/host1x.cpp
    host1x/host1x.h
    host1x/nvdec.cpp
@@ -93,6 +95,7 @@ add_library(video_core STATIC
    gpu.h
    gpu_thread.cpp
    gpu_thread.h
    guest_memory.h
    invalidation_accumulator.h
    memory_manager.cpp
    memory_manager.h
@@ -105,8 +108,6 @@ add_library(video_core STATIC
    query_cache/query_stream.h
    query_cache/types.h
    query_cache.h
    rasterizer_accelerated.cpp
    rasterizer_accelerated.h
    rasterizer_interface.h
    renderer_base.cpp
    renderer_base.h

@@ -33,13 +33,12 @@ struct NullBufferParams {};
 *
 * The buffer size and address is forcefully aligned to CPU page boundaries.
 */
template <class RasterizerInterface>
class BufferBase {
public:
    static constexpr u64 BASE_PAGE_BITS = 16;
    static constexpr u64 BASE_PAGE_SIZE = 1ULL << BASE_PAGE_BITS;

    explicit BufferBase(RasterizerInterface& rasterizer_, VAddr cpu_addr_, u64 size_bytes_)
    explicit BufferBase(VAddr cpu_addr_, u64 size_bytes_)
        : cpu_addr{cpu_addr_}, size_bytes{size_bytes_} {}

    explicit BufferBase(NullBufferParams) {}

(File diff suppressed because it is too large)

@@ -32,7 +32,6 @@
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/buffer_cache/buffer_base.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/delayed_destruction_ring.h"
@@ -41,7 +40,6 @@
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/slot_vector.h"
#include "video_core/texture_cache/types.h"
@@ -94,7 +92,7 @@ static constexpr BufferId NULL_BUFFER_ID{0};
static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);

struct Binding {
    VAddr cpu_addr{};
    DAddr device_addr{};
    u32 size{};
    BufferId buffer_id;
};
@@ -104,7 +102,7 @@ struct TextureBufferBinding : Binding {
};

static constexpr Binding NULL_BINDING{
    .cpu_addr = 0,
    .device_addr = 0,
    .size = 0,
    .buffer_id = NULL_BUFFER_ID,
};
@@ -204,10 +202,10 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf
    using Async_Buffer = typename P::Async_Buffer;
    using MemoryTracker = typename P::MemoryTracker;

    using IntervalCompare = std::less<VAddr>;
    using IntervalInstance = boost::icl::interval_type_default<VAddr, std::less>;
    using IntervalAllocator = boost::fast_pool_allocator<VAddr>;
    using IntervalSet = boost::icl::interval_set<VAddr>;
    using IntervalCompare = std::less<DAddr>;
    using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
    using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
    using IntervalSet = boost::icl::interval_set<DAddr>;
    using IntervalType = typename IntervalSet::interval_type;

    template <typename Type>
@@ -230,32 +228,31 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf

    using OverlapCombine = counter_add_functor<int>;
    using OverlapSection = boost::icl::inter_section<int>;
    using OverlapCounter = boost::icl::split_interval_map<VAddr, int>;
    using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;

    struct OverlapResult {
        boost::container::small_vector<BufferId, 16> ids;
        VAddr begin;
        VAddr end;
        DAddr begin;
        DAddr end;
        bool has_stream_leap = false;
    };
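
Only the key type changes here; the interval bookkeeping keeps its shape. Stored ranges are looked up with lower_bound/upper_bound on a query interval and clamped to the query window, exactly as ForEachInRangeSet does below. A self-contained sketch of that pattern with boost::icl:

#include <algorithm>
#include <boost/icl/interval_set.hpp>
#include <cstdint>
#include <iostream>

using DAddr = std::uint64_t;

int main() {
    boost::icl::interval_set<DAddr> ranges;
    using Interval = boost::icl::interval_set<DAddr>::interval_type;
    ranges += Interval::right_open(0x1000, 0x3000);
    ranges += Interval::right_open(0x8000, 0x9000);

    // Clamp every stored range against the query window, as ForEachInRangeSet does.
    const DAddr begin = 0x2000, end = 0x8800;
    const Interval query = Interval::right_open(begin, end);
    for (auto it = ranges.lower_bound(query); it != ranges.upper_bound(query); ++it) {
        const DAddr lo = std::max(it->lower(), begin);
        const DAddr hi = std::min(it->upper(), end);
        std::cout << std::hex << lo << "-" << hi << '\n'; // 2000-3000, 8000-8800
    }
    return 0;
}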

public:
    explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
                         Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
    explicit BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_);

    void TickFrame();

    void WriteMemory(VAddr cpu_addr, u64 size);
    void WriteMemory(DAddr device_addr, u64 size);

    void CachedWriteMemory(VAddr cpu_addr, u64 size);
    void CachedWriteMemory(DAddr device_addr, u64 size);

    bool OnCPUWrite(VAddr cpu_addr, u64 size);
    bool OnCPUWrite(DAddr device_addr, u64 size);

    void DownloadMemory(VAddr cpu_addr, u64 size);
    void DownloadMemory(DAddr device_addr, u64 size);

    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(DAddr device_addr, u64 size);

    bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
    bool InlineMemory(DAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);

    void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);

@@ -300,7 +297,7 @@ public:
                                                       ObtainBufferSynchronize sync_info,
                                                       ObtainBufferOperation post_op);

    [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(VAddr gpu_addr, u32 size,
    [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(DAddr gpu_addr, u32 size,
                                                          ObtainBufferSynchronize sync_info,
                                                          ObtainBufferOperation post_op);
    void FlushCachedWrites();
@@ -326,13 +323,13 @@ public:
    bool DMAClear(GPUVAddr src_address, u64 amount, u32 value);

    /// Return true when a CPU region is modified from the GPU
    [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
    [[nodiscard]] bool IsRegionGpuModified(DAddr addr, size_t size);

    /// Return true when a region is registered on the cache
    [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);
    [[nodiscard]] bool IsRegionRegistered(DAddr addr, size_t size);

    /// Return true when a CPU region is modified from the CPU
    [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
    [[nodiscard]] bool IsRegionCpuModified(DAddr addr, size_t size);

    void SetDrawIndirect(
        const Tegra::Engines::DrawManager::IndirectParams* current_draw_indirect_) {
@@ -366,9 +363,9 @@ private:
    }

    template <typename Func>
    void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
        const u64 page_end = Common::DivCeil(cpu_addr + size, CACHING_PAGESIZE);
        for (u64 page = cpu_addr >> CACHING_PAGEBITS; page < page_end;) {
    void ForEachBufferInRange(DAddr device_addr, u64 size, Func&& func) {
        const u64 page_end = Common::DivCeil(device_addr + size, CACHING_PAGESIZE);
        for (u64 page = device_addr >> CACHING_PAGEBITS; page < page_end;) {
            const BufferId buffer_id = page_table[page];
            if (!buffer_id) {
                ++page;
@@ -377,15 +374,15 @@ private:
            Buffer& buffer = slot_buffers[buffer_id];
            func(buffer_id, buffer);

            const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
            const DAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
            page = Common::DivCeil(end_addr, CACHING_PAGESIZE);
        }
    }

    template <typename Func>
    void ForEachInRangeSet(IntervalSet& current_range, VAddr cpu_addr, u64 size, Func&& func) {
        const VAddr start_address = cpu_addr;
        const VAddr end_address = start_address + size;
    void ForEachInRangeSet(IntervalSet& current_range, DAddr device_addr, u64 size, Func&& func) {
        const DAddr start_address = device_addr;
        const DAddr end_address = start_address + size;
        const IntervalType search_interval{start_address, end_address};
        auto it = current_range.lower_bound(search_interval);
        if (it == current_range.end()) {
@@ -393,8 +390,8 @@ private:
        }
        auto end_it = current_range.upper_bound(search_interval);
        for (; it != end_it; it++) {
            VAddr inter_addr_end = it->upper();
            VAddr inter_addr = it->lower();
            DAddr inter_addr_end = it->upper();
            DAddr inter_addr = it->lower();
            if (inter_addr_end > end_address) {
                inter_addr_end = end_address;
            }
@@ -406,10 +403,10 @@ private:
    }

    template <typename Func>
    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
    void ForEachInOverlapCounter(OverlapCounter& current_range, DAddr device_addr, u64 size,
                                 Func&& func) {
        const VAddr start_address = cpu_addr;
        const VAddr end_address = start_address + size;
        const DAddr start_address = device_addr;
        const DAddr end_address = start_address + size;
        const IntervalType search_interval{start_address, end_address};
        auto it = current_range.lower_bound(search_interval);
        if (it == current_range.end()) {
@@ -418,8 +415,8 @@ private:
        auto end_it = current_range.upper_bound(search_interval);
        for (; it != end_it; it++) {
            auto& inter = it->first;
            VAddr inter_addr_end = inter.upper();
            VAddr inter_addr = inter.lower();
            DAddr inter_addr_end = inter.upper();
            DAddr inter_addr = inter.lower();
            if (inter_addr_end > end_address) {
                inter_addr_end = end_address;
            }
@@ -451,9 +448,9 @@ private:
        } while (any_removals);
    }

    static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
    static bool IsRangeGranular(DAddr device_addr, size_t size) {
        return (device_addr & ~Core::DEVICE_PAGEMASK) ==
               ((device_addr + size) & ~Core::DEVICE_PAGEMASK);
    }
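
IsRangeGranular asks whether a range stays inside a single device page by comparing the page-aligned bases of its two endpoints; note that a range ending exactly on the next page base is conservatively treated as crossing. A minimal sketch, assuming DEVICE_PAGEMASK == 0xFFF (4 KiB device pages; the real constant lives in core/device_memory_manager.h):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumption: DEVICE_PAGEMASK == DEVICE_PAGESIZE - 1 with 4 KiB pages.
constexpr std::uint64_t DEVICE_PAGEMASK = 0xFFF;

bool IsRangeGranular(std::uint64_t device_addr, std::size_t size) {
    return (device_addr & ~DEVICE_PAGEMASK) == ((device_addr + size) & ~DEVICE_PAGEMASK);
}

int main() {
    assert(IsRangeGranular(0x1000, 0x200));  // stays inside one device page
    assert(!IsRangeGranular(0x1F00, 0x200)); // crosses into the next page
    return 0;
}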

    void RunGarbageCollector();
@@ -508,15 +505,15 @@ private:

    void UpdateComputeTextureBuffers();

    void MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size);
    void MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size);

    [[nodiscard]] BufferId FindBuffer(VAddr cpu_addr, u32 size);
    [[nodiscard]] BufferId FindBuffer(DAddr device_addr, u32 size);

    [[nodiscard]] OverlapResult ResolveOverlaps(VAddr cpu_addr, u32 wanted_size);
    [[nodiscard]] OverlapResult ResolveOverlaps(DAddr device_addr, u32 wanted_size);

    void JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, bool accumulate_stream_score);

    [[nodiscard]] BufferId CreateBuffer(VAddr cpu_addr, u32 wanted_size);
    [[nodiscard]] BufferId CreateBuffer(DAddr device_addr, u32 wanted_size);

    void Register(BufferId buffer_id);

@@ -527,7 +524,7 @@ private:

    void TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept;

    bool SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size);
    bool SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size);

    void UploadMemory(Buffer& buffer, u64 total_size_bytes, u64 largest_copy,
                      std::span<BufferCopy> copies);
@@ -539,7 +536,7 @@ private:

    void DownloadBufferMemory(Buffer& buffer_id);

    void DownloadBufferMemory(Buffer& buffer_id, VAddr cpu_addr, u64 size);
    void DownloadBufferMemory(Buffer& buffer_id, DAddr device_addr, u64 size);

    void DeleteBuffer(BufferId buffer_id, bool do_not_mark = false);

@@ -549,7 +546,7 @@ private:
    [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
                                                               PixelFormat format);

    [[nodiscard]] std::span<const u8> ImmediateBufferWithData(VAddr cpu_addr, size_t size);
    [[nodiscard]] std::span<const u8> ImmediateBufferWithData(DAddr device_addr, size_t size);

    [[nodiscard]] std::span<u8> ImmediateBuffer(size_t wanted_capacity);

@@ -557,11 +554,10 @@ private:

    void ClearDownload(IntervalType subtract_interval);

    void InlineMemoryImplementation(VAddr dest_address, size_t copy_size,
    void InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
                                    std::span<const u8> inlined_buffer);

    VideoCore::RasterizerInterface& rasterizer;
    Core::Memory::Memory& cpu_memory;
    Tegra::MaxwellDeviceMemoryManager& device_memory;

    SlotVector<Buffer> slot_buffers;
    DelayedDestructionRing<Buffer, 8> delayed_destruction_ring;
@@ -598,7 +594,7 @@ private:
    u64 critical_memory = 0;
    BufferId inline_buffer_id;

    std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
    std::array<BufferId, ((1ULL << 34) >> CACHING_PAGEBITS)> page_table;
    Common::ScratchBuffer<u8> tmp_buffer;
};
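
Shrinking the tracked space from 39 to 34 address bits cuts the flat page table by a factor of 32. The arithmetic, assuming CACHING_PAGEBITS == 16 and a 4-byte BufferId (both defined elsewhere in the buffer cache, not in this hunk):

#include <cstddef>
#include <cstdint>

// Assumptions: 64 KiB caching pages (CACHING_PAGEBITS == 16), sizeof(BufferId) == 4.
constexpr std::uint64_t CACHING_PAGEBITS = 16;
constexpr std::size_t old_entries = (1ULL << 39) >> CACHING_PAGEBITS; // 8 Mi entries
constexpr std::size_t new_entries = (1ULL << 34) >> CACHING_PAGEBITS; // 256 Ki entries
static_assert(old_entries * 4 == 32ULL << 20); // ~32 MiB of BufferId slots before
static_assert(new_entries * 4 == 1ULL << 20);  // ~1 MiB after the DAddr switch

int main() {
    return 0;
}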

@@ -17,19 +17,19 @@

namespace VideoCommon {

template <class RasterizerInterface>
template <typename DeviceTracker>
class MemoryTrackerBase {
    static constexpr size_t MAX_CPU_PAGE_BITS = 39;
    static constexpr size_t MAX_CPU_PAGE_BITS = 34;
    static constexpr size_t HIGHER_PAGE_BITS = 22;
    static constexpr size_t HIGHER_PAGE_SIZE = 1ULL << HIGHER_PAGE_BITS;
    static constexpr size_t HIGHER_PAGE_MASK = HIGHER_PAGE_SIZE - 1ULL;
    static constexpr size_t NUM_HIGH_PAGES = 1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS);
    static constexpr size_t MANAGER_POOL_SIZE = 32;
    static constexpr size_t WORDS_STACK_NEEDED = HIGHER_PAGE_SIZE / BYTES_PER_WORD;
    using Manager = WordManager<RasterizerInterface, WORDS_STACK_NEEDED>;
    using Manager = WordManager<DeviceTracker, WORDS_STACK_NEEDED>;

public:
    MemoryTrackerBase(RasterizerInterface& rasterizer_) : rasterizer{&rasterizer_} {}
    MemoryTrackerBase(DeviceTracker& device_tracker_) : device_tracker{&device_tracker_} {}
    ~MemoryTrackerBase() = default;

    /// Returns the inclusive CPU modified range in a begin end pair
@@ -74,7 +74,7 @@ public:
            });
    }

    /// Mark region as CPU modified, notifying the rasterizer about this change
    /// Mark region as CPU modified, notifying the device_tracker about this change
    void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
        IteratePages<true>(dirty_cpu_addr, query_size,
                           [](Manager* manager, u64 offset, size_t size) {
@@ -83,7 +83,7 @@ public:
                           });
    }

    /// Unmark region as CPU modified, notifying the rasterizer about this change
    /// Unmark region as CPU modified, notifying the device_tracker about this change
    void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
        IteratePages<true>(dirty_cpu_addr, query_size,
                           [](Manager* manager, u64 offset, size_t size) {
@@ -139,7 +139,7 @@ public:
            });
    }

    /// Flushes cached CPU writes, and notify the rasterizer about the deltas
    /// Flushes cached CPU writes, and notify the device_tracker about the deltas
    void FlushCachedWrites(VAddr query_cpu_addr, u64 query_size) noexcept {
        IteratePages<false>(query_cpu_addr, query_size,
                            [](Manager* manager, [[maybe_unused]] u64 offset,
@@ -280,7 +280,7 @@ private:
        manager_pool.emplace_back();
        auto& last_pool = manager_pool.back();
        for (size_t i = 0; i < MANAGER_POOL_SIZE; i++) {
            new (&last_pool[i]) Manager(0, *rasterizer, HIGHER_PAGE_SIZE);
            new (&last_pool[i]) Manager(0, *device_tracker, HIGHER_PAGE_SIZE);
            free_managers.push_back(&last_pool[i]);
        }
        return on_return();
@@ -293,7 +293,7 @@ private:

    std::unordered_set<u32> cached_pages;

    RasterizerInterface* rasterizer = nullptr;
    DeviceTracker* device_tracker = nullptr;
};

} // namespace VideoCommon
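
For reference, the tracker constants compose as follows once MAX_CPU_PAGE_BITS drops to 34; the only assumption below is DEVICE_PAGESIZE == 4096, which the word_manager.h hunk below switches BYTES_PER_PAGE to:

#include <cstddef>

// Derived from the constants in this hunk; DEVICE_PAGESIZE == 4096 is assumed
// (it is defined in core/device_memory_manager.h, not shown in this diff).
constexpr std::size_t MAX_CPU_PAGE_BITS = 34;  // was 39 before the DAddr switch
constexpr std::size_t HIGHER_PAGE_BITS = 22;
constexpr std::size_t HIGHER_PAGE_SIZE = 1ULL << HIGHER_PAGE_BITS; // 4 MiB per Manager
constexpr std::size_t NUM_HIGH_PAGES = 1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS);
static_assert(NUM_HIGH_PAGES == 4096);         // down from 131072 at 39 bits
constexpr std::size_t BYTES_PER_WORD = 64 * 4096; // PAGES_PER_WORD * BYTES_PER_PAGE
static_assert(HIGHER_PAGE_SIZE / BYTES_PER_WORD == 16); // == WORDS_STACK_NEEDED

int main() {
    return 0;
}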

@@ -13,12 +13,12 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
#include "core/memory.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace VideoCommon {

constexpr u64 PAGES_PER_WORD = 64;
constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE;
constexpr u64 BYTES_PER_PAGE = Core::DEVICE_PAGESIZE;
constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;

enum class Type {
@@ -163,11 +163,11 @@ struct Words {
    WordsArray<stack_words> preflushable;
};

template <class RasterizerInterface, size_t stack_words = 1>
template <class DeviceTracker, size_t stack_words = 1>
class WordManager {
public:
    explicit WordManager(VAddr cpu_addr_, RasterizerInterface& rasterizer_, u64 size_bytes)
        : cpu_addr{cpu_addr_}, rasterizer{&rasterizer_}, words{size_bytes} {}
    explicit WordManager(VAddr cpu_addr_, DeviceTracker& tracker_, u64 size_bytes)
        : cpu_addr{cpu_addr_}, tracker{&tracker_}, words{size_bytes} {}

    explicit WordManager() = default;

@@ -279,7 +279,7 @@ public:
    }

    /**
     * Loop over each page in the given range, turn off those bits and notify the rasterizer if
     * Loop over each page in the given range, turn off those bits and notify the tracker if
     * needed. Call the given function on each turned off range.
     *
     * @param query_cpu_range Base CPU address to loop over
@@ -459,26 +459,26 @@ private:
    }

    /**
     * Notify rasterizer about changes in the CPU tracking state of a word in the buffer
     * Notify tracker about changes in the CPU tracking state of a word in the buffer
     *
     * @param word_index   Index to the word to notify to the rasterizer
     * @param word_index   Index to the word to notify to the tracker
     * @param current_bits Current state of the word
     * @param new_bits     New state of the word
     *
     * @tparam add_to_rasterizer True when the rasterizer should start tracking the new pages
     * @tparam add_to_tracker True when the tracker should start tracking the new pages
     */
    template <bool add_to_rasterizer>
    template <bool add_to_tracker>
    void NotifyRasterizer(u64 word_index, u64 current_bits, u64 new_bits) const {
        u64 changed_bits = (add_to_rasterizer ? current_bits : ~current_bits) & new_bits;
        u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits;
        VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
        IteratePages(changed_bits, [&](size_t offset, size_t size) {
            rasterizer->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
                                               size * BYTES_PER_PAGE, add_to_rasterizer ? 1 : -1);
            tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, size * BYTES_PER_PAGE,
                                            add_to_tracker ? 1 : -1);
        });
    }

    VAddr cpu_addr = 0;
    RasterizerInterface* rasterizer = nullptr;
    DeviceTracker* tracker = nullptr;
    Words<stack_words> words;
};
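
NotifyRasterizer first masks down to the bits that actually changed state, and IteratePages then coalesces those set bits into contiguous page runs so UpdatePagesCachedCount is called once per run instead of once per page. A self-contained sketch of that run-coalescing loop (C++20 <bit>):

#include <bit>
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
    std::uint64_t bits = 0b0111'0000'1100ULL; // pages 2-3 and 8-10 are marked
    std::size_t page = 0;
    while (bits != 0) {
        const std::size_t skip = std::countr_zero(bits); // clear pages to skip
        bits >>= skip;
        page += skip;
        const std::size_t run = std::countr_one(bits);   // length of the marked run
        bits >>= run; // safe here: a run never spans all 64 bits in this example
        std::cout << "pages [" << page << ", " << page + run << ")\n";
        page += run;
    }
    return 0; // prints: pages [2, 4) then pages [8, 11)
}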

@@ -5,10 +5,10 @@
#include "common/microprofile.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/dma_pusher.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
#include "video_core/guest_memory.h"
#include "video_core/memory_manager.h"

namespace Tegra {
@@ -85,15 +85,15 @@ bool DmaPusher::Step() {
            }
        }
        const auto safe_process = [&] {
            Core::Memory::GpuGuestMemory<Tegra::CommandHeader,
                                         Core::Memory::GuestMemoryFlags::SafeRead>
            Tegra::Memory::GpuGuestMemory<Tegra::CommandHeader,
                                          Tegra::Memory::GuestMemoryFlags::SafeRead>
                headers(memory_manager, dma_state.dma_get, command_list_header.size,
                        &command_headers);
            ProcessCommands(headers);
        };
        const auto unsafe_process = [&] {
            Core::Memory::GpuGuestMemory<Tegra::CommandHeader,
                                         Core::Memory::GuestMemoryFlags::UnsafeRead>
            Tegra::Memory::GpuGuestMemory<Tegra::CommandHeader,
                                          Tegra::Memory::GuestMemoryFlags::UnsafeRead>
                headers(memory_manager, dma_state.dma_get, command_list_header.size,
                        &command_headers);
            ProcessCommands(headers);

@@ -5,8 +5,8 @@

#include "common/algorithm.h"
#include "common/assert.h"
#include "core/memory.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/guest_memory.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/textures/decoders.h"
@@ -68,7 +68,8 @@ void State::ProcessData(std::span<const u8> read_buffer) {
            true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
            regs.dest.BlockHeight(), regs.dest.BlockDepth());

        Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
        Tegra::Memory::GpuGuestMemoryScoped<u8,
                                            Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
            tmp(memory_manager, address, dst_size, &tmp_buffer);

        Tegra::Texture::SwizzleSubrect(tmp, read_buffer, bytes_per_pixel, width, regs.dest.height,

@@ -9,7 +9,6 @@
#include "common/settings.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/memory.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/draw_manager.h"
#include "video_core/engines/maxwell_3d.h"

@@ -8,9 +8,9 @@
#include "common/polyfill_ranges.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/guest_memory.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_base.h"
#include "video_core/textures/decoders.h"
@@ -133,8 +133,8 @@ void MaxwellDMA::Launch() {
                UNIMPLEMENTED_IF(regs.offset_out % 16 != 0);
                read_buffer.resize_destructive(16);
                for (u32 offset = 0; offset < regs.line_length_in; offset += 16) {
                    Core::Memory::GpuGuestMemoryScoped<
                        u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                    Tegra::Memory::GpuGuestMemoryScoped<
                        u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                        tmp_write_buffer(memory_manager,
                                         convert_linear_2_blocklinear_addr(regs.offset_in + offset),
                                         16, &read_buffer);
@@ -146,16 +146,16 @@ void MaxwellDMA::Launch() {
                UNIMPLEMENTED_IF(regs.offset_out % 16 != 0);
                read_buffer.resize_destructive(16);
                for (u32 offset = 0; offset < regs.line_length_in; offset += 16) {
                    Core::Memory::GpuGuestMemoryScoped<
                        u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                    Tegra::Memory::GpuGuestMemoryScoped<
                        u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                        tmp_write_buffer(memory_manager, regs.offset_in + offset, 16, &read_buffer);
                    tmp_write_buffer.SetAddressAndSize(
                        convert_linear_2_blocklinear_addr(regs.offset_out + offset), 16);
                }
            } else {
                if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
                    Core::Memory::GpuGuestMemoryScoped<
                        u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                    Tegra::Memory::GpuGuestMemoryScoped<
                        u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                        tmp_write_buffer(memory_manager, regs.offset_in, regs.line_length_in,
                                         &read_buffer);
                    tmp_write_buffer.SetAddressAndSize(regs.offset_out, regs.line_length_in);
@@ -226,9 +226,9 @@ void MaxwellDMA::CopyBlockLinearToPitch() {

    const size_t dst_size = dst_operand.pitch * regs.line_count;

    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
        memory_manager, src_operand.address, src_size, &read_buffer);
    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
        tmp_write_buffer(memory_manager, dst_operand.address, dst_size, &write_buffer);

    UnswizzleSubrect(tmp_write_buffer, tmp_read_buffer, bytes_per_pixel, width, height, depth,
@@ -290,9 +290,9 @@ void MaxwellDMA::CopyPitchToBlockLinear() {

    GPUVAddr src_addr = regs.offset_in;
    GPUVAddr dst_addr = regs.offset_out;
    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
        memory_manager, src_addr, src_size, &read_buffer);
    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
        tmp_write_buffer(memory_manager, dst_addr, dst_size, &write_buffer);

    //  If the input is linear and the output is tiled, swizzle the input and copy it over.
@@ -344,9 +344,9 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {

    intermediate_buffer.resize_destructive(mid_buffer_size);

    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
        memory_manager, regs.offset_in, src_size, &read_buffer);
    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
        tmp_write_buffer(memory_manager, regs.offset_out, dst_size, &write_buffer);

    UnswizzleSubrect(intermediate_buffer, tmp_read_buffer, bytes_per_pixel, src_width, src.height,

@@ -8,6 +8,7 @@
#include "common/scratch_buffer.h"
#include "video_core/engines/sw_blitter/blitter.h"
#include "video_core/engines/sw_blitter/converter.h"
#include "video_core/guest_memory.h"
#include "video_core/memory_manager.h"
#include "video_core/surface.h"
#include "video_core/textures/decoders.h"
@@ -160,7 +161,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
    const auto dst_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(dst.format));
    const size_t src_size = get_surface_size(src, src_bytes_per_pixel);

    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_buffer(
    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_buffer(
        memory_manager, src.Address(), src_size, &impl->tmp_buffer);

    const size_t src_copy_size = src_extent_x * src_extent_y * src_bytes_per_pixel;
@@ -220,7 +221,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
    }

    const size_t dst_size = get_surface_size(dst, dst_bytes_per_pixel);
    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadWrite>
    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::SafeReadWrite>
        tmp_buffer2(memory_manager, dst.Address(), dst_size, &impl->tmp_buffer);

    if (dst.linear == Fermi2D::MemoryLayout::BlockLinear) {

@@ -14,7 +14,7 @@ namespace Tegra {
 * Struct describing framebuffer configuration
 */
struct FramebufferConfig {
    VAddr address{};
    DAddr address{};
    u32 offset{};
    u32 width{};
    u32 height{};

@@ -85,7 +85,8 @@ struct GPU::Impl {
    void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
        renderer = std::move(renderer_);
        rasterizer = renderer->ReadRasterizer();
        host1x.MemoryManager().BindRasterizer(rasterizer);
        host1x.MemoryManager().BindInterface(rasterizer);
        host1x.GMMU().BindRasterizer(rasterizer);
    }

    /// Flush all current written commands into the host GPU for execution.
@@ -95,8 +96,8 @@ struct GPU::Impl {

    /// Synchronizes CPU writes with Host GPU memory.
    void InvalidateGPUCache() {
        std::function<void(VAddr, size_t)> callback_writes(
            [this](VAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); });
        std::function<void(PAddr, size_t)> callback_writes(
            [this](PAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); });
        system.GatherGPUDirtyMemory(callback_writes);
    }
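
The dirty-memory gather is now keyed by physical addresses: a single std::function is handed down to the process and invoked once per dirty range. A self-contained sketch of the callback shape; GatherDirty is a hypothetical stand-in for the process-side walk and its two ranges are made up:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

using PAddr = std::uint64_t;

// Hypothetical stand-in for the process walking its GPU dirty-memory managers.
void GatherDirty(std::function<void(PAddr, std::size_t)>& callback) {
    callback(0x1000, 0x200);
    callback(0x8000, 0x100);
}

int main() {
    std::vector<std::pair<PAddr, std::size_t>> invalidated;
    // Mirrors InvalidateGPUCache above: one callback object, one call per range.
    std::function<void(PAddr, std::size_t)> on_dirty(
        [&](PAddr addr, std::size_t size) { invalidated.emplace_back(addr, size); });
    GatherDirty(on_dirty);
    std::cout << invalidated.size() << " dirty ranges reported\n"; // prints 2
    return 0;
}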

@@ -279,11 +280,11 @@ struct GPU::Impl {
    }

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    void FlushRegion(VAddr addr, u64 size) {
    void FlushRegion(DAddr addr, u64 size) {
        gpu_thread.FlushRegion(addr, size);
    }

    VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size) {
    VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size) {
        auto raster_area = rasterizer->GetFlushArea(addr, size);
        if (raster_area.preemtive) {
            return raster_area;
@@ -299,16 +300,16 @@ struct GPU::Impl {
    }

    /// Notify rasterizer that any caches of the specified region should be invalidated
    void InvalidateRegion(VAddr addr, u64 size) {
    void InvalidateRegion(DAddr addr, u64 size) {
        gpu_thread.InvalidateRegion(addr, size);
    }

    bool OnCPUWrite(VAddr addr, u64 size) {
    bool OnCPUWrite(DAddr addr, u64 size) {
        return rasterizer->OnCPUWrite(addr, size);
    }

    /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
    void FlushAndInvalidateRegion(VAddr addr, u64 size) {
    void FlushAndInvalidateRegion(DAddr addr, u64 size) {
        gpu_thread.FlushAndInvalidateRegion(addr, size);
    }

@@ -437,7 +438,7 @@ void GPU::OnCommandListEnd() {
    impl->OnCommandListEnd();
}

u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
u64 GPU::RequestFlush(DAddr addr, std::size_t size) {
    return impl->RequestSyncOperation(
        [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
}
@@ -557,23 +558,23 @@ void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
    impl->SwapBuffers(framebuffer);
}

VideoCore::RasterizerDownloadArea GPU::OnCPURead(VAddr addr, u64 size) {
VideoCore::RasterizerDownloadArea GPU::OnCPURead(PAddr addr, u64 size) {
    return impl->OnCPURead(addr, size);
}

void GPU::FlushRegion(VAddr addr, u64 size) {
void GPU::FlushRegion(DAddr addr, u64 size) {
    impl->FlushRegion(addr, size);
}

void GPU::InvalidateRegion(VAddr addr, u64 size) {
void GPU::InvalidateRegion(DAddr addr, u64 size) {
    impl->InvalidateRegion(addr, size);
}

bool GPU::OnCPUWrite(VAddr addr, u64 size) {
bool GPU::OnCPUWrite(DAddr addr, u64 size) {
    return impl->OnCPUWrite(addr, size);
}

void GPU::FlushAndInvalidateRegion(VAddr addr, u64 size) {
void GPU::FlushAndInvalidateRegion(DAddr addr, u64 size) {
    impl->FlushAndInvalidateRegion(addr, size);
}

@@ -158,7 +158,7 @@ public:
    void InitAddressSpace(Tegra::MemoryManager& memory_manager);

    /// Request a host GPU memory flush from the CPU.
    [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
    [[nodiscard]] u64 RequestFlush(DAddr addr, std::size_t size);

    /// Obtains current flush request fence id.
    [[nodiscard]] u64 CurrentSyncRequestFence() const;
@@ -242,20 +242,20 @@ public:
    void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size);
    [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    void FlushRegion(VAddr addr, u64 size);
    void FlushRegion(DAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be invalidated
    void InvalidateRegion(VAddr addr, u64 size);
    void InvalidateRegion(DAddr addr, u64 size);

    /// Notify rasterizer that CPU is trying to write this area. It returns true if the area is
    /// sensible, false otherwise
    bool OnCPUWrite(VAddr addr, u64 size);
    bool OnCPUWrite(DAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
    void FlushAndInvalidateRegion(VAddr addr, u64 size);
    void FlushAndInvalidateRegion(DAddr addr, u64 size);

private:
    struct Impl;

@@ -82,7 +82,7 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
    PushCommand(SwapBuffersCommand(framebuffer ? std::make_optional(*framebuffer) : std::nullopt));
}

void ThreadManager::FlushRegion(VAddr addr, u64 size) {
void ThreadManager::FlushRegion(DAddr addr, u64 size) {
    if (!is_async) {
        // Always flush with synchronous GPU mode
        PushCommand(FlushRegionCommand(addr, size));
@@ -101,11 +101,11 @@ void ThreadManager::TickGPU() {
    PushCommand(GPUTickCommand());
}

void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
void ThreadManager::InvalidateRegion(DAddr addr, u64 size) {
    rasterizer->OnCacheInvalidation(addr, size);
}

void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
void ThreadManager::FlushAndInvalidateRegion(DAddr addr, u64 size) {
    // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important
    rasterizer->OnCacheInvalidation(addr, size);
}

@@ -54,26 +54,26 @@ struct SwapBuffersCommand final {

/// Command to signal to the GPU thread to flush a region
struct FlushRegionCommand final {
    explicit constexpr FlushRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
    explicit constexpr FlushRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {}

    VAddr addr;
    DAddr addr;
    u64 size;
};

/// Command to signal to the GPU thread to invalidate a region
struct InvalidateRegionCommand final {
    explicit constexpr InvalidateRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
    explicit constexpr InvalidateRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {}

    VAddr addr;
    DAddr addr;
    u64 size;
};

/// Command to signal to the GPU thread to flush and invalidate a region
struct FlushAndInvalidateRegionCommand final {
    explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr_, u64 size_)
    explicit constexpr FlushAndInvalidateRegionCommand(DAddr addr_, u64 size_)
        : addr{addr_}, size{size_} {}

    VAddr addr;
    DAddr addr;
    u64 size;
};

@@ -122,13 +122,13 @@ public:
    void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    void FlushRegion(VAddr addr, u64 size);
    void FlushRegion(DAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be invalidated
    void InvalidateRegion(VAddr addr, u64 size);
    void InvalidateRegion(DAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
    void FlushAndInvalidateRegion(VAddr addr, u64 size);
    void FlushAndInvalidateRegion(DAddr addr, u64 size);

    void TickGPU();

src/video_core/guest_memory.h (new file, 30 lines)
@@ -0,0 +1,30 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <iterator>
#include <memory>
#include <optional>
#include <span>
#include <vector>

#include "common/scratch_buffer.h"
#include "core/guest_memory.h"
#include "video_core/memory_manager.h"

namespace Tegra::Memory {

using GuestMemoryFlags = Core::Memory::GuestMemoryFlags;

template <typename T, GuestMemoryFlags FLAGS>
using DeviceGuestMemory = Core::Memory::GuestMemory<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using DeviceGuestMemoryScoped =
    Core::Memory::GuestMemoryScoped<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemory = Core::Memory::GuestMemory<Tegra::MemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemoryScoped = Core::Memory::GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>;

} // namespace Tegra::Memory
 | 
			
		||||
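These aliases give video_core typed guest-memory views over either manager. A usage sketch, assuming the (manager, address, element-count) constructor shape of Core::Memory::GuestMemory; treat the details as illustrative:

    // Reads 'count' u32 elements through the GPU MMU; SafeRead is assumed to
    // flush overlapping caches before the copy.
    void ConsumeWords(Tegra::MemoryManager& gmmu, GPUVAddr addr, size_t count) {
        Tegra::Memory::GpuGuestMemory<u32, Tegra::Memory::GuestMemoryFlags::SafeRead> words(
            gmmu, addr, count);
        for (const u32 word : words) {
            (void)word; // consume each element
        }
    }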
@@ -32,13 +32,12 @@ H264::~H264() = default;

 std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
                                        size_t* out_configuration_size, bool is_first_frame) {
     H264DecoderContext context;
-    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
-                                     sizeof(H264DecoderContext));
+    host1x.GMMU().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));

     const s64 frame_number = context.h264_parameter_set.frame_number.Value();
     if (!is_first_frame && frame_number != 0) {
         frame.resize_destructive(context.stream_len);
-        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
+        host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
         *out_configuration_size = 0;
         return frame;
     }
@@ -159,8 +158,8 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters
     std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());

     *out_configuration_size = encoded_header.size();
-    host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
-                                     frame.data() + encoded_header.size(), context.stream_len);
+    host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data() + encoded_header.size(),
+                            context.stream_len);

     return frame;
 }
@@ -14,7 +14,7 @@ VP8::~VP8() = default;

 std::span<const u8> VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
     VP8PictureInfo info;
-    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
+    host1x.GMMU().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));

     const bool is_key_frame = info.key_frame == 1u;
     const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
@@ -45,7 +45,7 @@ std::span<const u8> VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters&
         frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
     }
     const u64 bitstream_offset = state.frame_bitstream_offset;
-    host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
+    host1x.GMMU().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);

     return frame;
 }
@@ -358,7 +358,7 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_

 Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
     PictureInfo picture_info;
-    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
+    host1x.GMMU().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
     Vp9PictureInfo vp9_info = picture_info.Convert();

     InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
@@ -373,7 +373,7 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters&

 void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
     EntropyProbs entropy;
-    host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
+    host1x.GMMU().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
     entropy.Convert(dst);
 }

@@ -383,9 +383,8 @@ Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters
         // gpu.SyncGuestHost(); epic, why?
         current_frame.info = GetVp9PictureInfo(state);
         current_frame.bit_stream.resize(current_frame.info.bitstream_size);
-        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
-                                         current_frame.bit_stream.data(),
-                                         current_frame.info.bitstream_size);
+        host1x.GMMU().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
+                                current_frame.info.bitstream_size);
     }
     if (!next_frame.bit_stream.empty()) {
         Vp9FrameContainer temp{
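All three codecs now follow one pattern: the offsets held in nvdec registers are GPU virtual addresses, so they resolve through host1x.GMMU() rather than the device-space MemoryManager(). The shared shape, condensed into a sketch (illustrative, not part of the PR):

    template <typename T>
    T ReadNvdecStruct(Tegra::Host1x::Host1x& host1x, u64 gpu_offset) {
        T out{};
        // GMMU() walks the GPU page tables; MemoryManager() is the flat DAddr space.
        host1x.GMMU().ReadBlock(gpu_offset, &out, sizeof(T));
        return out;
    }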
							
								
								
									
src/video_core/host1x/gpu_device_memory_manager.cpp (new file, 32 lines)
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/device_memory_manager.inc"
+#include "video_core/host1x/gpu_device_memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace Tegra {
+
+struct MaxwellDeviceMethods {
+    static inline void MarkRegionCaching(Core::Memory::Memory* interface, VAddr address,
+                                         size_t size, bool caching) {
+        interface->RasterizerMarkRegionCached(address, size, caching);
+    }
+};
+
+} // namespace Tegra
+
+template struct Core::DeviceMemoryManagerAllocator<Tegra::MaxwellDeviceTraits>;
+template class Core::DeviceMemoryManager<Tegra::MaxwellDeviceTraits>;
+
+template const u8* Tegra::MaxwellDeviceMemoryManager::GetPointer<u8>(DAddr addr) const;
+template u8* Tegra::MaxwellDeviceMemoryManager::GetPointer<u8>(DAddr addr);
+
+template u8 Tegra::MaxwellDeviceMemoryManager::Read<u8>(DAddr addr) const;
+template u16 Tegra::MaxwellDeviceMemoryManager::Read<u16>(DAddr addr) const;
+template u32 Tegra::MaxwellDeviceMemoryManager::Read<u32>(DAddr addr) const;
+template u64 Tegra::MaxwellDeviceMemoryManager::Read<u64>(DAddr addr) const;
+template void Tegra::MaxwellDeviceMemoryManager::Write<u8>(DAddr addr, u8 data);
+template void Tegra::MaxwellDeviceMemoryManager::Write<u16>(DAddr addr, u16 data);
+template void Tegra::MaxwellDeviceMemoryManager::Write<u32>(DAddr addr, u32 data);
+template void Tegra::MaxwellDeviceMemoryManager::Write<u64>(DAddr addr, u64 data);
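Defining DeviceMemoryManager's members in a .inc that only instantiating translation units include keeps the heavy template out of every consumer, while the explicit instantiations above pin down exactly the specializations video_core needs. The same technique in miniature, with hypothetical names:

    // manager.h - declarations only
    template <typename Traits>
    class Manager {
    public:
        void Touch();
    };

    // manager.inc - member definitions, included by instantiating .cpp files only
    template <typename Traits>
    void Manager<Traits>::Touch() {}

    // manager.cpp - one explicit instantiation per traits type
    #include "manager.inc"
    struct MyTraits {};
    template class Manager<MyTraits>;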
							
								
								
									
src/video_core/host1x/gpu_device_memory_manager.h (new file, 24 lines)
@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/device_memory_manager.h"
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra {
+
+struct MaxwellDeviceMethods;
+
+struct MaxwellDeviceTraits {
+    static constexpr size_t device_virtual_bits = 34;
+    using DeviceInterface = typename VideoCore::RasterizerInterface;
+    using DeviceMethods = MaxwellDeviceMethods;
+};
+
+using MaxwellDeviceMemoryManager = Core::DeviceMemoryManager<MaxwellDeviceTraits>;
+
+} // namespace Tegra
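device_virtual_bits = 34 sizes the Maxwell device space at 2^34 bytes, i.e. 16 GiB of addressable range, which comfortably covers the Switch's 4 GiB of DRAM plus headroom for aliasing and sparse mappings. As a quick check:

    static_assert((size_t{1} << 34) == size_t{16} * 1024 * 1024 * 1024,
                  "34 device-virtual bits address exactly 16 GiB");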
@@ -9,9 +9,12 @@ namespace Tegra {
 namespace Host1x {

 Host1x::Host1x(Core::System& system_)
-    : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
+    : system{system_}, syncpoint_manager{},
+      memory_manager(system.DeviceMemory()), gmmu_manager{system, memory_manager, 32, 12},
       allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}

+Host1x::~Host1x() = default;
+
 } // namespace Host1x

 } // namespace Tegra
@@ -6,6 +6,7 @@
 #include "common/common_types.h"

 #include "common/address_space.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/host1x/syncpoint_manager.h"
 #include "video_core/memory_manager.h"

@@ -20,6 +21,7 @@ namespace Host1x {
 class Host1x {
 public:
     explicit Host1x(Core::System& system);
+    ~Host1x();

     SyncpointManager& GetSyncpointManager() {
         return syncpoint_manager;
@@ -29,14 +31,22 @@ public:
         return syncpoint_manager;
     }

-    Tegra::MemoryManager& MemoryManager() {
+    Tegra::MaxwellDeviceMemoryManager& MemoryManager() {
         return memory_manager;
     }

-    const Tegra::MemoryManager& MemoryManager() const {
+    const Tegra::MaxwellDeviceMemoryManager& MemoryManager() const {
         return memory_manager;
     }

+    Tegra::MemoryManager& GMMU() {
+        return gmmu_manager;
+    }
+
+    const Tegra::MemoryManager& GMMU() const {
+        return gmmu_manager;
+    }
+
     Common::FlatAllocator<u32, 0, 32>& Allocator() {
         return *allocator;
     }
@@ -48,7 +58,8 @@ public:
 private:
     Core::System& system;
     SyncpointManager syncpoint_manager;
-    Tegra::MemoryManager memory_manager;
+    Tegra::MaxwellDeviceMemoryManager memory_manager;
+    Tegra::MemoryManager gmmu_manager;
     std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
 };
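After this split, callers choose the address space explicitly: MemoryManager() takes DAddr, GMMU() takes GPUVAddr, so a mix-up surfaces in the types instead of at runtime. A sketch with illustrative addresses:

    void Demo(Tegra::Host1x::Host1x& host1x) {
        const DAddr dev_addr = 0x10000;    // flat device (SMMU) space
        const GPUVAddr gpu_addr = 0x20000; // GPU virtual space
        const u64 a = host1x.MemoryManager().Read<u64>(dev_addr);
        const u64 b = host1x.GMMU().Read<u64>(gpu_addr);
        (void)a;
        (void)b;
    }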
@@ -81,7 +81,7 @@ void Vic::Execute() {
         LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
         return;
     }
-    const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
+    const VicConfig config{host1x.GMMU().Read<u64>(config_struct_address + 0x20)};
     auto frame = nvdec_processor->GetFrame();
     if (!frame) {
         return;
@@ -162,12 +162,12 @@ void Vic::WriteRGBFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
         Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                                 block_height, 0, width * 4);

-        host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+        host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
     } else {
         // send pitch linear frame
         const size_t linear_size = width * height * 4;
-        host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
-                                          linear_size);
+        host1x.GMMU().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+                                 linear_size);
     }
 }

@@ -193,8 +193,7 @@ void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
         const std::size_t dst = y * aligned_width;
         std::memcpy(luma_buffer.data() + dst, luma_src + src, frame_width);
     }
-    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
-                                      luma_buffer.size());
+    host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), luma_buffer.size());

     // Chroma
     const std::size_t half_height = frame_height / 2;
@@ -233,8 +232,8 @@ void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
         ASSERT(false);
         break;
     }
-    host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
-                                      chroma_buffer.size());
+    host1x.GMMU().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+                             chroma_buffer.size());
 }

 } // namespace Host1x
@@ -7,25 +7,26 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_process.h"
+#include "video_core/guest_memory.h"
 #include "video_core/host1x/host1x.h"
 #include "video_core/invalidation_accumulator.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_base.h"

 namespace Tegra {
-using Core::Memory::GuestMemoryFlags;
+using Tegra::Memory::GuestMemoryFlags;

 std::atomic<size_t> MemoryManager::unique_identifier_generator{};

-MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
-                             u64 page_bits_)
-    : system{system_}, memory{system.ApplicationMemory()}, device_memory{system.DeviceMemory()},
-      address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
-      entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
-                                           page_bits != big_page_bits ? page_bits : 0},
+MemoryManager::MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_,
+                             u64 address_space_bits_, u64 big_page_bits_, u64 page_bits_)
+    : system{system_}, memory{memory_}, address_space_bits{address_space_bits_},
+      page_bits{page_bits_}, big_page_bits{big_page_bits_}, entries{}, big_entries{},
+      page_table{address_space_bits, address_space_bits + page_bits - 38,
+                 page_bits != big_page_bits ? page_bits : 0},
       kind_map{PTEKind::INVALID}, unique_identifier{unique_identifier_generator.fetch_add(
                                       1, std::memory_order_acq_rel)},
       accumulator{std::make_unique<VideoCommon::InvalidationAccumulator>()} {
@@ -42,11 +43,16 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
     big_page_table_mask = big_page_table_size - 1;

     big_entries.resize(big_page_table_size / 32, 0);
-    big_page_table_cpu.resize(big_page_table_size);
+    big_page_table_dev.resize(big_page_table_size);
     big_page_continuous.resize(big_page_table_size / continuous_bits, 0);
     entries.resize(page_table_size / 32, 0);
 }

+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
+                             u64 page_bits_)
+    : MemoryManager(system_, system_.Host1x().MemoryManager(), address_space_bits_, big_page_bits_,
+                    page_bits_) {}
+
 MemoryManager::~MemoryManager() = default;

 template <bool is_big_page>
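The original three-parameter constructor survives as a thin delegate that defaults to the Host1x device memory manager, so existing call sites are untouched. The idiom in isolation (hypothetical names):

    class Mapper {
    public:
        Mapper(int& backing, unsigned bits) : backing_{backing}, bits_{bits} {}
        // Legacy signature, now forwarding with a default backing store.
        explicit Mapper(unsigned bits) : Mapper(default_backing(), bits) {}

    private:
        static int& default_backing() {
            static int storage = 0;
            return storage;
        }
        int& backing_;
        unsigned bits_;
    };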
@@ -100,7 +106,7 @@ inline void MemoryManager::SetBigPageContinuous(size_t big_page_index, bool valu
 }

 template <MemoryManager::EntryType entry_type>
-GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size,
                                     PTEKind kind) {
     [[maybe_unused]] u64 remaining_size{size};
     if constexpr (entry_type == EntryType::Mapped) {
@@ -114,9 +120,9 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
             rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, page_size);
         }
         if constexpr (entry_type == EntryType::Mapped) {
-            const VAddr current_cpu_addr = cpu_addr + offset;
+            const DAddr current_dev_addr = dev_addr + offset;
             const auto index = PageEntryIndex<false>(current_gpu_addr);
-            const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+            const u32 sub_value = static_cast<u32>(current_dev_addr >> cpu_page_bits);
             page_table[index] = sub_value;
         }
         remaining_size -= page_size;
@@ -126,7 +132,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
 }

 template <MemoryManager::EntryType entry_type>
-GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr,
                                        size_t size, PTEKind kind) {
     [[maybe_unused]] u64 remaining_size{size};
     for (u64 offset{}; offset < size; offset += big_page_size) {
@@ -137,20 +143,20 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
             rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, big_page_size);
         }
         if constexpr (entry_type == EntryType::Mapped) {
-            const VAddr current_cpu_addr = cpu_addr + offset;
+            const DAddr current_dev_addr = dev_addr + offset;
             const auto index = PageEntryIndex<true>(current_gpu_addr);
-            const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
-            big_page_table_cpu[index] = sub_value;
+            const u32 sub_value = static_cast<u32>(current_dev_addr >> cpu_page_bits);
+            big_page_table_dev[index] = sub_value;
             const bool is_continuous = ([&] {
                 uintptr_t base_ptr{
-                    reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
+                    reinterpret_cast<uintptr_t>(memory.GetPointer<u8>(current_dev_addr))};
                 if (base_ptr == 0) {
                     return false;
                 }
-                for (VAddr start_cpu = current_cpu_addr + page_size;
-                     start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
+                for (DAddr start_cpu = current_dev_addr + page_size;
+                     start_cpu < current_dev_addr + big_page_size; start_cpu += page_size) {
                     base_ptr += page_size;
-                    auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu));
+                    auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointer<u8>(start_cpu));
                     if (next_ptr == 0 || base_ptr != next_ptr) {
                         return false;
                     }
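The is_continuous lambda asks whether the whole big page is backed by one contiguous host allocation: each successive small page's host pointer must land exactly page_size past the previous one, and only then can reads and writes use a single memcpy. Restated on its own (resolve stands in for memory.GetPointer<u8>):

    bool IsHostContinuous(u8* (*resolve)(DAddr), DAddr base, size_t page_size,
                          size_t big_page_size) {
        u8* prev = resolve(base);
        if (prev == nullptr) {
            return false; // an unmapped start can never be continuous
        }
        for (DAddr addr = base + page_size; addr < base + big_page_size; addr += page_size) {
            u8* next = resolve(addr);
            if (next == nullptr || next != prev + page_size) {
                return false; // hole or non-adjacent backing page
            }
            prev = next;
        }
        return true;
    }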
@@ -172,12 +178,12 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_)
     rasterizer = rasterizer_;
 }

-GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind,
+GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, DAddr dev_addr, std::size_t size, PTEKind kind,
                             bool is_big_pages) {
     if (is_big_pages) [[likely]] {
-        return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
+        return BigPageTableOp<EntryType::Mapped>(gpu_addr, dev_addr, size, kind);
     }
-    return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
+    return PageTableOp<EntryType::Mapped>(gpu_addr, dev_addr, size, kind);
 }

 GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
@@ -202,7 +208,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
 }

-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
+std::optional<DAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
     if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
         return std::nullopt;
     }
@@ -211,17 +217,17 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
             return std::nullopt;
         }

-        const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
-                                    << cpu_page_bits;
-        return cpu_addr_base + (gpu_addr & page_mask);
+        const DAddr dev_addr_base = static_cast<DAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
+                                    << cpu_page_bits;
+        return dev_addr_base + (gpu_addr & page_mask);
     }

-    const VAddr cpu_addr_base =
-        static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
-    return cpu_addr_base + (gpu_addr & big_page_mask);
+    const DAddr dev_addr_base =
+        static_cast<DAddr>(big_page_table_dev[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
+    return dev_addr_base + (gpu_addr & big_page_mask);
 }

-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
+std::optional<DAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
     size_t page_index{addr >> page_bits};
     const size_t page_last{(addr + size + page_size - 1) >> page_bits};
     while (page_index < page_last) {
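GpuToCpuAddress keeps its legacy name but now yields a DAddr: the page-table entry stores a device page number, which is shifted up by cpu_page_bits and combined with the in-page offset of the GPU address. The small-page math, restated assuming 4 KiB pages:

    // Assuming cpu_page_bits == 12 (4 KiB device pages).
    constexpr u64 kPageBits = 12;
    constexpr u64 kPageMask = (u64{1} << kPageBits) - 1;

    DAddr TranslateSmallPage(u32 page_table_entry, GPUVAddr gpu_addr) {
        const DAddr dev_base = static_cast<DAddr>(page_table_entry) << kPageBits;
        return dev_base + (gpu_addr & kPageMask);
    }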
@@ -274,7 +280,7 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
         return {};
     }

-    return memory.GetPointer(*address);
+    return memory.GetPointer<u8>(*address);
 }

 const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
@@ -283,7 +289,7 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
         return {};
     }

-    return memory.GetPointer(*address);
+    return memory.GetPointer<u8>(*address);
 }

 #ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
@@ -367,25 +373,25 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std:
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
         }
-        u8* physical = memory.GetPointer(cpu_addr_base);
+        u8* physical = memory.GetPointer<u8>(dev_addr_base);
         std::memcpy(dest_buffer, physical, copy_amount);
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
         }
         if (!IsBigPageContinuous(page_index)) [[unlikely]] {
-            memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+            memory.ReadBlockUnsafe(dev_addr_base, dest_buffer, copy_amount);
         } else {
-            u8* physical = memory.GetPointer(cpu_addr_base);
+            u8* physical = memory.GetPointer<u8>(dev_addr_base);
             std::memcpy(dest_buffer, physical, copy_amount);
         }
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
@@ -416,25 +422,25 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
         }
-        u8* physical = memory.GetPointer(cpu_addr_base);
+        u8* physical = memory.GetPointer<u8>(dev_addr_base);
         std::memcpy(physical, src_buffer, copy_amount);
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
         }
         if (!IsBigPageContinuous(page_index)) [[unlikely]] {
-            memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+            memory.WriteBlockUnsafe(dev_addr_base, src_buffer, copy_amount);
         } else {
-            u8* physical = memory.GetPointer(cpu_addr_base);
+            u8* physical = memory.GetPointer<u8>(dev_addr_base);
             std::memcpy(physical, src_buffer, copy_amount);
         }
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
@@ -470,14 +476,14 @@ void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size,
                           [[maybe_unused]] std::size_t copy_amount) {};

     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
     };
     auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
                                  std::size_t copy_amount) {
@@ -495,15 +501,15 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
                           [[maybe_unused]] std::size_t copy_amount) { return false; };

     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        result |= rasterizer->MustFlushRegion(dev_addr_base, copy_amount, which);
         return result;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        result |= rasterizer->MustFlushRegion(dev_addr_base, copy_amount, which);
         return result;
     };
     auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -517,7 +523,7 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
 }

 size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const {
-    std::optional<VAddr> old_page_addr{};
+    std::optional<DAddr> old_page_addr{};
     size_t range_so_far = 0;
     bool result{false};
     auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
@@ -526,24 +532,24 @@ size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const {
         return true;
     };
     auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             result = true;
             return true;
         }
         range_so_far += copy_amount;
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             return true;
         }
         range_so_far += copy_amount;
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -568,14 +574,14 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,
                           [[maybe_unused]] std::size_t copy_amount) {};

     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
     };
     auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset,
                                       std::size_t copy_amount) {
@@ -587,7 +593,7 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,

 void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size,
                               VideoCommon::CacheType which) {
-    Core::Memory::GpuGuestMemoryScoped<u8, GuestMemoryFlags::SafeReadWrite> data(
+    Tegra::Memory::GpuGuestMemoryScoped<u8, GuestMemoryFlags::SafeReadWrite> data(
         *this, gpu_src_addr, size);
     data.SetAddressAndSize(gpu_dest_addr, size);
     FlushRegion(gpu_dest_addr, size, which);
@@ -600,18 +606,18 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
             const std::size_t page{(page_index & big_page_mask) + size};
             return page <= big_page_size;
         }
-        const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
-        return page <= Core::Memory::YUZU_PAGESIZE;
+        const std::size_t page{(gpu_addr & Core::DEVICE_PAGEMASK) + size};
+        return page <= Core::DEVICE_PAGESIZE;
     }
     if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
         return false;
     }
-    const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
-    return page <= Core::Memory::YUZU_PAGESIZE;
+    const std::size_t page{(gpu_addr & Core::DEVICE_PAGEMASK) + size};
+    return page <= Core::DEVICE_PAGESIZE;
 }

 bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const {
-    std::optional<VAddr> old_page_addr{};
+    std::optional<DAddr> old_page_addr{};
     bool result{true};
     auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
                     std::size_t copy_amount) {
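A range is "granular" when it never crosses a page boundary: the offset inside the page plus the size must fit within one page. Worked through with an assumed 4 KiB device page (DEVICE_PAGESIZE == 0x1000, DEVICE_PAGEMASK == 0xFFF):

    constexpr bool IsGranular(u64 addr, size_t size, u64 page_mask, size_t page_size) {
        return (addr & page_mask) + size <= page_size;
    }
    // addr 0x1FF0, size 0x20: 0xFF0 + 0x20 = 0x1010 > 0x1000 -> crosses a boundary.
    static_assert(!IsGranular(0x1FF0, 0x20, 0xFFF, 0x1000));
    // addr 0x1F00, size 0x20: 0xF00 + 0x20 = 0xF20 <= 0x1000 -> granular.
    static_assert(IsGranular(0x1F00, 0x20, 0xFFF, 0x1000));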
@@ -619,23 +625,23 @@ bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const
         return true;
     };
     auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             result = false;
             return true;
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             result = false;
             return true;
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -678,11 +684,11 @@ template <bool is_gpu_address>
 void MemoryManager::GetSubmappedRangeImpl(
     GPUVAddr gpu_addr, std::size_t size,
     boost::container::small_vector<
-        std::pair<std::conditional_t<is_gpu_address, GPUVAddr, VAddr>, std::size_t>, 32>& result)
+        std::pair<std::conditional_t<is_gpu_address, GPUVAddr, DAddr>, std::size_t>, 32>& result)
     const {
-    std::optional<std::pair<std::conditional_t<is_gpu_address, GPUVAddr, VAddr>, std::size_t>>
+    std::optional<std::pair<std::conditional_t<is_gpu_address, GPUVAddr, DAddr>, std::size_t>>
         last_segment{};
-    std::optional<VAddr> old_page_addr{};
+    std::optional<DAddr> old_page_addr{};
     const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index,
                                                 [[maybe_unused]] std::size_t offset,
                                                 [[maybe_unused]] std::size_t copy_amount) {
@@ -694,20 +700,20 @@ void MemoryManager::GetSubmappedRangeImpl(
     const auto extend_size_big = [this, &split, &old_page_addr,
                                   &last_segment](std::size_t page_index, std::size_t offset,
                                                  std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
         if (old_page_addr) {
-            if (*old_page_addr != cpu_addr_base) {
+            if (*old_page_addr != dev_addr_base) {
                 split(0, 0, 0);
             }
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         if (!last_segment) {
             if constexpr (is_gpu_address) {
                 const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset;
                 last_segment = {new_base_addr, copy_amount};
             } else {
-                last_segment = {cpu_addr_base, copy_amount};
+                last_segment = {dev_addr_base, copy_amount};
             }
         } else {
             last_segment->second += copy_amount;
@@ -716,20 +722,20 @@ void MemoryManager::GetSubmappedRangeImpl(
     const auto extend_size_short = [this, &split, &old_page_addr,
                                     &last_segment](std::size_t page_index, std::size_t offset,
                                                    std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
         if (old_page_addr) {
-            if (*old_page_addr != cpu_addr_base) {
+            if (*old_page_addr != dev_addr_base) {
                 split(0, 0, 0);
             }
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         if (!last_segment) {
             if constexpr (is_gpu_address) {
                 const GPUVAddr new_base_addr = (page_index << page_bits) + offset;
                 last_segment = {new_base_addr, copy_amount};
             } else {
-                last_segment = {cpu_addr_base, copy_amount};
+                last_segment = {dev_addr_base, copy_amount};
             }
         } else {
             last_segment->second += copy_amount;
@@ -756,9 +762,12 @@ void MemoryManager::FlushCaching() {
 }

 const u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) const {
-    auto cpu_addr = GpuToCpuAddress(src_addr);
-    if (cpu_addr) {
-        return memory.GetSpan(*cpu_addr, size);
+    if (!IsContinuousRange(src_addr, size)) {
+        return nullptr;
+    }
+    auto dev_addr = GpuToCpuAddress(src_addr);
+    if (dev_addr) {
+        return memory.GetSpan(*dev_addr, size);
     }
     return nullptr;
 }
@@ -767,9 +776,9 @@ u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) {
     if (!IsContinuousRange(src_addr, size)) {
         return nullptr;
     }
-    auto cpu_addr = GpuToCpuAddress(src_addr);
-    if (cpu_addr) {
-        return memory.GetSpan(*cpu_addr, size);
+    auto dev_addr = GpuToCpuAddress(src_addr);
+    if (dev_addr) {
+        return memory.GetSpan(*dev_addr, size);
     }
     return nullptr;
 }
@@ -15,8 +15,8 @@
 #include "common/range_map.h"
 #include "common/scratch_buffer.h"
 #include "common/virtual_buffer.h"
-#include "core/memory.h"
 #include "video_core/cache_types.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/pte_kind.h"

 namespace VideoCore {
@@ -28,10 +28,6 @@ class InvalidationAccumulator;
 }

 namespace Core {
-class DeviceMemory;
-namespace Memory {
-class Memory;
-} // namespace Memory
 class System;
 } // namespace Core
@@ -41,6 +37,9 @@ class MemoryManager final {
 public:
     explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
                            u64 big_page_bits_ = 16, u64 page_bits_ = 12);
+    explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_,
+                           u64 address_space_bits_ = 40, u64 big_page_bits_ = 16,
+                           u64 page_bits_ = 12);
     ~MemoryManager();

     size_t GetID() const {
@@ -50,9 +49,9 @@ public:
     /// Binds a renderer to the memory manager.
     void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);

-    [[nodiscard]] std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const;
+    [[nodiscard]] std::optional<DAddr> GpuToCpuAddress(GPUVAddr addr) const;

-    [[nodiscard]] std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr, std::size_t size) const;
+    [[nodiscard]] std::optional<DAddr> GpuToCpuAddress(GPUVAddr addr, std::size_t size) const;

     template <typename T>
     [[nodiscard]] T Read(GPUVAddr addr) const;
@@ -69,7 +68,7 @@ public:
         if (!address) {
             return {};
         }
-        return memory.GetPointer(*address);
+        return memory.GetPointer<T>(*address);
     }

     template <typename T>
@@ -110,7 +109,7 @@ public:
     [[nodiscard]] bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const;

     /**
-     * Checks if a gpu region is mapped by a single range of cpu addresses.
+     * Checks if a gpu region is mapped by a single range of device addresses.
      */
     [[nodiscard]] bool IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const;
@@ -120,14 +119,14 @@ public:
     [[nodiscard]] bool IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const;

     /**
-     * Returns a vector with all the subranges of cpu addresses mapped beneath.
+     * Returns a vector with all the subranges of device addresses mapped beneath.
      * if the region is continuous, a single pair will be returned. If it's unmapped, an empty
     * vector will be returned;
     */
     boost::container::small_vector<std::pair<GPUVAddr, std::size_t>, 32> GetSubmappedRange(
         GPUVAddr gpu_addr, std::size_t size) const;

-    GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+    GPUVAddr Map(GPUVAddr gpu_addr, DAddr dev_addr, std::size_t size,
                  PTEKind kind = PTEKind::INVALID, bool is_big_pages = true);
     GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
     void Unmap(GPUVAddr gpu_addr, std::size_t size);
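Map now pairs a GPU virtual range with a device address, so nvdrv-side code is expected to pin host memory into the device space first and hand the resulting DAddr here. A hedged usage sketch (names illustrative):

    GPUVAddr MapPinnedRange(Tegra::MemoryManager& gmmu, GPUVAddr gpu_va, DAddr pinned_addr,
                            std::size_t size) {
        // Defaults from the declaration above: PTEKind::INVALID, big pages enabled.
        return gmmu.Map(gpu_va, pinned_addr, size);
    }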
@@ -186,12 +185,11 @@ private:
     void GetSubmappedRangeImpl(
         GPUVAddr gpu_addr, std::size_t size,
         boost::container::small_vector<
-            std::pair<std::conditional_t<is_gpu_address, GPUVAddr, VAddr>, std::size_t>, 32>&
+            std::pair<std::conditional_t<is_gpu_address, GPUVAddr, DAddr>, std::size_t>, 32>&
             result) const;

     Core::System& system;
-    Core::Memory::Memory& memory;
-    Core::DeviceMemory& device_memory;
+    MaxwellDeviceMemoryManager& memory;

     const u64 address_space_bits;
     const u64 page_bits;
@@ -218,11 +216,11 @@ private:
     std::vector<u64> big_entries;

     template <EntryType entry_type>
-    GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+    GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size,
                          PTEKind kind);

     template <EntryType entry_type>
-    GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+    GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size,
                             PTEKind kind);

     template <bool is_big_page>
@@ -233,11 +231,11 @@ private:

     Common::MultiLevelPageTable<u32> page_table;
     Common::RangeMap<GPUVAddr, PTEKind> kind_map;
-    Common::VirtualBuffer<u32> big_page_table_cpu;
+    Common::VirtualBuffer<u32> big_page_table_dev;

     std::vector<u64> big_page_continuous;
-    boost::container::small_vector<std::pair<VAddr, std::size_t>, 32> page_stash{};
-    boost::container::small_vector<std::pair<VAddr, std::size_t>, 32> page_stash2{};
+    boost::container::small_vector<std::pair<DAddr, std::size_t>, 32> page_stash{};
+    boost::container::small_vector<std::pair<DAddr, std::size_t>, 32> page_stash2{};

     mutable std::mutex guard;
@@ -18,9 +18,9 @@

 #include "common/assert.h"
 #include "common/settings.h"
-#include "core/memory.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/texture_cache/slot_vector.h"
@@ -102,18 +102,19 @@ template <class QueryCache, class CachedQuery, class CounterStream, class HostCo
 class QueryCacheLegacy : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
     explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_,
-                              Core::Memory::Memory& cpu_memory_)
+                              Tegra::MaxwellDeviceMemoryManager& device_memory_)
         : rasterizer{rasterizer_},
           // Use reinterpret_cast instead of static_cast as workaround for
           // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
-          cpu_memory{cpu_memory_}, streams{{
-                                       {CounterStream{reinterpret_cast<QueryCache&>(*this),
-                                                      VideoCore::QueryType::SamplesPassed}},
-                                       {CounterStream{reinterpret_cast<QueryCache&>(*this),
-                                                      VideoCore::QueryType::PrimitivesGenerated}},
-                                       {CounterStream{reinterpret_cast<QueryCache&>(*this),
-                                                      VideoCore::QueryType::TfbPrimitivesWritten}},
-                                   }} {
+          device_memory{device_memory_},
+          streams{{
+              {CounterStream{reinterpret_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::SamplesPassed}},
+              {CounterStream{reinterpret_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::PrimitivesGenerated}},
+              {CounterStream{reinterpret_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::TfbPrimitivesWritten}},
+          }} {
         (void)slot_async_jobs.insert(); // Null value
     }
@@ -322,13 +323,14 @@ private:
 | 
			
		||||
            local_lock.unlock();
 | 
			
		||||
            if (timestamp) {
 | 
			
		||||
                u64 timestamp_value = *timestamp;
 | 
			
		||||
                cpu_memory.WriteBlockUnsafe(address + sizeof(u64), ×tamp_value, sizeof(u64));
 | 
			
		||||
                cpu_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
 | 
			
		||||
                device_memory.WriteBlockUnsafe(address + sizeof(u64), ×tamp_value,
 | 
			
		||||
                                               sizeof(u64));
 | 
			
		||||
                device_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
 | 
			
		||||
                rasterizer.InvalidateRegion(address, sizeof(u64) * 2,
 | 
			
		||||
                                            VideoCommon::CacheType::NoQueryCache);
 | 
			
		||||
            } else {
 | 
			
		||||
                u32 small_value = static_cast<u32>(value);
 | 
			
		||||
                cpu_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
 | 
			
		||||
                device_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
 | 
			
		||||
                rasterizer.InvalidateRegion(address, sizeof(u32),
 | 
			
		||||
                                            VideoCommon::CacheType::NoQueryCache);
 | 
			
		||||
            }
 | 
			
		||||
@@ -342,7 +344,7 @@ private:
 | 
			
		||||
    SlotVector<AsyncJob> slot_async_jobs;
 | 
			
		||||
 | 
			
		||||
    VideoCore::RasterizerInterface& rasterizer;
 | 
			
		||||
    Core::Memory::Memory& cpu_memory;
 | 
			
		||||
    Tegra::MaxwellDeviceMemoryManager& device_memory;
 | 
			
		||||
 | 
			
		||||
    mutable std::recursive_mutex mutex;
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
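In the hunk above, a finished query is written straight into guest memory: the 64-bit result at the query address and, for timestamp queries, the timestamp in the following eight bytes, after which the written range is invalidated. A sketch of that layout (WriteBlockUnsafe is yuzu's unchecked guest write; the struct is only for illustration and assumes no padding between two u64 fields, which holds here):

#include <cstdint>
#include <cstring>

// Guest-visible report layout: the query value first, the GPU timestamp in
// the next eight bytes (illustrative struct, not a yuzu type).
struct QueryReport {
    std::uint64_t value;
    std::uint64_t timestamp;
};

void WriteReport(std::uint8_t* guest_ptr, std::uint64_t value, std::uint64_t timestamp) {
    const QueryReport report{value, timestamp};
    std::memcpy(guest_ptr, &report, sizeof(report)); // mirrors the two WriteBlockUnsafe calls
}
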
@@ -23,7 +23,7 @@ DECLARE_ENUM_FLAG_OPERATORS(QueryFlagBits)

class QueryBase {
public:
    VAddr guest_address{};
    DAddr guest_address{};
    QueryFlagBits flags{};
    u64 value{};

@@ -32,7 +32,7 @@ protected:
    QueryBase() = default;

    // Parameterized constructor
    QueryBase(VAddr address, QueryFlagBits flags_, u64 value_)
    QueryBase(DAddr address, QueryFlagBits flags_, u64 value_)
        : guest_address(address), flags(flags_), value{value_} {}
};

@@ -15,9 +15,9 @@
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/memory_manager.h"
#include "video_core/query_cache/bank_base.h"
#include "video_core/query_cache/query_base.h"
@@ -113,9 +113,10 @@ struct QueryCacheBase<Traits>::QueryCacheBaseImpl {
    using RuntimeType = typename Traits::RuntimeType;

    QueryCacheBaseImpl(QueryCacheBase<Traits>* owner_, VideoCore::RasterizerInterface& rasterizer_,
                       Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_, Tegra::GPU& gpu_)
                       Tegra::MaxwellDeviceMemoryManager& device_memory_, RuntimeType& runtime_,
                       Tegra::GPU& gpu_)
        : owner{owner_}, rasterizer{rasterizer_},
          cpu_memory{cpu_memory_}, runtime{runtime_}, gpu{gpu_} {
          device_memory{device_memory_}, runtime{runtime_}, gpu{gpu_} {
        streamer_mask = 0;
        for (size_t i = 0; i < static_cast<size_t>(QueryType::MaxQueryTypes); i++) {
            streamers[i] = runtime.GetStreamerInterface(static_cast<QueryType>(i));
@@ -158,7 +159,7 @@ struct QueryCacheBase<Traits>::QueryCacheBaseImpl {

    QueryCacheBase<Traits>* owner;
    VideoCore::RasterizerInterface& rasterizer;
    Core::Memory::Memory& cpu_memory;
    Tegra::MaxwellDeviceMemoryManager& device_memory;
    RuntimeType& runtime;
    Tegra::GPU& gpu;
    std::array<StreamerInterface*, static_cast<size_t>(QueryType::MaxQueryTypes)> streamers;
@@ -171,10 +172,11 @@ struct QueryCacheBase<Traits>::QueryCacheBaseImpl {
template <typename Traits>
QueryCacheBase<Traits>::QueryCacheBase(Tegra::GPU& gpu_,
                                       VideoCore::RasterizerInterface& rasterizer_,
                                       Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_)
                                       Tegra::MaxwellDeviceMemoryManager& device_memory_,
                                       RuntimeType& runtime_)
    : cached_queries{} {
    impl = std::make_unique<QueryCacheBase<Traits>::QueryCacheBaseImpl>(
        this, rasterizer_, cpu_memory_, runtime_, gpu_);
        this, rasterizer_, device_memory_, runtime_, gpu_);
}

template <typename Traits>
@@ -240,7 +242,7 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
    if (!cpu_addr_opt) [[unlikely]] {
        return;
    }
    VAddr cpu_addr = *cpu_addr_opt;
    DAddr cpu_addr = *cpu_addr_opt;
    const size_t new_query_id = streamer->WriteCounter(cpu_addr, has_timestamp, payload, subreport);
    auto* query = streamer->GetQuery(new_query_id);
    if (is_fence) {
@@ -250,13 +252,12 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
    query_location.stream_id.Assign(static_cast<u32>(streamer_id));
    query_location.query_id.Assign(static_cast<u32>(new_query_id));
    const auto gen_caching_indexing = [](VAddr cur_addr) {
        return std::make_pair<u64, u32>(cur_addr >> Core::Memory::YUZU_PAGEBITS,
                                        static_cast<u32>(cur_addr & Core::Memory::YUZU_PAGEMASK));
        return std::make_pair<u64, u32>(cur_addr >> Core::DEVICE_PAGEBITS,
                                        static_cast<u32>(cur_addr & Core::DEVICE_PAGEMASK));
    };
    u8* pointer = impl->cpu_memory.GetPointer(cpu_addr);
    u8* pointer_timestamp = impl->cpu_memory.GetPointer(cpu_addr + 8);
    u8* pointer = impl->device_memory.template GetPointer<u8>(cpu_addr);
    u8* pointer_timestamp = impl->device_memory.template GetPointer<u8>(cpu_addr + 8);
    bool is_synced = !Settings::IsGPULevelHigh() && is_fence;

    std::function<void()> operation([this, is_synced, streamer, query_base = query, query_location,
                                     pointer, pointer_timestamp] {
        if (True(query_base->flags & QueryFlagBits::IsInvalidated)) {
@@ -323,8 +324,8 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
template <typename Traits>
void QueryCacheBase<Traits>::UnregisterPending() {
    const auto gen_caching_indexing = [](VAddr cur_addr) {
        return std::make_pair<u64, u32>(cur_addr >> Core::Memory::YUZU_PAGEBITS,
                                        static_cast<u32>(cur_addr & Core::Memory::YUZU_PAGEMASK));
        return std::make_pair<u64, u32>(cur_addr >> Core::DEVICE_PAGEBITS,
                                        static_cast<u32>(cur_addr & Core::DEVICE_PAGEMASK));
    };
    std::scoped_lock lock(cache_mutex);
    for (QueryLocation loc : impl->pending_unregister) {
@@ -388,7 +389,7 @@ bool QueryCacheBase<Traits>::AccelerateHostConditionalRendering() {
        }
        VAddr cpu_addr = *cpu_addr_opt;
        std::scoped_lock lock(cache_mutex);
        auto it1 = cached_queries.find(cpu_addr >> Core::Memory::YUZU_PAGEBITS);
        auto it1 = cached_queries.find(cpu_addr >> Core::DEVICE_PAGEBITS);
        if (it1 == cached_queries.end()) {
            return VideoCommon::LookupData{
                .address = cpu_addr,
@@ -396,10 +397,10 @@ bool QueryCacheBase<Traits>::AccelerateHostConditionalRendering() {
            };
        }
        auto& sub_container = it1->second;
        auto it_current = sub_container.find(cpu_addr & Core::Memory::YUZU_PAGEMASK);
        auto it_current = sub_container.find(cpu_addr & Core::DEVICE_PAGEMASK);

        if (it_current == sub_container.end()) {
            auto it_current_2 = sub_container.find((cpu_addr & Core::Memory::YUZU_PAGEMASK) + 4);
            auto it_current_2 = sub_container.find((cpu_addr & Core::DEVICE_PAGEMASK) + 4);
            if (it_current_2 == sub_container.end()) {
                return VideoCommon::LookupData{
                    .address = cpu_addr,
@@ -559,7 +560,7 @@ bool QueryCacheBase<Traits>::SemiFlushQueryDirty(QueryCacheBase<Traits>::QueryLo
    }
    if (True(query_base->flags & QueryFlagBits::IsFinalValueSynced) &&
        False(query_base->flags & QueryFlagBits::IsGuestSynced)) {
        auto* ptr = impl->cpu_memory.GetPointer(query_base->guest_address);
        auto* ptr = impl->device_memory.template GetPointer<u8>(query_base->guest_address);
        if (True(query_base->flags & QueryFlagBits::HasTimestamp)) {
            std::memcpy(ptr, &query_base->value, sizeof(query_base->value));
            return false;

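Throughout these hunks the query cache keys its lookup structures by device page: gen_caching_indexing splits an address into a page index (the cache bucket) and an in-page offset using Core::DEVICE_PAGEBITS/DEVICE_PAGEMASK instead of the old YUZU_PAGEBITS/YUZU_PAGEMASK. A sketch of that arithmetic, assuming a 4 KiB device page (the real constants live with the new device memory manager):

#include <cstdint>
#include <utility>

constexpr std::uint64_t DEVICE_PAGEBITS = 12; // assumption: 4 KiB device pages
constexpr std::uint64_t DEVICE_PAGEMASK = (1ULL << DEVICE_PAGEBITS) - 1;

// Mirrors gen_caching_indexing: the high bits select the cache bucket, the
// low bits locate the query within that page.
std::pair<std::uint64_t, std::uint32_t> SplitDeviceAddress(std::uint64_t dev_addr) {
    return {dev_addr >> DEVICE_PAGEBITS,
            static_cast<std::uint32_t>(dev_addr & DEVICE_PAGEMASK)};
}
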
@@ -13,15 +13,11 @@
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/memory.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/query_cache/query_base.h"
#include "video_core/query_cache/types.h"

namespace Core::Memory {
class Memory;
}

namespace VideoCore {
class RasterizerInterface;
}
@@ -53,7 +49,8 @@ public:
    };

    explicit QueryCacheBase(Tegra::GPU& gpu, VideoCore::RasterizerInterface& rasterizer_,
                            Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_);
                            Tegra::MaxwellDeviceMemoryManager& device_memory_,
                            RuntimeType& runtime_);

    ~QueryCacheBase();

@@ -125,10 +122,10 @@ protected:
        const u64 addr_begin = addr;
        const u64 addr_end = addr_begin + size;

        const u64 page_end = addr_end >> Core::Memory::YUZU_PAGEBITS;
        const u64 page_end = addr_end >> Core::DEVICE_PAGEBITS;
        std::scoped_lock lock(cache_mutex);
        for (u64 page = addr_begin >> Core::Memory::YUZU_PAGEBITS; page <= page_end; ++page) {
            const u64 page_start = page << Core::Memory::YUZU_PAGEBITS;
        for (u64 page = addr_begin >> Core::DEVICE_PAGEBITS; page <= page_end; ++page) {
            const u64 page_start = page << Core::DEVICE_PAGEBITS;
            const auto in_range = [page_start, addr_begin, addr_end](const u32 query_location) {
                const u64 cache_begin = page_start + query_location;
                const u64 cache_end = cache_begin + sizeof(u32);

@@ -1,72 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <atomic>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
#include "core/memory.h"
#include "video_core/rasterizer_accelerated.h"

namespace VideoCore {

using namespace Core::Memory;

RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
    : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}

RasterizerAccelerated::~RasterizerAccelerated() = default;

void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
    u64 uncache_begin = 0;
    u64 cache_begin = 0;
    u64 uncache_bytes = 0;
    u64 cache_bytes = 0;

    std::atomic_thread_fence(std::memory_order_acquire);
    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
        std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);

        if (delta > 0) {
            ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
        } else if (delta < 0) {
            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
        } else {
            ASSERT_MSG(false, "Delta must be non-zero!");
        }

        // Adds or subtracts 1, as count is a unsigned 8-bit value
        count.fetch_add(static_cast<u16>(delta), std::memory_order_release);

        // Assume delta is either -1 or 1
        if (count.load(std::memory_order::relaxed) == 0) {
            if (uncache_bytes == 0) {
                uncache_begin = page;
            }
            uncache_bytes += YUZU_PAGESIZE;
        } else if (uncache_bytes > 0) {
            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
                                                  false);
            uncache_bytes = 0;
        }
        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
            if (cache_bytes == 0) {
                cache_begin = page;
            }
            cache_bytes += YUZU_PAGESIZE;
        } else if (cache_bytes > 0) {
            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
            cache_bytes = 0;
        }
    }
    if (uncache_bytes > 0) {
        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
    }
    if (cache_bytes > 0) {
        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
    }
}

} // namespace VideoCore

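The file removed above implemented per-page reference counting for cached regions: each guest page touched by a cached object carries a counter, a page is marked cached on the 0 -> 1 transition and unmarked on 1 -> 0, and consecutive pages are batched into single RasterizerMarkRegionCached calls. This duty moves into the device memory manager. A condensed sketch of the counting idea (names are illustrative, not the new implementation):

#include <atomic>
#include <cstddef>
#include <vector>

class PageRefCounter {
public:
    explicit PageRefCounter(std::size_t num_pages) : counts(num_pages) {}

    // delta is +1 when a cache begins tracking a page, -1 when it stops.
    // Returns true when the page's cached state changed, so the caller knows
    // to mark or unmark the backing memory.
    bool Update(std::size_t page, int delta) {
        const int prev = counts[page].fetch_add(delta, std::memory_order_acq_rel);
        const int curr = prev + delta;
        return (prev == 0) != (curr == 0); // flips on 0 <-> nonzero
    }

private:
    std::vector<std::atomic<int>> counts;
};
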
@@ -1,49 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <atomic>

#include "common/common_types.h"
#include "video_core/rasterizer_interface.h"

namespace Core::Memory {
class Memory;
}

namespace VideoCore {

/// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface.
class RasterizerAccelerated : public RasterizerInterface {
public:
    explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_);
    ~RasterizerAccelerated() override;

    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;

private:
    class CacheEntry final {
    public:
        CacheEntry() = default;

        std::atomic_uint16_t& Count(std::size_t page) {
            return values[page & 3];
        }

        const std::atomic_uint16_t& Count(std::size_t page) const {
            return values[page & 3];
        }

    private:
        std::array<std::atomic_uint16_t, 4> values{};
    };
    static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");

    using CachedPages = std::array<CacheEntry, 0x2000000>;
    std::unique_ptr<CachedPages> cached_pages;
    Core::Memory::Memory& cpu_memory;
};

} // namespace VideoCore

@@ -86,35 +86,35 @@ public:
    virtual void FlushAll() = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    virtual void FlushRegion(VAddr addr, u64 size,
    virtual void FlushRegion(DAddr addr, u64 size,
                             VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;

    /// Check if the the specified memory area requires flushing to CPU Memory.
    virtual bool MustFlushRegion(VAddr addr, u64 size,
    virtual bool MustFlushRegion(DAddr addr, u64 size,
                                 VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;

    virtual RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) = 0;
    virtual RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be invalidated
    virtual void InvalidateRegion(VAddr addr, u64 size,
    virtual void InvalidateRegion(DAddr addr, u64 size,
                                  VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;

    virtual void InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) {
    virtual void InnerInvalidation(std::span<const std::pair<DAddr, std::size_t>> sequences) {
        for (const auto& [cpu_addr, size] : sequences) {
            InvalidateRegion(cpu_addr, size);
        }
    }

    /// Notify rasterizer that any caches of the specified region are desync with guest
    virtual void OnCacheInvalidation(VAddr addr, u64 size) = 0;
    virtual void OnCacheInvalidation(PAddr addr, u64 size) = 0;

    virtual bool OnCPUWrite(VAddr addr, u64 size) = 0;
    virtual bool OnCPUWrite(PAddr addr, u64 size) = 0;

    /// Sync memory between guest and host.
    virtual void InvalidateGPUCache() = 0;

    /// Unmap memory range
    virtual void UnmapMemory(VAddr addr, u64 size) = 0;
    virtual void UnmapMemory(DAddr addr, u64 size) = 0;

    /// Remap GPU memory range. This means underneath backing memory changed
    virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0;
@@ -122,7 +122,7 @@ public:
    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    /// and invalidated
    virtual void FlushAndInvalidateRegion(
        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;

    /// Notify the host renderer to wait for previous primitive and compute operations.
    virtual void WaitForIdle() = 0;
@@ -157,13 +157,10 @@ public:

    /// Attempt to use a faster method to display the framebuffer to screen
    [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
                                                 VAddr framebuffer_addr, u32 pixel_stride) {
                                                 DAddr framebuffer_addr, u32 pixel_stride) {
        return false;
    }

    /// Increase/decrease the number of object in pages touching the specified region
    virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {}

    /// Initialize disk cached resources for the game being emulated
    virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                                   const DiskResourceLoadCallback& callback) {}

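One addition worth calling out in the interface hunk above: InnerInvalidation accepts a batch of (DAddr, size) pairs and, by default, falls back to per-range InvalidateRegion. A hypothetical caller, assuming DAddr is visible through yuzu's common types (the ranges are made up for illustration):

#include <array>
#include <cstddef>
#include <utility>

#include "video_core/rasterizer_interface.h" // for VideoCore::RasterizerInterface

void InvalidateDirtyRanges(VideoCore::RasterizerInterface& rasterizer) {
    // Two device-address ranges written by the guest; the values are arbitrary.
    std::array<std::pair<DAddr, std::size_t>, 2> dirty{{
        {0x1000, 0x200},
        {0x4000, 0x80},
    }};
    rasterizer.InnerInvalidation(dirty); // default body loops over InvalidateRegion
}
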
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/alignment.h"
#include "core/memory.h"
#include "video_core/control/channel_state.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
@@ -19,8 +18,7 @@ bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
    return true;
}

RasterizerNull::RasterizerNull(Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu)
    : RasterizerAccelerated(cpu_memory_), m_gpu{gpu} {}
RasterizerNull::RasterizerNull(Tegra::GPU& gpu) : m_gpu{gpu} {}
RasterizerNull::~RasterizerNull() = default;

void RasterizerNull::Draw(bool is_indexed, u32 instance_count) {}
@@ -45,25 +43,25 @@ void RasterizerNull::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr
                                               u32 size) {}
void RasterizerNull::DisableGraphicsUniformBuffer(size_t stage, u32 index) {}
void RasterizerNull::FlushAll() {}
void RasterizerNull::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
bool RasterizerNull::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType) {
void RasterizerNull::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType) {}
bool RasterizerNull::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType) {
    return false;
}
void RasterizerNull::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
bool RasterizerNull::OnCPUWrite(VAddr addr, u64 size) {
void RasterizerNull::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType) {}
bool RasterizerNull::OnCPUWrite(PAddr addr, u64 size) {
    return false;
}
void RasterizerNull::OnCacheInvalidation(VAddr addr, u64 size) {}
VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 size) {
void RasterizerNull::OnCacheInvalidation(PAddr addr, u64 size) {}
VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(PAddr addr, u64 size) {
    VideoCore::RasterizerDownloadArea new_area{
        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
        .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE),
        .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE),
        .preemtive = true,
    };
    return new_area;
}
void RasterizerNull::InvalidateGPUCache() {}
void RasterizerNull::UnmapMemory(VAddr addr, u64 size) {}
void RasterizerNull::UnmapMemory(DAddr addr, u64 size) {}
void RasterizerNull::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {}
void RasterizerNull::SignalFence(std::function<void()>&& func) {
    func();
@@ -78,7 +76,7 @@ void RasterizerNull::SignalSyncPoint(u32 value) {
}
void RasterizerNull::SignalReference() {}
void RasterizerNull::ReleaseFences(bool) {}
void RasterizerNull::FlushAndInvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
void RasterizerNull::FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType) {}
void RasterizerNull::WaitForIdle() {}
void RasterizerNull::FragmentBarrier() {}
void RasterizerNull::TiledCacheBarrier() {}
@@ -95,7 +93,7 @@ bool RasterizerNull::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Surfac
void RasterizerNull::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                              std::span<const u8> memory) {}
bool RasterizerNull::AccelerateDisplay(const Tegra::FramebufferConfig& config,
                                       VAddr framebuffer_addr, u32 pixel_stride) {
                                       DAddr framebuffer_addr, u32 pixel_stride) {
    return true;
}
void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading,

@@ -6,7 +6,6 @@
#include "common/common_types.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"

namespace Core {
@@ -32,10 +31,10 @@ public:
    }
};

class RasterizerNull final : public VideoCore::RasterizerAccelerated,
class RasterizerNull final : public VideoCore::RasterizerInterface,
                             protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    explicit RasterizerNull(Core::Memory::Memory& cpu_memory, Tegra::GPU& gpu);
    explicit RasterizerNull(Tegra::GPU& gpu);
    ~RasterizerNull() override;

    void Draw(bool is_indexed, u32 instance_count) override;
@@ -48,17 +47,17 @@ public:
    void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
    void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
    void FlushAll() override;
    void FlushRegion(VAddr addr, u64 size,
    void FlushRegion(DAddr addr, u64 size,
                     VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    bool MustFlushRegion(VAddr addr, u64 size,
    bool MustFlushRegion(DAddr addr, u64 size,
                         VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void InvalidateRegion(VAddr addr, u64 size,
    void InvalidateRegion(DAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void OnCacheInvalidation(VAddr addr, u64 size) override;
    bool OnCPUWrite(VAddr addr, u64 size) override;
    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
    void OnCacheInvalidation(DAddr addr, u64 size) override;
    bool OnCPUWrite(DAddr addr, u64 size) override;
    VideoCore::RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override;
    void InvalidateGPUCache() override;
    void UnmapMemory(VAddr addr, u64 size) override;
    void UnmapMemory(DAddr addr, u64 size) override;
    void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
    void SignalFence(std::function<void()>&& func) override;
    void SyncOperation(std::function<void()>&& func) override;
@@ -66,7 +65,7 @@ public:
    void SignalReference() override;
    void ReleaseFences(bool force) override;
    void FlushAndInvalidateRegion(
        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void WaitForIdle() override;
    void FragmentBarrier() override;
    void TiledCacheBarrier() override;
@@ -78,7 +77,7 @@ public:
    Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
    void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                  std::span<const u8> memory) override;
    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr,
                           u32 pixel_stride) override;
    void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                           const VideoCore::DiskResourceLoadCallback& callback) override;

@@ -7,10 +7,9 @@

namespace Null {

RendererNull::RendererNull(Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory,
                           Tegra::GPU& gpu,
RendererNull::RendererNull(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
                           std::unique_ptr<Core::Frontend::GraphicsContext> context_)
    : RendererBase(emu_window, std::move(context_)), m_gpu(gpu), m_rasterizer(cpu_memory, gpu) {}
    : RendererBase(emu_window, std::move(context_)), m_gpu(gpu), m_rasterizer(gpu) {}

RendererNull::~RendererNull() = default;

@@ -13,8 +13,7 @@ namespace Null {

class RendererNull final : public VideoCore::RendererBase {
public:
    explicit RendererNull(Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory,
                          Tegra::GPU& gpu,
    explicit RendererNull(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
                          std::unique_ptr<Core::Frontend::GraphicsContext> context);
    ~RendererNull() override;

@@ -47,11 +47,10 @@ constexpr std::array PROGRAM_LUT{
} // Anonymous namespace

Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
    : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(null_params) {}
    : VideoCommon::BufferBase(null_params) {}

Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
               VAddr cpu_addr_, u64 size_bytes_)
    : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_) {
Buffer::Buffer(BufferCacheRuntime& runtime, DAddr cpu_addr_, u64 size_bytes_)
    : VideoCommon::BufferBase(cpu_addr_, size_bytes_) {
    buffer.Create();
    if (runtime.device.HasDebuggingToolAttached()) {
        const std::string name = fmt::format("Buffer 0x{:x}", CpuAddr());

@@ -10,7 +10,6 @@
#include "common/common_types.h"
#include "video_core/buffer_cache/buffer_cache_base.h"
#include "video_core/buffer_cache/memory_tracker_base.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_staging_buffer_pool.h"
@@ -19,10 +18,9 @@ namespace OpenGL {

class BufferCacheRuntime;

class Buffer : public VideoCommon::BufferBase<VideoCore::RasterizerInterface> {
class Buffer : public VideoCommon::BufferBase {
public:
    explicit Buffer(BufferCacheRuntime&, VideoCore::RasterizerInterface& rasterizer, VAddr cpu_addr,
                    u64 size_bytes);
    explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr, u64 size_bytes);
    explicit Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams);

    void ImmediateUpload(size_t offset, std::span<const u8> data) noexcept;
@@ -244,7 +242,7 @@ struct BufferCacheParams {
    using Runtime = OpenGL::BufferCacheRuntime;
    using Buffer = OpenGL::Buffer;
    using Async_Buffer = OpenGL::StagingBufferMap;
    using MemoryTracker = VideoCommon::MemoryTrackerBase<VideoCore::RasterizerInterface>;
    using MemoryTracker = VideoCommon::MemoryTrackerBase<Tegra::MaxwellDeviceMemoryManager>;

    static constexpr bool IS_OPENGL = true;
    static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS = true;

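The MemoryTracker alias above swaps its template parameter from VideoCore::RasterizerInterface to Tegra::MaxwellDeviceMemoryManager: the tracker only needs an object that can adjust per-page cached counts, and that duty now belongs to the device memory manager. A reduced sketch of that dependency (simplified; the UpdatePagesCachedCount signature on the manager is assumed from the old rasterizer interface):

#include <cstdint>

template <class DeviceTracker>
class ToyMemoryTracker {
public:
    explicit ToyMemoryTracker(DeviceTracker& tracker_) : tracker{tracker_} {}

    // Pin pages while a host buffer covering them is alive...
    void TrackRegion(std::uint64_t dev_addr, std::uint64_t size) {
        tracker.UpdatePagesCachedCount(dev_addr, size, 1);
    }
    // ...and release them when the buffer is destroyed or invalidated.
    void UntrackRegion(std::uint64_t dev_addr, std::uint64_t size) {
        tracker.UpdatePagesCachedCount(dev_addr, size, -1);
    }

private:
    DeviceTracker& tracker;
};
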
@@ -35,8 +35,9 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {

} // Anonymous namespace

QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
    : QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {
QueryCache::QueryCache(RasterizerOpenGL& rasterizer_,
                       Tegra::MaxwellDeviceMemoryManager& device_memory_)
    : QueryCacheLegacy(rasterizer_, device_memory_), gl_rasterizer{rasterizer_} {
    EnableCounters();
}

@@ -8,6 +8,7 @@
#include <vector>

#include "common/common_types.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/query_cache.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -28,7 +29,8 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
class QueryCache final
    : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
    explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_);
    explicit QueryCache(RasterizerOpenGL& rasterizer_,
                        Tegra::MaxwellDeviceMemoryManager& device_memory_);
    ~QueryCache();

    OGLQuery AllocateQuery(VideoCore::QueryType type);

@@ -70,18 +70,18 @@ std::optional<VideoCore::QueryType> MaxwellToVideoCoreQuery(VideoCommon::QueryTy
} // Anonymous namespace

RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
                                   Core::Memory::Memory& cpu_memory_, const Device& device_,
                                   ScreenInfo& screen_info_, ProgramManager& program_manager_,
                                   StateTracker& state_tracker_)
    : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_),
                                   Tegra::MaxwellDeviceMemoryManager& device_memory_,
                                   const Device& device_, ScreenInfo& screen_info_,
                                   ProgramManager& program_manager_, StateTracker& state_tracker_)
    : gpu(gpu_), device_memory(device_memory_), device(device_), screen_info(screen_info_),
      program_manager(program_manager_), state_tracker(state_tracker_),
      texture_cache_runtime(device, program_manager, state_tracker, staging_buffer_pool),
      texture_cache(texture_cache_runtime, *this),
      texture_cache(texture_cache_runtime, device_memory_),
      buffer_cache_runtime(device, staging_buffer_pool),
      buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
      shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
                   state_tracker, gpu.ShaderNotify()),
      query_cache(*this, cpu_memory_), accelerate_dma(buffer_cache, texture_cache),
      buffer_cache(device_memory_, buffer_cache_runtime),
      shader_cache(device_memory_, emu_window_, device, texture_cache, buffer_cache,
                   program_manager, state_tracker, gpu.ShaderNotify()),
      query_cache(*this, device_memory_), accelerate_dma(buffer_cache, texture_cache),
      fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
      blit_image(program_manager_) {}

@@ -475,7 +475,7 @@ void RasterizerOpenGL::DisableGraphicsUniformBuffer(size_t stage, u32 index) {

void RasterizerOpenGL::FlushAll() {}

void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
void RasterizerOpenGL::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    if (addr == 0 || size == 0) {
        return;
@@ -493,7 +493,7 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType
    }
}

bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
bool RasterizerOpenGL::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    if ((True(which & VideoCommon::CacheType::BufferCache))) {
        std::scoped_lock lock{buffer_cache.mutex};
        if (buffer_cache.IsRegionGpuModified(addr, size)) {
@@ -510,7 +510,7 @@ bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
    return false;
}

VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 size) {
VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(DAddr addr, u64 size) {
    {
        std::scoped_lock lock{texture_cache.mutex};
        auto area = texture_cache.GetFlushArea(addr, size);
@@ -526,14 +526,14 @@ VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64
        }
    }
    VideoCore::RasterizerDownloadArea new_area{
        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
        .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE),
        .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE),
        .preemtive = true,
    };
    return new_area;
}

void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
void RasterizerOpenGL::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    if (addr == 0 || size == 0) {
        return;
@@ -554,7 +554,7 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::Cache
    }
}

bool RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
bool RasterizerOpenGL::OnCPUWrite(DAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    if (addr == 0 || size == 0) {
        return false;
@@ -576,8 +576,9 @@ bool RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
    return false;
}

void RasterizerOpenGL::OnCacheInvalidation(VAddr addr, u64 size) {
void RasterizerOpenGL::OnCacheInvalidation(DAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);

    if (addr == 0 || size == 0) {
        return;
    }
@@ -596,7 +597,7 @@ void RasterizerOpenGL::InvalidateGPUCache() {
    gpu.InvalidateGPUCache();
}

void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) {
void RasterizerOpenGL::UnmapMemory(DAddr addr, u64 size) {
    {
        std::scoped_lock lock{texture_cache.mutex};
        texture_cache.UnmapMemory(addr, size);
@@ -635,7 +636,7 @@ void RasterizerOpenGL::ReleaseFences(bool force) {
    fence_manager.WaitPendingFences(force);
}

void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size,
void RasterizerOpenGL::FlushAndInvalidateRegion(DAddr addr, u64 size,
                                                VideoCommon::CacheType which) {
    if (Settings::IsGPULevelExtreme()) {
        FlushRegion(addr, size, which);
@@ -739,7 +740,7 @@ void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_si
}

bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
                                         VAddr framebuffer_addr, u32 pixel_stride) {
                                         DAddr framebuffer_addr, u32 pixel_stride) {
    if (framebuffer_addr == 0) {
        return false;
    }

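GetFlushArea above rounds the requested range out to whole device pages. The alignment math behind Common::AlignDown/AlignUp, assuming the page size is a power of two:

#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t size) {
    return value & ~(size - 1); // clear the low bits: start of the page
}
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t size) {
    return AlignDown(value + size - 1, size); // round past the end, then down
}
// With 4 KiB pages, a request at 0x1234 flushes whole pages around it:
static_assert(AlignDown(0x1234, 0x1000) == 0x1000);
static_assert(AlignUp(0x1234, 0x1000) == 0x2000);
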
@@ -14,7 +14,6 @@
#include "common/common_types.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/blit_image.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
@@ -72,13 +71,13 @@ private:
    TextureCache& texture_cache;
};

class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
class RasterizerOpenGL : public VideoCore::RasterizerInterface,
                         protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
                              Core::Memory::Memory& cpu_memory_, const Device& device_,
                              ScreenInfo& screen_info_, ProgramManager& program_manager_,
                              StateTracker& state_tracker_);
                              Tegra::MaxwellDeviceMemoryManager& device_memory_,
                              const Device& device_, ScreenInfo& screen_info_,
                              ProgramManager& program_manager_, StateTracker& state_tracker_);
    ~RasterizerOpenGL() override;

    void Draw(bool is_indexed, u32 instance_count) override;
@@ -92,17 +91,17 @@ public:
    void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
    void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
    void FlushAll() override;
    void FlushRegion(VAddr addr, u64 size,
    void FlushRegion(DAddr addr, u64 size,
                     VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    bool MustFlushRegion(VAddr addr, u64 size,
    bool MustFlushRegion(DAddr addr, u64 size,
                         VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
    void InvalidateRegion(VAddr addr, u64 size,
    VideoCore::RasterizerDownloadArea GetFlushArea(PAddr addr, u64 size) override;
    void InvalidateRegion(DAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void OnCacheInvalidation(VAddr addr, u64 size) override;
    bool OnCPUWrite(VAddr addr, u64 size) override;
    void OnCacheInvalidation(PAddr addr, u64 size) override;
    bool OnCPUWrite(PAddr addr, u64 size) override;
    void InvalidateGPUCache() override;
    void UnmapMemory(VAddr addr, u64 size) override;
    void UnmapMemory(DAddr addr, u64 size) override;
    void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
    void SignalFence(std::function<void()>&& func) override;
    void SyncOperation(std::function<void()>&& func) override;
@@ -110,7 +109,7 @@ public:
    void SignalReference() override;
    void ReleaseFences(bool force = true) override;
    void FlushAndInvalidateRegion(
        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void WaitForIdle() override;
    void FragmentBarrier() override;
    void TiledCacheBarrier() override;
@@ -123,7 +122,7 @@ public:
    Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
    void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                  std::span<const u8> memory) override;
    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr,
                           u32 pixel_stride) override;
    void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                           const VideoCore::DiskResourceLoadCallback& callback) override;
@@ -235,6 +234,7 @@ private:
                       VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport);

    Tegra::GPU& gpu;
    Tegra::MaxwellDeviceMemoryManager& device_memory;

    const Device& device;
    ScreenInfo& screen_info;

@@ -168,11 +168,12 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs
}
} // Anonymous namespace

ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
                         const Device& device_, TextureCache& texture_cache_,
                         BufferCache& buffer_cache_, ProgramManager& program_manager_,
                         StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_)
    : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_},
ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
                         Core::Frontend::EmuWindow& emu_window_, const Device& device_,
                         TextureCache& texture_cache_, BufferCache& buffer_cache_,
                         ProgramManager& program_manager_, StateTracker& state_tracker_,
                         VideoCore::ShaderNotify& shader_notify_)
    : VideoCommon::ShaderCache{device_memory_}, emu_window{emu_window_}, device{device_},
      texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
      state_tracker{state_tracker_}, shader_notify{shader_notify_},
      use_asynchronous_shaders{device.UseAsynchronousShaders()},

@@ -17,7 +17,7 @@

namespace Tegra {
class MemoryManager;
}
} // namespace Tegra

namespace OpenGL {

@@ -28,10 +28,11 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;

class ShaderCache : public VideoCommon::ShaderCache {
public:
    explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
                         const Device& device_, TextureCache& texture_cache_,
                         BufferCache& buffer_cache_, ProgramManager& program_manager_,
                         StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_);
    explicit ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
                         Core::Frontend::EmuWindow& emu_window_, const Device& device_,
                         TextureCache& texture_cache_, BufferCache& buffer_cache_,
                         ProgramManager& program_manager_, StateTracker& state_tracker_,
                         VideoCore::ShaderNotify& shader_notify_);
    ~ShaderCache();

    void LoadDiskResources(u64 title_id, std::stop_token stop_loading,

@@ -15,7 +15,6 @@
#include "common/telemetry.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
#include "core/memory.h"
#include "core/telemetry_session.h"
#include "video_core/host_shaders/ffx_a_h.h"
#include "video_core/host_shaders/ffx_fsr1_h.h"
@@ -144,12 +143,13 @@ void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severit

RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
                               Core::Frontend::EmuWindow& emu_window_,
                               Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
                               Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_,
                               std::unique_ptr<Core::Frontend::GraphicsContext> context_)
    : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
      emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, device{emu_window_},
      emu_window{emu_window_}, device_memory{device_memory_}, gpu{gpu_}, device{emu_window_},
      state_tracker{}, program_manager{device},
      rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
      rasterizer(emu_window, gpu, device_memory, device, screen_info, program_manager,
                 state_tracker) {
    if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
        glEnable(GL_DEBUG_OUTPUT);
        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
@@ -242,7 +242,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf
    const u32 bytes_per_pixel{VideoCore::Surface::BytesPerBlock(pixel_format)};
    const u64 size_in_bytes{Tegra::Texture::CalculateSize(
        true, bytes_per_pixel, framebuffer.stride, framebuffer.height, 1, block_height_log2, 0)};
    const u8* const host_ptr{cpu_memory.GetPointer(framebuffer_addr)};
    const u8* const host_ptr{device_memory.GetPointer<u8>(framebuffer_addr)};
    const std::span<const u8> input_data(host_ptr, size_in_bytes);
    Tegra::Texture::UnswizzleTexture(gl_framebuffer_data, input_data, bytes_per_pixel,
                                     framebuffer.width, framebuffer.height, 1, block_height_log2,

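Across these renderer hunks, cpu_memory.GetPointer(addr) becomes device_memory.GetPointer<u8>(addr): the device memory manager resolves a DAddr to host memory and hands back a typed pointer. A hypothetical stand-in for that accessor shape (not the real Tegra::MaxwellDeviceMemoryManager):

#include <cstdint>

class ToyDeviceMemory {
public:
    template <typename T>
    T* GetPointer(std::uint64_t dev_addr) {
        return reinterpret_cast<T*>(Resolve(dev_addr));
    }

private:
    std::uint8_t* Resolve(std::uint64_t dev_addr); // page-table walk, not shown
};
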
Some files were not shown because too many files have changed in this diff.