Core: Clang format and other small issues.
@@ -8,8 +8,8 @@
#include "audio_core/sink/sink_stream.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/memory.h"
#include "core/guest_memory.h"
#include "core/memory.h"

#include "core/hle/kernel/k_process.h"

@@ -9,8 +9,8 @@
#include "common/fixed_point.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
#include "core/memory.h"
#include "core/guest_memory.h"
#include "core/memory.h"

namespace AudioCore::Renderer {

@@ -27,13 +27,13 @@ struct DeviceMemoryManagerAllocator;
template <typename Traits>
class DeviceMemoryManager {
    using DeviceInterface = typename Traits::DeviceInterface;
    using DeviceMethods = Traits::DeviceMethods;
    using DeviceMethods = typename Traits::DeviceMethods;

public:
    DeviceMemoryManager(const DeviceMemory& device_memory);
    ~DeviceMemoryManager();

    void BindInterface(DeviceInterface* interface);
    void BindInterface(DeviceInterface* device_inter);

    DAddr Allocate(size_t size);
    void AllocateFixed(DAddr start, size_t size);
@@ -111,6 +111,7 @@ public:
private:
    static constexpr size_t device_virtual_bits = Traits::device_virtual_bits;
    static constexpr size_t device_as_size = 1ULL << device_virtual_bits;
    static constexpr size_t physical_min_bits = 32;
    static constexpr size_t physical_max_bits = 33;
    static constexpr size_t page_bits = 12;
    static constexpr size_t page_size = 1ULL << page_bits;
@@ -143,7 +144,7 @@ private:
    std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl;

    const uintptr_t physical_base;
    DeviceInterface* interface;
    DeviceInterface* device_inter;
    Common::VirtualBuffer<u32> compressed_physical_ptr;
    Common::VirtualBuffer<u32> compressed_device_addr;
    Common::VirtualBuffer<u32> continuity_tracker;

@@ -12,6 +12,7 @@
#include "common/assert.h"
#include "common/div_ceil.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/device_memory.h"
#include "core/device_memory_manager.h"
#include "core/memory.h"
@@ -162,20 +163,39 @@ struct DeviceMemoryManagerAllocator {
template <typename Traits>
DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
    : physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())},
      interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
      compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)),
      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
      compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                               Settings::MemoryLayout::Memory_4Gb
                                           ? physical_min_bits
                                           : physical_max_bits) -
                                      Memory::YUZU_PAGEBITS)),
      continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
      cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
    impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
    cached_pages = std::make_unique<CachedPages>();

    const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS;
    for (size_t i = 0; i < total_virtual; i++) {
        compressed_physical_ptr[i] = 0;
        continuity_tracker[i] = 1;
        cpu_backing_address[i] = 0;
    }
    const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                                Settings::MemoryLayout::Memory_4Gb
                                            ? physical_min_bits
                                            : physical_max_bits) -
                                       Memory::YUZU_PAGEBITS);
    for (size_t i = 0; i < total_phys; i++) {
        compressed_device_addr[i] = 0;
    }
}

template <typename Traits>
DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default;

template <typename Traits>
void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* interface_) {
    interface = interface_;
void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* device_inter_) {
    device_inter = device_inter_;
}

template <typename Traits>
@@ -232,7 +252,7 @@ template <typename Traits>
void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    interface->InvalidateRegion(address, size);
    device_inter->InvalidateRegion(address, size);
    std::scoped_lock lk(mapping_guard);
    for (size_t i = 0; i < num_pages; i++) {
        auto phys_addr = compressed_physical_ptr[start_page_d + i];
@@ -392,7 +412,7 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o

template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
    interface->FlushRegion(address, size);
    device_inter->FlushRegion(address, size);
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
@@ -426,7 +446,7 @@ void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_poin
        [&](const std::size_t copy_amount) {
            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
        });
    interface->InvalidateRegion(address, size);
    device_inter->InvalidateRegion(address, size);
}

template <typename Traits>
@@ -468,14 +488,14 @@ void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* sr
}

template <typename Traits>
size_t DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_interface) {
size_t DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_device_inter) {
    size_t new_id;
    if (!id_pool.empty()) {
        new_id = id_pool.front();
        id_pool.pop_front();
        registered_processes[new_id] = memory_interface;
        registered_processes[new_id] = memory_device_inter;
    } else {
        registered_processes.emplace_back(memory_interface);
        registered_processes.emplace_back(memory_device_inter);
        new_id = registered_processes.size() - 1U;
    }
    return new_id;
@@ -512,7 +532,7 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
    size_t page = addr >> Memory::YUZU_PAGEBITS;
    auto [process_id, base_vaddress] = ExtractCPUBacking(page);
    size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS;
    auto* memory_interface = registered_processes[process_id];
    auto* memory_device_inter = registered_processes[process_id];
    for (; page != page_end; ++page) {
        std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page);

@@ -536,7 +556,7 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
            uncache_bytes += Memory::YUZU_PAGESIZE;
        } else if (uncache_bytes > 0) {
            lock();
            MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS,
            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
                              uncache_bytes, false);
            uncache_bytes = 0;
        }
@@ -547,7 +567,7 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
            cache_bytes += Memory::YUZU_PAGESIZE;
        } else if (cache_bytes > 0) {
            lock();
            MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
            MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                              true);
            cache_bytes = 0;
        }
@@ -555,12 +575,12 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
    }
    if (uncache_bytes > 0) {
        lock();
        MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
        MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
                          false);
    }
    if (cache_bytes > 0) {
        lock();
        MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
        MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                          true);
    }
}

@@ -202,7 +202,8 @@ public:
                } else {
                    this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
                }
            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || (FLAGS & GuestMemoryFlags::Cached))  {
            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
                                 (FLAGS & GuestMemoryFlags::Cached)) {
                this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
            }
        }
@@ -215,4 +216,4 @@ using CpuGuestMemory = GuestMemory<Core::Memory::Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemoryScoped = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>;

} // namespace Tegra::Memory
} // namespace Core::Memory

@@ -12,6 +12,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
#include "core/guest_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
@@ -22,7 +23,6 @@
#include "core/hle/service/hle_ipc.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"
#include "core/guest_memory.h"

namespace Service {

@@ -16,6 +16,12 @@

namespace Service::Nvidia::NvCore {

Session::Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_)
    : id{id_}, process{process_}, smmu_id{smmu_id_},
      has_preallocated_area{}, mapper{}, is_active{} {}

Session::~Session() = default;

struct ContainerImpl {
    explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
        : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
@@ -54,8 +60,8 @@ size_t Container::OpenSession(Kernel::KProcess* process) {
        impl->id_pool.pop_front();
        impl->sessions[new_id] = Session{new_id, process, smmu_id};
    } else {
        impl->sessions.emplace_back(new_id, process, smmu_id);
        new_id = impl->new_ids++;
        impl->sessions.emplace_back(new_id, process, smmu_id);
    }
    auto& session = impl->sessions[new_id];
    session.is_active = true;

@@ -27,6 +27,14 @@ class SyncpointManager;
struct ContainerImpl;

struct Session {
    Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_);
    ~Session();

    Session(const Session&) = delete;
    Session& operator=(const Session&) = delete;
    Session(Session&&) = default;
    Session& operator=(Session&&) = default;

    size_t id;
    Kernel::KProcess* process;
    size_t smmu_id;

@@ -124,10 +124,11 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
    m_internal->base_set.clear();
    const IntervalType interval{start, start + size};
    m_internal->base_set.insert(interval);
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int){
        const IntervalType other{start_addr, end_addr};
        m_internal->base_set.subtract(other);
    });
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
                                        [this](VAddr start_addr, VAddr end_addr, int) {
                                            const IntervalType other{start_addr, end_addr};
                                            m_internal->base_set.subtract(other);
                                        });
    if (!m_internal->base_set.empty()) {
        auto it = m_internal->base_set.begin();
        auto end_it = m_internal->base_set.end();
@@ -136,7 +137,8 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
            const VAddr inter_addr = it->lower();
            const size_t offset = inter_addr - m_vaddress;
            const size_t sub_size = inter_addr_end - inter_addr;
            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_smmu_id);
            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
                                          m_smmu_id);
        }
    }
    m_internal->mapping_overlaps += std::make_pair(interval, 1);
@@ -147,12 +149,13 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
void HeapMapper::Unmap(VAddr start, size_t size) {
    std::scoped_lock lk(m_internal->guard);
    m_internal->base_set.clear();
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int value) {
        if (value <= 1) {
            const IntervalType other{start_addr, end_addr};
            m_internal->base_set.insert(other);
        }
    });
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
                                        [this](VAddr start_addr, VAddr end_addr, int value) {
                                            if (value <= 1) {
                                                const IntervalType other{start_addr, end_addr};
                                                m_internal->base_set.insert(other);
                                            }
                                        });
    if (!m_internal->base_set.empty()) {
        auto it = m_internal->base_set.begin();
        auto end_it = m_internal->base_set.end();

@@ -13,8 +13,8 @@
#include "core/memory.h"
#include "video_core/host1x/host1x.h"


using Core::Memory::YUZU_PAGESIZE;
constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;

namespace Service::Nvidia::NvCore {
NvMap::Handle::Handle(u64 size_, Id id_)
@@ -96,8 +96,9 @@ void NvMap::UnmapHandle(Handle& handle_description) {
    const size_t map_size = handle_description.aligned_size;
    if (!handle_description.in_heap) {
        auto& smmu = host1x.MemoryManager();
        size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
        smmu.Unmap(handle_description.d_address, map_size);
        smmu.Free(handle_description.d_address, static_cast<size_t>(map_size));
        smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
        handle_description.d_address = 0;
        return;
    }
@@ -206,7 +207,8 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
            handle_description->d_address = session->mapper->Map(vaddress, map_size);
            handle_description->in_heap = true;
        } else {
            while ((address = smmu.Allocate(map_size)) == 0) {
            size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
            while ((address = smmu.Allocate(aligned_up)) == 0) {
                // Free handles until the allocation succeeds
                std::scoped_lock queueLock(unmap_queue_lock);
                if (auto freeHandleDesc{unmap_queue.front()}) {

@@ -63,8 +63,8 @@ public:
        } flags{};
        static_assert(sizeof(Flags) == sizeof(u32));

        VAddr address{};   //!< The memory location in the guest's AS that this handle corresponds to,
                           //!< this can also be in the nvdrv tmem
        VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
                         //!< this can also be in the nvdrv tmem
        bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                     //!< call

@@ -73,8 +73,8 @@ public:
        bool in_heap{};
        size_t session_id{};

        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to,
                           //!< this can also be in the nvdrv tmem
        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
                           //!< to, this can also be in the nvdrv tmem

        Handle(u64 size, Id id);

@@ -82,7 +82,8 @@ public:
         * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
         * if a 0 address is passed
         */
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId);
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
                                     size_t pSessionId);

        /**
         * @brief Increases the dupe counter of the handle for the given session

@@ -4,8 +4,8 @@
#pragma once

#include <deque>
#include <vector>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"
#include "common/swap.h"

@@ -69,7 +69,7 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
}

void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) {
        sessions[fd] = session_id;
    sessions[fd] = session_id;
}

void nvhost_vic::OnClose(DeviceFD fd) {

@@ -123,8 +123,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
        return NvResult::InsufficientMemory;
    }

    const auto result =
        handle_description->Alloc(params.flags, params.align, params.kind, params.address, sessions[fd]);
    const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
                                                  params.address, sessions[fd]);
    if (result != NvResult::Success) {
        LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
        return result;

@@ -92,7 +92,8 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::Device
    Nvidia::Devices::nvmap::IocFreeParams free_params{
        .handle = handle,
    };
    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
             VI::ResultOperationFailed);

    // We succeeded.
    R_SUCCEED();
@@ -109,7 +110,8 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
        .kind = 0,
        .address = GetInteger(buffer),
    };
    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
             VI::ResultOperationFailed);

    // We succeeded.
    R_SUCCEED();
@@ -201,8 +203,8 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
    m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);

    // Create an nvmap handle for the buffer and assign the memory to it.
    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, map_address,
                                  SharedBufferSize));
    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
                                  map_address, SharedBufferSize));

    // Record the display id.
    m_display_id = display_id;

@@ -4,9 +4,9 @@
#pragma once

#include "common/math_util.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvnflinger/nvnflinger.h"
#include "core/hle/service/nvnflinger/ui/fence.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Kernel {
class KPageGroup;
@@ -62,7 +62,6 @@ private:
    Core::System& m_system;
    Nvnflinger& m_flinger;
    std::shared_ptr<Nvidia::Module> m_nvdrv;

};

} // namespace Service::Nvnflinger

@@ -240,8 +240,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
        memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
    }

    Tegra::Memory::DeviceGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadWrite> tmp(
        device_memory, *cpu_src_address, amount, &tmp_buffer);
    Tegra::Memory::DeviceGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadWrite>
        tmp(device_memory, *cpu_src_address, amount, &tmp_buffer);
    tmp.SetAddressAndSize(*cpu_dest_address, amount);
    return true;
}
@@ -1355,7 +1355,7 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(DAddr dev
    bool has_stream_leap = false;
    auto expand_begin = [&](DAddr add_value) {
        static constexpr DAddr min_page = CACHING_PAGESIZE + Core::Memory::YUZU_PAGESIZE;
        if (add_value > begin - min_page ) {
        if (add_value > begin - min_page) {
            begin = min_page;
            device_addr = Core::Memory::YUZU_PAGESIZE;
            return;
@@ -1365,7 +1365,7 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(DAddr dev
    };
    auto expand_end = [&](DAddr add_value) {
        static constexpr DAddr max_page = 1ULL << Tegra::MaxwellDeviceMemoryManager::AS_BITS;
        if (add_value > max_page - end ) {
        if (add_value > max_page - end) {
            end = max_page;
            return;
        }

@@ -472,8 +472,8 @@ private:
        u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits;
        VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
        IteratePages(changed_bits, [&](size_t offset, size_t size) {
            tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
                                               size * BYTES_PER_PAGE, add_to_tracker ? 1 : -1);
            tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, size * BYTES_PER_PAGE,
                                            add_to_tracker ? 1 : -1);
        });
    }

@@ -86,14 +86,14 @@ bool DmaPusher::Step() {
        }
        const auto safe_process = [&] {
            Tegra::Memory::GpuGuestMemory<Tegra::CommandHeader,
                                         Tegra::Memory::GuestMemoryFlags::SafeRead>
                                          Tegra::Memory::GuestMemoryFlags::SafeRead>
                headers(memory_manager, dma_state.dma_get, command_list_header.size,
                        &command_headers);
            ProcessCommands(headers);
        };
        const auto unsafe_process = [&] {
            Tegra::Memory::GpuGuestMemory<Tegra::CommandHeader,
                                         Tegra::Memory::GuestMemoryFlags::UnsafeRead>
                                          Tegra::Memory::GuestMemoryFlags::UnsafeRead>
                headers(memory_manager, dma_state.dma_get, command_list_header.size,
                        &command_headers);
            ProcessCommands(headers);

@@ -8,10 +8,10 @@
#include "common/scratch_buffer.h"
#include "video_core/engines/sw_blitter/blitter.h"
#include "video_core/engines/sw_blitter/converter.h"
#include "video_core/guest_memory.h"
#include "video_core/memory_manager.h"
#include "video_core/surface.h"
#include "video_core/textures/decoders.h"
#include "video_core/guest_memory.h"

namespace Tegra {
class MemoryManager;

@@ -20,7 +20,8 @@ using GuestMemoryFlags = Core::Memory::GuestMemoryFlags;
template <typename T, GuestMemoryFlags FLAGS>
using DeviceGuestMemory = Core::Memory::GuestMemory<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using DeviceGuestMemoryScoped = Core::Memory::GuestMemoryScoped<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
using DeviceGuestMemoryScoped =
    Core::Memory::GuestMemoryScoped<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemory = Core::Memory::GuestMemory<Tegra::MemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>

@@ -383,9 +383,8 @@ Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters
        // gpu.SyncGuestHost(); epic, why?
        current_frame.info = GetVp9PictureInfo(state);
        current_frame.bit_stream.resize(current_frame.info.bitstream_size);
        host1x.GMMU().ReadBlock(state.frame_bitstream_offset,
                                         current_frame.bit_stream.data(),
                                         current_frame.info.bitstream_size);
        host1x.GMMU().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
                                current_frame.info.bitstream_size);
    }
    if (!next_frame.bit_stream.empty()) {
        Vp9FrameContainer temp{

@@ -167,7 +167,7 @@ void Vic::WriteRGBFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
        // send pitch linear frame
        const size_t linear_size = width * height * 4;
        host1x.GMMU().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
                                          linear_size);
                                 linear_size);
    }
}

@@ -193,8 +193,7 @@ void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
        const std::size_t dst = y * aligned_width;
        std::memcpy(luma_buffer.data() + dst, luma_src + src, frame_width);
    }
    host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(),
                                      luma_buffer.size());
    host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), luma_buffer.size());

    // Chroma
    const std::size_t half_height = frame_height / 2;
@@ -234,7 +233,7 @@ void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
        break;
    }
    host1x.GMMU().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
                                      chroma_buffer.size());
                             chroma_buffer.size());
}

} // namespace Host1x

@@ -38,8 +38,9 @@ class MemoryManager final {
public:
    explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
                           u64 big_page_bits_ = 16, u64 page_bits_ = 12);
    explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_, u64 address_space_bits_ = 40,
                           u64 big_page_bits_ = 16, u64 page_bits_ = 12);
    explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_,
                           u64 address_space_bits_ = 40, u64 big_page_bits_ = 16,
                           u64 page_bits_ = 12);
    ~MemoryManager();

    size_t GetID() const {

@@ -15,9 +15,9 @@
#include "common/common_types.h"
#include "core/memory.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/query_cache/query_base.h"
#include "video_core/query_cache/types.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace VideoCore {
class RasterizerInterface;
@@ -50,7 +50,8 @@ public:
    };

    explicit QueryCacheBase(Tegra::GPU& gpu, VideoCore::RasterizerInterface& rasterizer_,
                            Tegra::MaxwellDeviceMemoryManager& device_memory_, RuntimeType& runtime_);
                            Tegra::MaxwellDeviceMemoryManager& device_memory_,
                            RuntimeType& runtime_);

    ~QueryCacheBase();

@@ -20,8 +20,7 @@ class BufferCacheRuntime;

class Buffer : public VideoCommon::BufferBase {
public:
    explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr,
                    u64 size_bytes);
    explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr, u64 size_bytes);
    explicit Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams);

    void ImmediateUpload(size_t offset, std::span<const u8> data) noexcept;

@@ -35,7 +35,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {

} // Anonymous namespace

QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::MaxwellDeviceMemoryManager& device_memory_)
QueryCache::QueryCache(RasterizerOpenGL& rasterizer_,
                       Tegra::MaxwellDeviceMemoryManager& device_memory_)
    : QueryCacheLegacy(rasterizer_, device_memory_), gl_rasterizer{rasterizer_} {
    EnableCounters();
}

@@ -8,10 +8,10 @@
#include <vector>

#include "common/common_types.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/query_cache.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace Core {
class System;
@@ -29,7 +29,8 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
class QueryCache final
    : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
    explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::MaxwellDeviceMemoryManager& device_memory_);
    explicit QueryCache(RasterizerOpenGL& rasterizer_,
                        Tegra::MaxwellDeviceMemoryManager& device_memory_);
    ~QueryCache();

    OGLQuery AllocateQuery(VideoCore::QueryType type);

@@ -75,9 +75,9 @@ class RasterizerOpenGL : public VideoCore::RasterizerInterface,
                         protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
                              Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device_,
                              ScreenInfo& screen_info_, ProgramManager& program_manager_,
                              StateTracker& state_tracker_);
                              Tegra::MaxwellDeviceMemoryManager& device_memory_,
                              const Device& device_, ScreenInfo& screen_info_,
                              ProgramManager& program_manager_, StateTracker& state_tracker_);
    ~RasterizerOpenGL() override;

    void Draw(bool is_indexed, u32 instance_count) override;

@@ -97,8 +97,8 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
                render_window.GetFramebufferLayout().height),
      present_manager(instance, render_window, device, memory_allocator, scheduler, swapchain,
                      surface),
      blit_screen(device_memory, render_window, device, memory_allocator, swapchain, present_manager,
                  scheduler, screen_info),
      blit_screen(device_memory, render_window, device, memory_allocator, swapchain,
                  present_manager, scheduler, screen_info),
      rasterizer(render_window, gpu, device_memory, screen_info, device, memory_allocator,
                 state_tracker, scheduler) {
    if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) {

@@ -7,12 +7,12 @@
#include <string>
#include <variant>

#include "video_core/renderer_vulkan/vk_rasterizer.h"

#include "common/dynamic_library.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/renderer_base.h"
#include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_present_manager.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
@@ -20,7 +20,6 @@
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace Core {
class TelemetrySession;

@@ -6,9 +6,9 @@
#include <memory>

#include "core/frontend/framebuffer_layout.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace Core {
class System;
@@ -53,8 +53,9 @@ struct ScreenInfo {

class BlitScreen {
public:
    explicit BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory, Core::Frontend::EmuWindow& render_window,
                        const Device& device, MemoryAllocator& memory_manager, Swapchain& swapchain,
    explicit BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory,
                        Core::Frontend::EmuWindow& render_window, const Device& device,
                        MemoryAllocator& memory_manager, Swapchain& swapchain,
                        PresentManager& present_manager, Scheduler& scheduler,
                        const ScreenInfo& screen_info);
    ~BlitScreen();

@@ -298,8 +298,9 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
    return std::memcmp(&rhs, this, Size()) == 0;
}

PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device_,
                             Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
                             const Device& device_, Scheduler& scheduler_,
                             DescriptorPool& descriptor_pool_,
                             GuestDescriptorQueue& guest_descriptor_queue_,
                             RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
                             TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)

@@ -20,13 +20,13 @@
#include "shader_recompiler/object_pool.h"
#include "shader_recompiler/profile.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/shader_cache.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace Core {
class System;
@@ -99,8 +99,8 @@ struct ShaderPools {

class PipelineCache : public VideoCommon::ShaderCache {
public:
    explicit PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device, Scheduler& scheduler,
                           DescriptorPool& descriptor_pool,
    explicit PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device,
                           Scheduler& scheduler, DescriptorPool& descriptor_pool,
                           GuestDescriptorQueue& guest_descriptor_queue,
                           RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
                           TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);

@@ -14,10 +14,10 @@
#include "common/bit_util.h"
#include "common/common_types.h"
#include "core/memory.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/engines/draw_manager.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/query_cache/query_cache.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -1156,9 +1156,10 @@ private:

struct QueryCacheRuntimeImpl {
    QueryCacheRuntimeImpl(QueryCacheRuntime& runtime, VideoCore::RasterizerInterface* rasterizer_,
                          Tegra::MaxwellDeviceMemoryManager& device_memory_, Vulkan::BufferCache& buffer_cache_,
                          const Device& device_, const MemoryAllocator& memory_allocator_,
                          Scheduler& scheduler_, StagingBufferPool& staging_pool_,
                          Tegra::MaxwellDeviceMemoryManager& device_memory_,
                          Vulkan::BufferCache& buffer_cache_, const Device& device_,
                          const MemoryAllocator& memory_allocator_, Scheduler& scheduler_,
                          StagingBufferPool& staging_pool_,
                          ComputePassDescriptorQueue& compute_pass_descriptor_queue,
                          DescriptorPool& descriptor_pool)
        : rasterizer{rasterizer_}, device_memory{device_memory_},

@@ -38,7 +38,6 @@
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"


namespace Vulkan {

using Maxwell = Tegra::Engines::Maxwell3D::Regs;

@@ -7,13 +7,13 @@

#include <boost/container/static_vector.hpp>

#include "video_core/renderer_vulkan/vk_buffer_cache.h"

#include "common/common_types.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_vulkan/blit_image.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_fence_manager.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
@@ -24,7 +24,6 @@
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace Core {
class System;

@@ -35,7 +35,8 @@ void ShaderCache::SyncGuestHost() {
    RemovePendingShaders();
}

ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_) : device_memory{device_memory_} {}
ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_)
    : device_memory{device_memory_} {}

bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
    auto& dirty{maxwell3d->dirty.flags};

@@ -14,9 +14,9 @@
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/shader_environment.h"
#include "video_core/host1x/gpu_device_memory_manager.h"

namespace Tegra {
class MemoryManager;