gl_renderer: Cache textures, framebuffers, and shaders based on CPU address.
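For orientation: the net effect of this commit is that every GL-side cache (surfaces, framebuffers, shaders) is keyed by the emulated CPU virtual address (VAddr) instead of the Tegra GPU virtual address, with the GPU-to-CPU translation done once when a cache entry is created. Below is a minimal standalone sketch of that idea; the names here (GpuToCpuAddress, SurfaceCache, Register) are simplified stand-ins, not yuzu's actual API.

// Sketch only: simplified stand-ins for yuzu's types. The real VAddr/GPUVAddr,
// memory manager, and caches are richer than this.
#include <cstdint>
#include <iterator>
#include <optional>
#include <unordered_map>
#include <utility>

using VAddr = std::uint64_t;    // emulated CPU virtual address
using GPUVAddr = std::uint64_t; // Tegra GPU virtual address

// Hypothetical stand-in for Tegra::MemoryManager::GpuToCpuAddress.
std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) {
    return gpu_addr; // identity mapping is enough for the sketch
}

struct CachedSurface {}; // placeholder for a cached texture/framebuffer/shader

class SurfaceCache {
public:
    // Translate the GPU address once, then key the entry by CPU address.
    void Register(GPUVAddr gpu_addr, CachedSurface surface) {
        if (const auto cpu_addr = GpuToCpuAddress(gpu_addr)) {
            cache.emplace(*cpu_addr, std::move(surface));
        }
    }

    // CPU-side flush/invalidate requests index the cache directly; no reverse
    // (CPU -> GPU) lookup is needed per request.
    void InvalidateRegion(VAddr addr, std::uint64_t size) {
        for (auto it = cache.begin(); it != cache.end();) {
            const bool hit = it->first >= addr && it->first < addr + size;
            it = hit ? cache.erase(it) : std::next(it);
        }
    }

private:
    std::unordered_map<VAddr, CachedSurface> cache;
};

The hunks below apply that key change across the memory code, the rasterizer interface and cache templates, and the OpenGL rasterizer, surface cache, and shader cache.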
@@ -10,6 +10,7 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
+#include "video_core/memory_manager.h"

namespace Service::Nvidia::Devices {

@@ -251,8 +251,8 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
    return string;
}

-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
-    if (gpu_addr == 0) {
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+    if (vaddr == 0) {
        return;
    }

@@ -261,19 +261,8 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
    // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
    // assumes the specified GPU address region is contiguous as well.

-    u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
-    for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
-        boost::optional<VAddr> maybe_vaddr =
-            Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
-        // The GPU <-> CPU virtual memory mapping is not 1:1
-        if (!maybe_vaddr) {
-            LOG_ERROR(HW_Memory,
-                      "Trying to flush a cached region to an invalid physical address {:016X}",
-                      gpu_addr);
-            continue;
-        }
-        VAddr vaddr = *maybe_vaddr;

+    u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
        PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];

        if (cached) {
@@ -344,29 +333,19 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {

        const VAddr overlap_start = std::max(start, region_start);
        const VAddr overlap_end = std::min(end, region_end);

-        const std::vector<Tegra::GPUVAddr> gpu_addresses =
-            system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);

-        if (gpu_addresses.empty()) {
-            return;
-        }

        const u64 overlap_size = overlap_end - overlap_start;

-        for (const auto& gpu_address : gpu_addresses) {
-            auto& rasterizer = system_instance.Renderer().Rasterizer();
-            switch (mode) {
-            case FlushMode::Flush:
-                rasterizer.FlushRegion(gpu_address, overlap_size);
-                break;
-            case FlushMode::Invalidate:
-                rasterizer.InvalidateRegion(gpu_address, overlap_size);
-                break;
-            case FlushMode::FlushAndInvalidate:
-                rasterizer.FlushAndInvalidateRegion(gpu_address, overlap_size);
-                break;
-            }
+        auto& rasterizer = system_instance.Renderer().Rasterizer();
+        switch (mode) {
+        case FlushMode::Flush:
+            rasterizer.FlushRegion(overlap_start, overlap_size);
+            break;
+        case FlushMode::Invalidate:
+            rasterizer.InvalidateRegion(overlap_start, overlap_size);
+            break;
+        case FlushMode::FlushAndInvalidate:
+            rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size);
+            break;
+        }
        }
    };

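The num_pages expression in RasterizerMarkRegionCached above is unchanged apart from now operating on the CPU address directly: it counts how many CPU pages the half-open range [vaddr, vaddr + size) touches. A small self-contained check of that arithmetic, assuming the usual 4 KiB pages (PAGE_BITS = 12):

#include <cassert>
#include <cstdint>

constexpr std::uint64_t PAGE_BITS = 12;                // assumed 4 KiB pages
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

// Pages touched by [vaddr, vaddr + size): page index of the last byte minus
// page index of the first byte, plus one.
constexpr std::uint64_t NumPagesTouched(std::uint64_t vaddr, std::uint64_t size) {
    return ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
}

int main() {
    assert(NumPagesTouched(0x1000, 0x1000) == 1); // exactly one page
    assert(NumPagesTouched(0x1800, 0x1000) == 2); // straddles a page boundary
    assert(NumPagesTouched(0x1FFF, 1) == 1);      // last byte of a page
    assert(NumPagesTouched(0x1FFF, 2) == 2);      // crosses into the next page
    return 0;
}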
@@ -11,7 +11,6 @@
#include <boost/icl/interval_map.hpp>
#include "common/common_types.h"
#include "core/memory_hook.h"
-#include "video_core/memory_manager.h"

namespace Kernel {
class Process;
@@ -179,7 +178,7 @@ enum class FlushMode {
/**
 * Mark each page touching the region as cached.
 */
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached);
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);

/**
 * Flushes and invalidates any externally cached rasterizer resources touching the given virtual

@@ -17,7 +17,7 @@ template <class T>
class RasterizerCache : NonCopyable {
public:
    /// Mark the specified region as being invalidated
-    void InvalidateRegion(Tegra::GPUVAddr region_addr, size_t region_size) {
+    void InvalidateRegion(VAddr region_addr, size_t region_size) {
        for (auto iter = cached_objects.cbegin(); iter != cached_objects.cend();) {
            const auto& object{iter->second};

@@ -33,7 +33,7 @@ public:

protected:
    /// Tries to get an object from the cache with the specified address
-    T TryGet(Tegra::GPUVAddr addr) const {
+    T TryGet(VAddr addr) const {
        const auto& search{cached_objects.find(addr)};
        if (search != cached_objects.end()) {
            return search->second;
@@ -43,7 +43,7 @@ protected:
    }

    /// Gets a reference to the cache
-    const std::unordered_map<Tegra::GPUVAddr, T>& GetCache() const {
+    const std::unordered_map<VAddr, T>& GetCache() const {
        return cached_objects;
    }

@@ -74,5 +74,5 @@ protected:
    }

private:
-    std::unordered_map<Tegra::GPUVAddr, T> cached_objects;
+    std::unordered_map<VAddr, T> cached_objects;
};

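The rasterizer_cache.h hunks above only swap the map's key type from Tegra::GPUVAddr to VAddr. For orientation, here is a stripped-down sketch of such a VAddr-keyed cache; the interface is simplified and hypothetical (the real RasterizerCache<T> has more responsibilities), but the TryGet/InvalidateRegion shape mirrors the code in the diff:

// T is assumed to be a shared_ptr-like handle whose object exposes GetAddr()
// and GetSizeInBytes(), as the cached surfaces and shaders in the diff do.
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <unordered_map>

using VAddr = std::uint64_t;

template <class T>
class RasterizerCacheSketch {
public:
    /// Returns the cached object registered at exactly this address, or a null handle.
    T TryGet(VAddr addr) const {
        const auto it = cached_objects.find(addr);
        return it != cached_objects.end() ? it->second : T{};
    }

    /// Registers an object under its own (CPU) address.
    void Register(const T& object) {
        cached_objects[object->GetAddr()] = object;
    }

    /// Drops every object whose [addr, addr + size) range overlaps the region.
    void InvalidateRegion(VAddr region_addr, std::size_t region_size) {
        for (auto it = cached_objects.begin(); it != cached_objects.end();) {
            const auto& object = it->second;
            const VAddr start = object->GetAddr();
            const VAddr end = start + object->GetSizeInBytes();
            const bool overlaps = start < region_addr + region_size && region_addr < end;
            it = overlaps ? cached_objects.erase(it) : std::next(it);
        }
    }

private:
    std::unordered_map<VAddr, T> cached_objects;
};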
@@ -27,14 +27,14 @@ public:
    virtual void FlushAll() = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    virtual void FlushRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void FlushRegion(VAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be invalidated
-    virtual void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    /// and invalidated
-    virtual void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;

    /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
    virtual bool AccelerateDisplayTransfer(const void* config) {

@@ -279,10 +279,9 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
    return boost::make_iterator_range(map.equal_range(interval));
}

-void RasterizerOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
-    const u64 page_start{addr >> Tegra::MemoryManager::PAGE_BITS};
-    const u64 page_end{(addr + size + Tegra::MemoryManager::PAGE_SIZE - 1) >>
-                       Tegra::MemoryManager::PAGE_BITS};
+void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
+    const u64 page_start{addr >> Memory::PAGE_BITS};
+    const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS};

    // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
    // subtract after iterating
@@ -294,10 +293,8 @@ void RasterizerOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, in
        const auto interval = pair.first & pages_interval;
        const int count = pair.second;

-        const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
-                                                    << Tegra::MemoryManager::PAGE_BITS;
-        const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
-                                                  << Tegra::MemoryManager::PAGE_BITS;
+        const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
+        const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
        const u64 interval_size = interval_end_addr - interval_start_addr;

        if (delta > 0 && count == delta)
@@ -578,17 +575,17 @@ void RasterizerOpenGL::FlushAll() {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
}

-void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
}

-void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    res_cache.InvalidateRegion(addr, size);
    shader_cache.InvalidateRegion(addr, size);
}

-void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
    MICROPROFILE_SCOPE(OpenGL_CacheManagement);
    InvalidateRegion(addr, size);
}

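UpdatePagesCachedCount above keeps a reference count of how many cached objects touch each CPU page, using a boost::icl::interval_map over page indices; a page's backing memory is marked cached when its count rises from zero and unmarked when it drops back to zero. The sketch below shows the same bookkeeping with a plain per-page map instead of an interval_map (a simplification for clarity), with Memory::RasterizerMarkRegionCached stubbed out:

#include <cstdint>
#include <unordered_map>

using u64 = std::uint64_t;
using VAddr = u64;

constexpr u64 PAGE_BITS = 12; // assumed 4 KiB CPU pages
constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;

void MarkRegionCached(VAddr /*addr*/, u64 /*size*/, bool /*cached*/) {
    // Stand-in for Memory::RasterizerMarkRegionCached, which rewires the page
    // table so accesses to the region are routed through the rasterizer cache.
}

class CachedPageCounter {
public:
    // delta is +1 when an object covering the range is cached, -1 when it is evicted.
    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
        const u64 page_start = addr >> PAGE_BITS;
        const u64 page_end = (addr + size + PAGE_SIZE - 1) >> PAGE_BITS;
        for (u64 page = page_start; page < page_end; ++page) {
            int& count = cached_pages[page];
            const int old_count = count;
            count += delta;
            if (old_count == 0 && count > 0) {
                MarkRegionCached(page << PAGE_BITS, PAGE_SIZE, true);  // first user: mark cached
            } else if (old_count > 0 && count == 0) {
                MarkRegionCached(page << PAGE_BITS, PAGE_SIZE, false); // last user gone: unmark
            }
        }
    }

private:
    std::unordered_map<u64, int> cached_pages; // page index -> number of cached objects
};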
@@ -44,9 +44,9 @@ public:
    void Clear() override;
    void NotifyMaxwellRegisterChanged(u32 method) override;
    void FlushAll() override;
-    void FlushRegion(Tegra::GPUVAddr addr, u64 size) override;
-    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
-    void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
+    void FlushRegion(VAddr addr, u64 size) override;
+    void InvalidateRegion(VAddr addr, u64 size) override;
+    void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
    bool AccelerateDisplayTransfer(const void* config) override;
    bool AccelerateTextureCopy(const void* config) override;
    bool AccelerateFill(const void* config) override;

@@ -33,11 +33,16 @@ struct FormatTuple {
    bool compressed;
};

+static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) {
+    auto& gpu{Core::System::GetInstance().GPU()};
+    const auto cpu_addr{gpu.MemoryManager().GpuToCpuAddress(gpu_addr)};
+    return cpu_addr ? *cpu_addr : 0;
+}

/*static*/ SurfaceParams SurfaceParams::CreateForTexture(
    const Tegra::Texture::FullTextureInfo& config) {

    SurfaceParams params{};
-    params.addr = config.tic.Address();
+    params.addr = TryGetCpuAddr(config.tic.Address());
    params.is_tiled = config.tic.IsTiled();
    params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
    params.pixel_format =
@@ -55,9 +60,8 @@ struct FormatTuple {

/*static*/ SurfaceParams SurfaceParams::CreateForFramebuffer(
    const Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig& config) {

    SurfaceParams params{};
-    params.addr = config.Address();
+    params.addr = TryGetCpuAddr(config.Address());
    params.is_tiled = true;
    params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
    params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
@@ -75,9 +79,8 @@ struct FormatTuple {
/*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer(u32 zeta_width, u32 zeta_height,
                                                             Tegra::GPUVAddr zeta_address,
                                                             Tegra::DepthFormat format) {

    SurfaceParams params{};
-    params.addr = zeta_address;
+    params.addr = TryGetCpuAddr(zeta_address);
    params.is_tiled = true;
    params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
    params.pixel_format = PixelFormatFromDepthFormat(format);
@@ -171,11 +174,6 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType
    return format;
}

-VAddr SurfaceParams::GetCpuAddr() const {
-    auto& gpu = Core::System::GetInstance().GPU();
-    return *gpu.MemoryManager().GpuToCpuAddress(addr);
-}

static bool IsPixelFormatASTC(PixelFormat format) {
    switch (format) {
    case PixelFormat::ASTC_2D_4X4:
@@ -222,33 +220,28 @@ static bool IsFormatBCn(PixelFormat format) {
}

template <bool morton_to_gl, PixelFormat format>
-void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer,
-                Tegra::GPUVAddr addr) {
+void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer, VAddr addr) {
    constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
    constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
-    auto& gpu = Core::System::GetInstance().GPU();

    if (morton_to_gl) {
        // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
        // pixel values.
        const u32 tile_size{IsFormatBCn(format) ? 4U : 1U};
-        const std::vector<u8> data =
-            Tegra::Texture::UnswizzleTexture(*gpu.MemoryManager().GpuToCpuAddress(addr), tile_size,
-                                             bytes_per_pixel, stride, height, block_height);
+        const std::vector<u8> data = Tegra::Texture::UnswizzleTexture(
+            addr, tile_size, bytes_per_pixel, stride, height, block_height);
        const size_t size_to_copy{std::min(gl_buffer.size(), data.size())};
        gl_buffer.assign(data.begin(), data.begin() + size_to_copy);
    } else {
        // TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). We should
        // check the configuration for this and perform more generic un/swizzle
        LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
-        VideoCore::MortonCopyPixels128(
-            stride, height, bytes_per_pixel, gl_bytes_per_pixel,
-            Memory::GetPointer(*gpu.MemoryManager().GpuToCpuAddress(addr)), gl_buffer.data(),
-            morton_to_gl);
+        VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel,
+                                       Memory::GetPointer(addr), gl_buffer.data(), morton_to_gl);
    }
}

-static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPUVAddr),
+static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                            SurfaceParams::MaxPixelFormat>
    morton_to_gl_fns = {
        // clang-format off
@@ -305,7 +298,7 @@ static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPU
        // clang-format on
};

-static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPUVAddr),
+static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                            SurfaceParams::MaxPixelFormat>
    gl_to_morton_fns = {
        // clang-format off
@@ -542,7 +535,7 @@ MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64
void CachedSurface::LoadGLBuffer() {
    ASSERT(params.type != SurfaceType::Fill);

-    const u8* const texture_src_data = Memory::GetPointer(params.GetCpuAddr());
+    const u8* const texture_src_data = Memory::GetPointer(params.addr);

    ASSERT(texture_src_data);

@@ -567,7 +560,7 @@ void CachedSurface::LoadGLBuffer() {

MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
void CachedSurface::FlushGLBuffer() {
-    u8* const dst_buffer = Memory::GetPointer(params.GetCpuAddr());
+    u8* const dst_buffer = Memory::GetPointer(params.addr);

    ASSERT(dst_buffer);
    ASSERT(gl_buffer.size() ==
@@ -764,11 +757,6 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
        return {};
    }

-    auto& gpu = Core::System::GetInstance().GPU();
-    // Don't try to create any entries in the cache if the address of the texture is invalid.
-    if (gpu.MemoryManager().GpuToCpuAddress(params.addr) == boost::none)
-        return {};

    // Look up surface in the cache based on address
    Surface surface{TryGet(params.addr)};
    if (surface) {
@@ -858,10 +846,8 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
                                 "reinterpretation but the texture is tiled.");
        }
        size_t remaining_size = new_params.SizeInBytes() - params.SizeInBytes();
-        auto address = Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(
-            new_params.addr + params.SizeInBytes());
        std::vector<u8> data(remaining_size);
-        Memory::ReadBlock(*address, data.data(), data.size());
+        Memory::ReadBlock(new_params.addr + params.SizeInBytes(), data.data(), data.size());
        glBufferSubData(GL_PIXEL_PACK_BUFFER, params.SizeInBytes(), remaining_size, data.data());
    }

@@ -888,30 +874,8 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
    return new_surface;
}

-Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr cpu_addr) const {
-    // Tries to find the GPU address of a framebuffer based on the CPU address. This is because
-    // final output framebuffers are specified by CPU address, but internally our GPU cache uses
-    // GPU addresses. We iterate through all cached framebuffers, and compare their starting CPU
-    // address to the one provided. This is obviously not great, and won't work if the
-    // framebuffer overlaps surfaces.

-    std::vector<Surface> surfaces;
-    for (const auto& surface : GetCache()) {
-        const auto& params = surface.second->GetSurfaceParams();
-        const VAddr surface_cpu_addr = params.GetCpuAddr();
-        if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + params.size_in_bytes)) {
-            ASSERT_MSG(cpu_addr == surface_cpu_addr, "overlapping surfaces are unsupported");
-            surfaces.push_back(surface.second);
-        }
-    }

-    if (surfaces.empty()) {
-        return {};
-    }

-    ASSERT_MSG(surfaces.size() == 1, ">1 surface is unsupported");

-    return surfaces[0];
+Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr addr) const {
+    return TryGet(addr);
}

void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) {

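TryGetCpuAddr, introduced above, is the one place where a surface's GPU address is resolved to a CPU address; a GPU address with no CPU mapping collapses to 0, which callers can treat as "do not create a cache entry". A standalone sketch of the same pattern, using std::optional in place of boost::optional and a toy mapping function (both hypothetical):

#include <cstdint>
#include <optional>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

// Hypothetical stand-in for Tegra::MemoryManager::GpuToCpuAddress; returns an
// empty optional for GPU addresses that have no CPU mapping.
std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) {
    if (gpu_addr >= (1ULL << 30)) {
        return std::nullopt; // pretend only the low 1 GiB of GPU VA is mapped
    }
    return gpu_addr; // identity-mapped in this toy model
}

// Resolve a GPU address to its CPU address, or 0 when it is not mapped.
VAddr TryGetCpuAddr(GPUVAddr gpu_addr) {
    const auto cpu_addr = GpuToCpuAddress(gpu_addr);
    return cpu_addr ? *cpu_addr : 0;
}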
@@ -638,9 +638,6 @@ struct SurfaceParams {
               GetFormatBpp(pixel_format) / CHAR_BIT;
    }

-    /// Returns the CPU virtual address for this surface
-    VAddr GetCpuAddr() const;

    /// Creates SurfaceParams from a texture configuration
    static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config);

@@ -671,7 +668,7 @@ struct SurfaceParams {
               std::tie(other.pixel_format, other.type, other.cache_width, other.cache_height);
    }

-    Tegra::GPUVAddr addr;
+    VAddr addr;
    bool is_tiled;
    u32 block_height;
    PixelFormat pixel_format;
@@ -712,7 +709,7 @@ class CachedSurface final {
public:
    CachedSurface(const SurfaceParams& params);

-    Tegra::GPUVAddr GetAddr() const {
+    VAddr GetAddr() const {
        return params.addr;
    }

@@ -763,8 +760,8 @@ public:
    /// Flushes the surface to Switch memory
    void FlushSurface(const Surface& surface);

-    /// Tries to find a framebuffer GPU address based on the provided CPU address
-    Surface TryFindFramebufferSurface(VAddr cpu_addr) const;
+    /// Tries to find a framebuffer using on the provided CPU address
+    Surface TryFindFramebufferSurface(VAddr addr) const;

private:
    void LoadSurface(const Surface& surface);

@@ -12,21 +12,17 @@
namespace OpenGL {

/// Gets the address for the specified shader stage program
-static Tegra::GPUVAddr GetShaderAddress(Maxwell::ShaderProgram program) {
+static VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
    auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
    auto& shader_config = gpu.regs.shader_config[static_cast<size_t>(program)];

-    return gpu.regs.code_address.CodeAddress() + shader_config.offset;
+    return *gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() +
+                                               shader_config.offset);
}

/// Gets the shader program code from memory for the specified address
-static GLShader::ProgramCode GetShaderCode(Tegra::GPUVAddr addr) {
-    auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();

+static GLShader::ProgramCode GetShaderCode(VAddr addr) {
    GLShader::ProgramCode program_code(GLShader::MAX_PROGRAM_CODE_LENGTH);
-    const boost::optional<VAddr> cpu_address{gpu.memory_manager.GpuToCpuAddress(addr)};
-    Memory::ReadBlock(*cpu_address, program_code.data(), program_code.size() * sizeof(u64));

+    Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64));
    return program_code;
}

@@ -55,7 +51,7 @@ static void SetShaderUniformBlockBindings(GLuint shader) {
                                 sizeof(GLShader::MaxwellUniformData));
}

-CachedShader::CachedShader(Tegra::GPUVAddr addr, Maxwell::ShaderProgram program_type)
+CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type)
    : addr{addr}, program_type{program_type}, setup{GetShaderCode(addr)} {

    GLShader::ProgramResult program_result;
@@ -113,7 +109,7 @@ GLint CachedShader::GetUniformLocation(const std::string& name) {
}

Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
-    const Tegra::GPUVAddr program_addr{GetShaderAddress(program)};
+    const VAddr program_addr{GetShaderAddress(program)};

    // Look up shader in the cache based on address
    Shader shader{TryGet(program_addr)};

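GetShaderAddress above still computes the GPU-side location as code_address plus the per-stage shader_config offset, but now translates it to a CPU address before it is used as the cache key and as the source for Memory::ReadBlock. A rough sketch of that computation with simplified, hypothetical register types (the real Maxwell3D register layout is far larger):

#include <cstddef>
#include <cstdint>
#include <optional>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

struct ShaderStageConfig {
    std::uint32_t offset; // byte offset of this stage's code from the code base
};

struct Maxwell3DRegsSketch {
    GPUVAddr code_address;              // base GPU address of the shader code region
    ShaderStageConfig shader_config[6]; // one entry per shader program slot
};

// Toy stand-in for the GPU memory manager's GpuToCpuAddress.
std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) {
    return gpu_addr; // identity mapping for the sketch
}

// CPU address of a stage's program: GPU base + per-stage offset, translated
// once so it can key the shader cache and feed CPU-side memory reads.
VAddr GetShaderAddressSketch(const Maxwell3DRegsSketch& regs, std::size_t program_index) {
    const GPUVAddr gpu_addr = regs.code_address + regs.shader_config[program_index].offset;
    return GpuToCpuAddress(gpu_addr).value_or(0); // 0 if unmapped (sketch convention)
}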
@@ -8,7 +8,6 @@
#include <unordered_map>

#include "common/common_types.h"
-#include "video_core/memory_manager.h"
#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_gen.h"
@@ -21,10 +20,10 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;

class CachedShader final {
public:
-    CachedShader(Tegra::GPUVAddr addr, Maxwell::ShaderProgram program_type);
+    CachedShader(VAddr addr, Maxwell::ShaderProgram program_type);

    /// Gets the address of the shader in guest memory, required for cache management
-    Tegra::GPUVAddr GetAddr() const {
+    VAddr GetAddr() const {
        return addr;
    }

@@ -50,7 +49,7 @@ public:
    GLint GetUniformLocation(const std::string& name);

private:
-    Tegra::GPUVAddr addr;
+    VAddr addr;
    Maxwell::ShaderProgram program_type;
    GLShader::ShaderSetup setup;
    GLShader::ShaderEntries entries;