chore: flush rive runtime change

Diffs=
59d5d4d658 chore: flush rive runtime change (#9428)
f2968887ec Add silvers without lfs (#9425)
eabc82a4c2 chore: temporarily remove silvers (#9424)
6cce6709cb refactor(vulkan): Lift lifecycle management (#9410)
7f5e51f4b4 refactor(renderer): Send map sizes to unmap functions as well (#9407)
43d6ac25e6 Metal fiddle context assert fix (#9393)

Co-authored-by: Chris Dalton <99840794+csmartdalton@users.noreply.github.com>
Co-authored-by: Jonathon Copeland <jcopela4@gmail.com>
Co-authored-by: Maxwell Talbot <talbot.maxwell@gmail.com>
This commit is contained in:
mjtalbot
2025-04-11 16:28:35 +00:00
parent 8d856edaf4
commit 4561b9d112
24 changed files with 577 additions and 497 deletions

View File

@@ -49,8 +49,6 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Get srivs
run: git lfs pull tests/unit_tests/silvers
- name: Install
run: |

View File

@@ -1 +1 @@
93d4fda5a233facc3c136269939225f7d4558e1f
59d5d4d65889f834957aaac1351f7ab4c64af7cf

View File

@@ -27,6 +27,7 @@ public:
size_t capacityInBytes() const { return m_capacityInBytes; }
bool isMapped() const { return m_mapSizeInBytes != 0; }
size_t mapSizeInBytes() const { return m_mapSizeInBytes; }
// Maps the next buffer in the ring.
void* mapBuffer(size_t mapSizeInBytes)

View File

@@ -1442,10 +1442,23 @@ public:
MapResourceBufferFn mapFn,
size_t elementCount)
{
assert(m_mappedMemory == nullptr);
void* ptr = (impl->*mapFn)(elementCount * sizeof(T));
reset(reinterpret_cast<T*>(ptr), elementCount);
}
using UnmapResourceBufferFn =
void (RenderContextImpl::*)(size_t mapSizeInBytes);
void unmapElements(RenderContextImpl* impl,
UnmapResourceBufferFn unmapFn,
size_t elementCount)
{
assert(m_mappedMemory != nullptr);
assert(m_mappingEnd - m_mappedMemory == elementCount);
(impl->*unmapFn)(elementCount * sizeof(T));
reset();
}
operator bool() const { return m_mappedMemory; }
// How many bytes have been written to the buffer?

View File

@@ -0,0 +1,118 @@
/*
* Copyright 2025 Rive
*/
#pragma once
#include "rive/refcnt.hpp"
#include <deque>
namespace rive::gpu
{
class GPUResourceManager;
// Base class for a GPU resource that needs to be kept alive until any in-flight
// command buffers that reference it have completed.
class GPUResource : public RefCnt<GPUResource>
{
public:
    virtual ~GPUResource();

    // The manager that decides when this resource may actually be deleted.
    GPUResourceManager* manager() const { return m_manager.get(); }

protected:
    GPUResource(rcp<GPUResourceManager> manager) : m_manager(std::move(manager))
    {}

    // Keeps the manager alive at least as long as any resource it manages.
    const rcp<GPUResourceManager> m_manager;

private:
    friend class RefCnt<GPUResource>;

    // Don't delete GPUResources immediately when their ref count reaches zero;
    // instead wait until their safe frame number is reached.
    void onRefCntReachedZero() const;
};
// A GPUResource that has been fully released, but whose underlying native
// resource may still be referenced by an in-flight command buffer.
struct ZombieResource
{
    ZombieResource(GPUResource* resource_, uint64_t lastFrameNumber_) :
        resource(resource_), lastFrameNumber(lastFrameNumber_)
    {}
    // Owns the released resource; destroying this struct deletes it.
    std::unique_ptr<GPUResource> resource;
    const uint64_t lastFrameNumber; // Frame number at which the underlying
                                    // native resource was last used.
};
// Manages the lifecycle of GPUResources. When the refcount reaches zero, rather
// than deleting it immediately, this class places it in "purgatory" until its
// safe frame number is reached.
class GPUResourceManager : public RefCnt<GPUResourceManager>
{
public:
    virtual ~GPUResourceManager();

    // Resource lifetime counters. Resources last used on or before
    // 'safeFrameNumber' are safe to be released or recycled.
    uint64_t currentFrameNumber() const { return m_currentFrameNumber; }
    uint64_t safeFrameNumber() const { return m_safeFrameNumber; }

    // Purges released resources whose lastFrameNumber is on or before
    // safeFrameNumber, and updates the monotonically increasing
    // m_currentFrameNumber.
    void advanceFrameNumber(uint64_t nextFrameNumber, uint64_t safeFrameNumber);

    // Called when a GPUResource has been fully released (refcount reaches 0).
    // The underlying native resource won't actually be deleted until the
    // resource's safe frame number is reached.
    void onRenderingResourceReleased(GPUResource*);

    // Called prior to the client beginning its shutdown cycle, and after all
    // command buffers from all frames have finished executing. After shutting
    // down, we delete GPUResources immediately instead of going
    // through m_resourcePurgatory.
    void shutdown();

private:
    // An m_currentFrameNumber of this value indicates we're in a shutdown cycle
    // and resources should be deleted immediately upon release instead of going
    // through m_resourcePurgatory.
    constexpr static uint64_t SHUTDOWN_FRAME_NUMBER = 0xffffffffffffffff;

    // Resource lifetime counters. Resources last used on or before
    // 'safeFrameNumber' are safe to be released or recycled.
    uint64_t m_currentFrameNumber = 0;
    uint64_t m_safeFrameNumber = 0;

    // Temporary container for GPUResource instances that have been fully
    // released, but need to persist until their safe frame number is reached.
    // Kept ordered by lastFrameNumber (oldest at the front).
    std::deque<ZombieResource> m_resourcePurgatory;
};
// Manual GPUResource lifecycle manager. Rather than allowing GPUResourceManager
// to vacuum up resources, they may be placed in a pool instead, which will
// recycle them once their safeFrameNumber is reached.
class GPUResourcePool : public GPUResource
{
public:
    GPUResourcePool(rcp<GPUResourceManager> manager, size_t maxPoolSize) :
        GPUResource(std::move(manager)), m_maxPoolCount(maxPoolSize)
    {}

    // Returns a previously-recycled resource whose safe frame number has been
    // reached, or null if no such resource exists.
    rcp<GPUResource> acquire();

    // Places the given resource back in the pool, where it waits until its
    // safe frame number is reached. Refcount must be 1.
    void recycle(rcp<GPUResource>);

private:
    const size_t m_maxPoolCount;
    std::deque<ZombieResource> m_pool;
};
} // namespace rive::gpu

View File

@@ -335,7 +335,7 @@ private:
void setResourceSizes(ResourceAllocationCounts, bool forceRealloc = false);
void mapResourceBuffers(const ResourceAllocationCounts&);
void unmapResourceBuffers();
void unmapResourceBuffers(const ResourceAllocationCounts&);
// Returns the next coverage buffer prefix to use in a logical flush.
// Sets needsCoverageBufferClear if the coverage buffer must be cleared in

View File

@@ -40,15 +40,15 @@ public:
void* mapTessVertexSpanBuffer(size_t mapSizeInBytes) override;
void* mapTriangleVertexBuffer(size_t mapSizeInBytes) override;
void unmapFlushUniformBuffer() override;
void unmapImageDrawUniformBuffer() override;
void unmapPathBuffer() override;
void unmapPaintBuffer() override;
void unmapPaintAuxBuffer() override;
void unmapContourBuffer() override;
void unmapGradSpanBuffer() override;
void unmapTessVertexSpanBuffer() override;
void unmapTriangleVertexBuffer() override;
void unmapFlushUniformBuffer(size_t mapSizeInBytes) override;
void unmapImageDrawUniformBuffer(size_t mapSizeInBytes) override;
void unmapPathBuffer(size_t mapSizeInBytes) override;
void unmapPaintBuffer(size_t mapSizeInBytes) override;
void unmapPaintAuxBuffer(size_t mapSizeInBytes) override;
void unmapContourBuffer(size_t mapSizeInBytes) override;
void unmapGradSpanBuffer(size_t mapSizeInBytes) override;
void unmapTessVertexSpanBuffer(size_t mapSizeInBytes) override;
void unmapTriangleVertexBuffer(size_t mapSizeInBytes) override;
double secondsNow() const override
{

View File

@@ -80,15 +80,15 @@ public:
virtual void* mapTriangleVertexBuffer(size_t mapSizeInBytes) = 0;
// Unmap GPU buffers. All buffers will be unmapped before flush().
virtual void unmapFlushUniformBuffer() = 0;
virtual void unmapImageDrawUniformBuffer() = 0;
virtual void unmapPathBuffer() = 0;
virtual void unmapPaintBuffer() = 0;
virtual void unmapPaintAuxBuffer() = 0;
virtual void unmapContourBuffer() = 0;
virtual void unmapGradSpanBuffer() = 0;
virtual void unmapTessVertexSpanBuffer() = 0;
virtual void unmapTriangleVertexBuffer() = 0;
virtual void unmapFlushUniformBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapImageDrawUniformBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapPathBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapPaintBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapPaintAuxBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapContourBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapGradSpanBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapTessVertexSpanBuffer(size_t mapSizeInBytes) = 0;
virtual void unmapTriangleVertexBuffer(size_t mapSizeInBytes) = 0;
// Allocate resources that are updated and used during flush().
virtual void resizeGradientTexture(uint32_t width, uint32_t height) = 0;

View File

@@ -6,7 +6,6 @@
#include "rive/renderer/render_context_impl.hpp"
#include "rive/renderer/vulkan/vulkan_context.hpp"
#include "rive/renderer/vulkan/vkutil_resource_pool.hpp"
#include <chrono>
#include <map>
#include <vulkan/vulkan.h>
@@ -157,38 +156,50 @@ private:
void prepareToFlush(uint64_t nextFrameNumber,
uint64_t safeFrameNumber) override;
#define IMPLEMENT_PLS_BUFFER(Name, m_name) \
#define IMPLEMENT_PLS_BUFFER(Name, m_buffer) \
void resize##Name(size_t sizeInBytes) override \
{ \
m_name.setTargetSize(sizeInBytes); \
assert(m_buffer == nullptr); \
m_buffer##Pool.setTargetSize(sizeInBytes); \
} \
void* map##Name(size_t mapSizeInBytes) override \
{ \
return m_name.mapCurrentBuffer(mapSizeInBytes); \
assert(m_buffer != nullptr); \
return m_buffer->contents(); \
} \
void unmap##Name() override { m_name.unmapCurrentBuffer(); }
void unmap##Name(size_t mapSizeInBytes) override \
{ \
assert(m_buffer != nullptr); \
m_buffer->flushContents(mapSizeInBytes); \
}
#define IMPLEMENT_PLS_STRUCTURED_BUFFER(Name, m_name) \
#define IMPLEMENT_PLS_STRUCTURED_BUFFER(Name, m_buffer) \
void resize##Name(size_t sizeInBytes, gpu::StorageBufferStructure) \
override \
{ \
m_name.setTargetSize(sizeInBytes); \
assert(m_buffer == nullptr); \
m_buffer##Pool.setTargetSize(sizeInBytes); \
} \
void* map##Name(size_t mapSizeInBytes) override \
{ \
return m_name.mapCurrentBuffer(mapSizeInBytes); \
assert(m_buffer != nullptr); \
return m_buffer->contents(); \
} \
void unmap##Name() override { m_name.unmapCurrentBuffer(); }
void unmap##Name(size_t mapSizeInBytes) override \
{ \
assert(m_buffer != nullptr); \
m_buffer->flushContents(mapSizeInBytes); \
}
IMPLEMENT_PLS_BUFFER(FlushUniformBuffer, m_flushUniformBufferPool)
IMPLEMENT_PLS_BUFFER(ImageDrawUniformBuffer, m_imageDrawUniformBufferPool)
IMPLEMENT_PLS_STRUCTURED_BUFFER(PathBuffer, m_pathBufferPool)
IMPLEMENT_PLS_STRUCTURED_BUFFER(PaintBuffer, m_paintBufferPool)
IMPLEMENT_PLS_STRUCTURED_BUFFER(PaintAuxBuffer, m_paintAuxBufferPool)
IMPLEMENT_PLS_STRUCTURED_BUFFER(ContourBuffer, m_contourBufferPool)
IMPLEMENT_PLS_BUFFER(GradSpanBuffer, m_gradSpanBufferPool)
IMPLEMENT_PLS_BUFFER(TessVertexSpanBuffer, m_tessSpanBufferPool)
IMPLEMENT_PLS_BUFFER(TriangleVertexBuffer, m_triangleBufferPool)
IMPLEMENT_PLS_BUFFER(FlushUniformBuffer, m_flushUniformBuffer)
IMPLEMENT_PLS_BUFFER(ImageDrawUniformBuffer, m_imageDrawUniformBuffer)
IMPLEMENT_PLS_STRUCTURED_BUFFER(PathBuffer, m_pathBuffer)
IMPLEMENT_PLS_STRUCTURED_BUFFER(PaintBuffer, m_paintBuffer)
IMPLEMENT_PLS_STRUCTURED_BUFFER(PaintAuxBuffer, m_paintAuxBuffer)
IMPLEMENT_PLS_STRUCTURED_BUFFER(ContourBuffer, m_contourBuffer)
IMPLEMENT_PLS_BUFFER(GradSpanBuffer, m_gradSpanBuffer)
IMPLEMENT_PLS_BUFFER(TessVertexSpanBuffer, m_tessSpanBuffer)
IMPLEMENT_PLS_BUFFER(TriangleVertexBuffer, m_triangleBuffer)
#undef IMPLEMENT_PLS_BUFFER
#undef IMPLEMENT_PLS_STRUCTURED_BUFFER
@@ -200,10 +211,7 @@ private:
// Wraps a VkDescriptorPool created specifically for a PLS flush, and tracks
// its allocated descriptor sets.
// The vkutil::RenderingResource base ensures this class stays alive until
// its command buffer finishes, at which point we free the allocated
// descriptor sets and return the VkDescriptor to the renderContext.
class DescriptorSetPool final : public RefCnt<DescriptorSetPool>
class DescriptorSetPool final : public vkutil::Resource
{
public:
DescriptorSetPool(rcp<VulkanContext>);
@@ -213,16 +221,6 @@ private:
void reset();
private:
friend class RefCnt<DescriptorSetPool>;
friend class vkutil::ResourcePool<DescriptorSetPool>;
void onRefCntReachedZero() const
{
m_pool->onResourceRefCntReachedZero(this);
}
const rcp<VulkanContext> m_vk;
rcp<vkutil::ResourcePool<DescriptorSetPool>> m_pool;
VkDescriptorPool m_vkDescriptorPool;
};
@@ -238,7 +236,8 @@ private:
const uint32_t m_vendorID;
const VkFormat m_atlasFormat;
// Rive buffers.
// Rive buffer pools. These don't need to be rcp<> because the destructor of
// RenderContextVulkanImpl is already synchronized.
vkutil::BufferPool m_flushUniformBufferPool;
vkutil::BufferPool m_imageDrawUniformBufferPool;
vkutil::BufferPool m_pathBufferPool;
@@ -249,6 +248,18 @@ private:
vkutil::BufferPool m_tessSpanBufferPool;
vkutil::BufferPool m_triangleBufferPool;
// Specific Rive buffers that have been acquired for the current frame.
// When the frame ends, these get recycled back in their respective pools.
rcp<vkutil::Buffer> m_flushUniformBuffer;
rcp<vkutil::Buffer> m_imageDrawUniformBuffer;
rcp<vkutil::Buffer> m_pathBuffer;
rcp<vkutil::Buffer> m_paintBuffer;
rcp<vkutil::Buffer> m_paintAuxBuffer;
rcp<vkutil::Buffer> m_contourBuffer;
rcp<vkutil::Buffer> m_gradSpanBuffer;
rcp<vkutil::Buffer> m_tessSpanBuffer;
rcp<vkutil::Buffer> m_triangleBuffer;
std::chrono::steady_clock::time_point m_localEpoch =
std::chrono::steady_clock::now();
@@ -321,8 +332,18 @@ private:
rcp<vkutil::Buffer> m_imageRectVertexBuffer;
rcp<vkutil::Buffer> m_imageRectIndexBuffer;
// Pool of DescriptorSetPools that have been fully released. These will be
// recycled once their expirationFrameIdx is reached.
rcp<vkutil::ResourcePool<DescriptorSetPool>> m_descriptorSetPoolPool;
// Pool of DescriptorSetPool instances for flushing.
class DescriptorSetPoolPool : public GPUResourcePool
{
public:
constexpr static size_t MAX_POOL_SIZE = 64;
DescriptorSetPoolPool(rcp<GPUResourceManager> manager) :
GPUResourcePool(std::move(manager), MAX_POOL_SIZE)
{}
rcp<DescriptorSetPool> acquire();
};
rcp<DescriptorSetPoolPool> m_descriptorSetPoolPool;
};
} // namespace rive::gpu

View File

@@ -6,8 +6,8 @@
#include "rive/refcnt.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/gpu_resource.hpp"
#include <cassert>
#include <deque>
#include <stdio.h>
#include <stdlib.h>
#include <vulkan/vulkan.h>
@@ -50,42 +50,18 @@ enum class Mappability
// Base class for a GPU resource that needs to be kept alive until any in-flight
// command buffers that reference it have completed.
class RenderingResource : public RefCnt<RenderingResource>
class Resource : public GPUResource
{
public:
virtual ~RenderingResource() {}
virtual ~Resource() {}
const VulkanContext* vulkanContext() const { return m_vk.get(); }
VulkanContext* vk() const;
protected:
RenderingResource(rcp<VulkanContext> vk) : m_vk(std::move(vk)) {}
const rcp<VulkanContext> m_vk;
private:
friend class RefCnt<RenderingResource>;
// Don't delete RenderingResources immediately when their ref count reaches
// zero; wait until any in-flight command buffers are done referencing their
// underlying Vulkan objects.
void onRefCntReachedZero() const;
Resource(rcp<VulkanContext>);
};
// A RenderingResource that has been fully released, but whose underlying Vulkan
// object may still be referenced by an in-flight command buffer.
template <typename T> struct ZombieResource
{
ZombieResource(T* resource_, uint64_t lastFrameNumber_) :
resource(resource_), lastFrameNumber(lastFrameNumber_)
{
assert(resource_->debugging_refcnt() == 0);
}
std::unique_ptr<T> resource;
// Frame number at which the underlying Vulkan resource was last used.
const uint64_t lastFrameNumber;
};
class Buffer : public RenderingResource
class Buffer : public Resource
{
public:
~Buffer() override;
@@ -130,51 +106,32 @@ private:
};
// Wraps a pool of Buffers so we can map one while other(s) are in-flight.
class BufferPool
class BufferPool : public GPUResourcePool
{
public:
BufferPool(rcp<VulkanContext> vk,
VkBufferUsageFlags usageFlags,
size_t size = 0) :
m_vk(std::move(vk)), m_usageFlags(usageFlags), m_targetSize(size)
{}
VulkanContext* vulkanContext() const { return m_vk.get(); }
BufferPool(rcp<VulkanContext>, VkBufferUsageFlags, size_t size = 0);
size_t size() const { return m_targetSize; }
void setTargetSize(size_t size);
vkutil::Buffer* currentBuffer();
uint64_t currentBufferFrameNumber() { return m_currentBufferFrameNumber; }
// Returns a Buffer that is guaranteed to exist and be of size
// 'm_targetSize'.
rcp<vkutil::Buffer> acquire();
void* mapCurrentBuffer(size_t dirtySize = VK_WHOLE_SIZE);
void unmapCurrentBuffer();
// Returns the current buffer to the pool.
void releaseCurrentBuffer();
void recycle(rcp<vkutil::Buffer> buffer)
{
GPUResourcePool::recycle(std::move(buffer));
}
private:
const rcp<VulkanContext> m_vk;
VulkanContext* vk() const;
constexpr static size_t MAX_POOL_SIZE = 8;
const VkBufferUsageFlags m_usageFlags;
size_t m_targetSize;
rcp<vkutil::Buffer> m_currentBuffer;
uint64_t m_currentBufferFrameNumber = 0;
size_t m_pendingFlushSize = 0;
struct PooledBuffer
{
PooledBuffer() = default;
PooledBuffer(rcp<vkutil::Buffer> buffer_, uint64_t lastFrameNumber_) :
buffer(std::move(buffer_)), lastFrameNumber(lastFrameNumber_)
{}
rcp<vkutil::Buffer> buffer;
uint64_t lastFrameNumber = 0;
};
std::deque<PooledBuffer> m_pool;
};
class Texture : public RenderingResource
class Texture : public Resource
{
public:
~Texture() override;
@@ -193,7 +150,7 @@ private:
VkImage m_vkImage;
};
class TextureView : public RenderingResource
class TextureView : public Resource
{
public:
~TextureView() override;
@@ -215,7 +172,7 @@ private:
VkImageView m_vkImageView;
};
class Framebuffer : public RenderingResource
class Framebuffer : public Resource
{
public:
~Framebuffer() override;

View File

@@ -1,95 +0,0 @@
/*
* Copyright 2024 Rive
*/
#pragma once
#include "rive/renderer/vulkan/vulkan_context.hpp"
namespace rive::gpu::vkutil
{
// Used by ResourcePool<T> to construct resources.
template <typename T> class ResourceFactory
{
public:
ResourceFactory(rcp<VulkanContext> vk) : m_vk(std::move(vk)) {}
VulkanContext* vulkanContext() const { return m_vk.get(); }
rcp<T> make() { return make_rcp<T>(m_vk); }
private:
rcp<VulkanContext> m_vk;
};
// Manages a pool of recyclable Vulkan resources. When onRefCntReachedZero() is
// called on the resources owned by this pool, they are captured and recycled
// rather than deleted.
template <typename T, uint32_t MAX_RESOURCES_IN_POOL = 64>
class ResourcePool : public RefCnt<ResourcePool<T>>
{
public:
template <typename... FactoryArgs>
ResourcePool(FactoryArgs&&... factoryArgs) :
m_factory(std::forward<FactoryArgs>(factoryArgs)...)
{}
~ResourcePool()
{
m_releasedResources
.clear(); // Delete resources before freeing the factory.
}
rcp<T> make()
{
rcp<T> resource;
if (!m_releasedResources.empty() &&
m_releasedResources.front().lastFrameNumber <=
m_factory.vulkanContext()->safeFrameNumber())
{
resource = ref_rcp(m_releasedResources.front().resource.release());
m_releasedResources.pop_front();
resource->reset();
purgeExcessExpiredResources();
}
else
{
resource = m_factory.make();
}
resource->m_pool = ref_rcp(static_cast<ResourcePool<T>*>(this));
assert(resource->debugging_refcnt() == 1);
return resource;
}
void onResourceRefCntReachedZero(const T* resource)
{
auto mutableResource = const_cast<T*>(resource);
assert(mutableResource->debugging_refcnt() == 0);
assert(mutableResource->m_pool.get() == this);
// Recycle the resource!
m_releasedResources.emplace_back(
mutableResource,
m_factory.vulkanContext()->currentFrameNumber());
// Do this last in case it deletes our "this".
mutableResource->m_pool = nullptr;
purgeExcessExpiredResources();
}
void purgeExcessExpiredResources()
{
while (m_releasedResources.size() > MAX_RESOURCES_IN_POOL &&
m_releasedResources.front().lastFrameNumber <=
m_factory.vulkanContext()->safeFrameNumber())
{
m_releasedResources.pop_front();
}
}
protected:
ResourceFactory<T> m_factory;
// Pool of Resources that have been fully released.
// These can be recycled once their expirationFrameIdx is reached.
std::deque<vkutil::ZombieResource<T>> m_releasedResources;
};
} // namespace rive::gpu::vkutil

View File

@@ -4,8 +4,8 @@
#pragma once
#include "rive/renderer/gpu_resource.hpp"
#include "rive/renderer/vulkan/vkutil.hpp"
#include <deque>
VK_DEFINE_HANDLE(VmaAllocator);
@@ -35,7 +35,7 @@ struct VulkanFeatures
//
// Provides minor helper utilities, but for the most part, the client is
// expected to make raw Vulkan calls via the provided function pointers.
class VulkanContext : public RefCnt<VulkanContext>
class VulkanContext : public GPUResourceManager
{
public:
VulkanContext(VkInstance,
@@ -105,27 +105,6 @@ public:
bool isFormatSupportedWithFeatureFlags(VkFormat, VkFormatFeatureFlagBits);
bool supportsD24S8() const { return m_supportsD24S8; }
// Resource lifetime counters. Resources last used on or before
// 'safeFrameNumber' are safe to be released or recycled.
uint64_t currentFrameNumber() const { return m_currentFrameNumber; }
uint64_t safeFrameNumber() const { return m_safeFrameNumber; }
// Purges released resources whose lastFrameNumber is on or before
// safeFrameNumber, and updates the context's monotonically increasing
// m_currentFrameNumber.
void advanceFrameNumber(uint64_t nextFrameNumber, uint64_t safeFrameNumber);
// Called when a vkutil::RenderingResource has been fully released (refCnt
// reaches 0). The resource won't actually be deleted until the current
// frame's command buffer has finished executing.
void onRenderingResourceReleased(const vkutil::RenderingResource* resource);
// Called prior to the client beginning its shutdown cycle, and after all
// command buffers from all frames have finished executing. After shutting
// down, we delete vkutil::RenderingResources immediately instead of going
// through m_resourcePurgatory.
void shutdown();
// Resource allocation.
rcp<vkutil::Buffer> makeBuffer(const VkBufferCreateInfo&,
vkutil::Mappability);
@@ -216,23 +195,6 @@ public:
private:
const VmaAllocator m_vmaAllocator;
// Temporary storage for vkutil::RenderingResource instances that have been
// fully released, but need to persist until in-flight command buffers have
// finished referencing their underlying Vulkan objects.
std::deque<vkutil::ZombieResource<const vkutil::RenderingResource>>
m_resourcePurgatory;
// A m_currentFrameNumber of this value indicates we're in a shutdown cycle
// and resources should be deleted immediately upon release instead of going
// through m_resourcePurgatory.
constexpr static uint64_t SHUTDOWN_FRAME_NUMBER =
std::numeric_limits<uint64_t>::max();
// Resource lifetime counters. Resources last used on or before
// 'safeFrameNumber' are safe to be released or recycled.
uint64_t m_currentFrameNumber = 0;
uint64_t m_safeFrameNumber = 0;
// Vulkan spec: must support one of D24S8 and D32S8.
bool m_supportsD24S8 = false;
};

View File

@@ -74,6 +74,7 @@ public:
m_swapchain.contentsScale = dpiScale(window);
m_swapchain.displaySyncEnabled = NO;
view.layer = m_swapchain;
m_swapchain.drawableSize = CGSizeMake(width, height);
auto renderContextImpl =
m_renderContext->static_impl_cast<RenderContextMetalImpl>();

View File

@@ -0,0 +1,103 @@
/*
* Copyright 2025 Rive
*/
#include "rive/renderer/gpu_resource.hpp"
namespace rive::gpu
{
// Out-of-line definition of the (virtual) destructor.
GPUResource::~GPUResource() {}
void GPUResource::onRefCntReachedZero() const
{
    // GPUResourceManager will hold off on deleting "this" until its safe frame
    // number has been reached. The const_cast hands the manager a mutable
    // pointer so it can take over sole ownership of the released resource.
    m_manager->onRenderingResourceReleased(const_cast<GPUResource*>(this));
}
GPUResourceManager::~GPUResourceManager()
{
    // Call shutdown() before destroying the resource manager. These asserts
    // verify that the shutdown cycle ran and purgatory was fully drained.
    assert(m_currentFrameNumber == SHUTDOWN_FRAME_NUMBER);
    assert(m_safeFrameNumber == SHUTDOWN_FRAME_NUMBER);
    assert(m_resourcePurgatory.empty());
}
// Updates the monotonically increasing frame counters and deletes any released
// resources whose safe frame number has now been reached. Neither counter may
// move backwards, and safeFrameNumber can never pass nextFrameNumber.
void GPUResourceManager::advanceFrameNumber(uint64_t nextFrameNumber,
                                            uint64_t safeFrameNumber)
{
    assert(nextFrameNumber >= m_currentFrameNumber);
    assert(safeFrameNumber >= m_safeFrameNumber);
    assert(safeFrameNumber <= nextFrameNumber);
    m_currentFrameNumber = nextFrameNumber;
    m_safeFrameNumber = safeFrameNumber;
    // Delete all resources whose safe frame number has been reached.
    // m_resourcePurgatory is ordered by lastFrameNumber, so we only ever need
    // to inspect the front.
    while (!m_resourcePurgatory.empty() &&
           m_resourcePurgatory.front().lastFrameNumber <= m_safeFrameNumber)
    {
        assert(m_resourcePurgatory.front().resource->debugging_refcnt() == 0);
        m_resourcePurgatory.pop_front();
    }
}
// Takes ownership of a fully-released resource (refcount 0) and either defers
// its deletion until its safe frame number is reached, or deletes it
// immediately if that is already safe.
void GPUResourceManager::onRenderingResourceReleased(GPUResource* resource)
{
    assert(resource->manager() == this);
    if (m_currentFrameNumber > m_safeFrameNumber)
    {
        // Hold this resource until its safe frame number is reached.
        assert(resource->debugging_refcnt() == 0);
        // Purgatory stays ordered by lastFrameNumber; new entries always go
        // on the back.
        assert(m_resourcePurgatory.empty() ||
               m_currentFrameNumber >=
                   m_resourcePurgatory.back().lastFrameNumber);
        m_resourcePurgatory.emplace_back(resource, m_currentFrameNumber);
    }
    else
    {
        // The current frame is already safe (this includes the shutdown
        // cycle, where both counters equal SHUTDOWN_FRAME_NUMBER). Delete
        // immediately.
        delete resource;
    }
}
// Enters the shutdown cycle: advancing both counters to SHUTDOWN_FRAME_NUMBER
// drains purgatory (everything becomes safe) and makes subsequent releases
// delete their resources immediately.
void GPUResourceManager::shutdown()
{
    advanceFrameNumber(SHUTDOWN_FRAME_NUMBER, SHUTDOWN_FRAME_NUMBER);
}
// Returns a previously-recycled resource whose safe frame number has been
// reached, or null if no such resource exists.
rcp<GPUResource> GPUResourcePool::acquire()
{
    rcp<GPUResource> resource;
    if (!m_pool.empty() &&
        m_pool.front().lastFrameNumber <= m_manager->safeFrameNumber())
    {
        // Recycle the oldest buffer in the pool. release() transfers the
        // pointer out of the ZombieResource without deleting it.
        resource = rcp(m_pool.front().resource.release());
        m_pool.pop_front();
        // Trim the pool in case it's grown out of control (meaning it was
        // advanced multiple times in a single frame). Only entries whose safe
        // frame number has been reached are ever dropped.
        while (m_pool.size() > m_maxPoolCount &&
               m_pool.front().lastFrameNumber <= m_manager->safeFrameNumber())
        {
            m_pool.pop_front();
        }
    }
    assert(resource == nullptr || resource->debugging_refcnt() == 1);
    return resource;
}
// Places the given resource back in the pool, where it waits until its safe
// frame number is reached. Refcount must be 1 (the pool becomes sole owner).
void GPUResourcePool::recycle(rcp<GPUResource> resource)
{
    if (resource != nullptr)
    {
        // Return the current buffer to the pool.
        assert(resource->debugging_refcnt() == 1);
        // m_pool stays ordered by lastFrameNumber; new entries go on the back.
        assert(m_pool.empty() || m_manager->currentFrameNumber() >=
                                     m_pool.back().lastFrameNumber);
        // release() hands the pointer to the ZombieResource, which takes over
        // ownership.
        m_pool.emplace_back(resource.release(),
                            m_manager->currentFrameNumber());
    }
}
} // namespace rive::gpu

View File

@@ -612,9 +612,6 @@ void RenderContext::flush(const FlushResources& flushResources)
assert(flushResources.renderTarget->height() ==
m_frameDescriptor.renderTargetHeight);
m_impl->prepareToFlush(flushResources.currentFrameNumber,
flushResources.safeFrameNumber);
m_clipContentID = 0;
// Layout this frame's resource buffers and textures.
@@ -631,53 +628,58 @@ void RenderContext::flush(const FlushResources& flushResources)
// Determine the minimum required resource allocation sizes to service this
// flush.
ResourceAllocationCounts allocs;
allocs.flushUniformBufferCount = m_logicalFlushes.size();
allocs.imageDrawUniformBufferCount =
ResourceAllocationCounts resourceRequirements;
resourceRequirements.flushUniformBufferCount = m_logicalFlushes.size();
resourceRequirements.imageDrawUniformBufferCount =
totalFrameResourceCounts.imageDrawCount;
allocs.pathBufferCount =
resourceRequirements.pathBufferCount =
totalFrameResourceCounts.pathCount + layoutCounts.pathPaddingCount;
allocs.paintBufferCount =
resourceRequirements.paintBufferCount =
totalFrameResourceCounts.pathCount + layoutCounts.paintPaddingCount;
allocs.paintAuxBufferCount =
resourceRequirements.paintAuxBufferCount =
totalFrameResourceCounts.pathCount + layoutCounts.paintAuxPaddingCount;
allocs.contourBufferCount = totalFrameResourceCounts.contourCount +
layoutCounts.contourPaddingCount;
allocs.gradSpanBufferCount =
resourceRequirements.contourBufferCount =
totalFrameResourceCounts.contourCount +
layoutCounts.contourPaddingCount;
resourceRequirements.gradSpanBufferCount =
layoutCounts.gradSpanCount + layoutCounts.gradSpanPaddingCount;
allocs.tessSpanBufferCount =
resourceRequirements.tessSpanBufferCount =
totalFrameResourceCounts.maxTessellatedSegmentCount;
allocs.triangleVertexBufferCount =
resourceRequirements.triangleVertexBufferCount =
totalFrameResourceCounts.maxTriangleVertexCount;
allocs.gradTextureHeight = layoutCounts.maxGradTextureHeight;
allocs.tessTextureHeight = layoutCounts.maxTessTextureHeight;
allocs.atlasTextureWidth = layoutCounts.maxAtlasWidth;
allocs.atlasTextureHeight = layoutCounts.maxAtlasHeight;
allocs.coverageBufferLength = layoutCounts.maxCoverageBufferLength;
resourceRequirements.gradTextureHeight = layoutCounts.maxGradTextureHeight;
resourceRequirements.tessTextureHeight = layoutCounts.maxTessTextureHeight;
resourceRequirements.atlasTextureWidth = layoutCounts.maxAtlasWidth;
resourceRequirements.atlasTextureHeight = layoutCounts.maxAtlasHeight;
resourceRequirements.coverageBufferLength =
layoutCounts.maxCoverageBufferLength;
// Ensure we're within hardware limits.
assert(allocs.gradTextureHeight <= kMaxTextureHeight);
assert(allocs.tessTextureHeight <= kMaxTextureHeight);
assert(allocs.atlasTextureWidth <= atlasMaxSize() ||
allocs.atlasTextureWidth <= frameDescriptor().renderTargetWidth);
assert(allocs.atlasTextureHeight <= atlasMaxSize() ||
allocs.atlasTextureHeight <= frameDescriptor().renderTargetHeight);
assert(allocs.coverageBufferLength <=
assert(resourceRequirements.gradTextureHeight <= kMaxTextureHeight);
assert(resourceRequirements.tessTextureHeight <= kMaxTextureHeight);
assert(resourceRequirements.atlasTextureWidth <= atlasMaxSize() ||
resourceRequirements.atlasTextureWidth <=
frameDescriptor().renderTargetWidth);
assert(resourceRequirements.atlasTextureHeight <= atlasMaxSize() ||
resourceRequirements.atlasTextureHeight <=
frameDescriptor().renderTargetHeight);
assert(resourceRequirements.coverageBufferLength <=
platformFeatures().maxCoverageBufferLength);
// Track m_maxRecentResourceRequirements so we can trim GPU allocations when
// steady-state usage goes down.
m_maxRecentResourceRequirements =
simd::max(allocs.toVec(), m_maxRecentResourceRequirements.toVec());
simd::max(resourceRequirements.toVec(),
m_maxRecentResourceRequirements.toVec());
// Grow resources enough to handle this flush.
// If "allocs" already fits in our current allocations, then don't change
// them. If they don't fit, overallocate by 25% in order to create some
// slack for growth.
allocs = simd::if_then_else(allocs.toVec() <=
m_currentResourceAllocations.toVec(),
m_currentResourceAllocations.toVec(),
allocs.toVec() * size_t(5) / size_t(4));
ResourceAllocationCounts allocs = simd::if_then_else(
resourceRequirements.toVec() <= m_currentResourceAllocations.toVec(),
m_currentResourceAllocations.toVec(),
resourceRequirements.toVec() * size_t(5) / size_t(4));
// In case the 25% growth pushed us above limits.
allocs.gradTextureHeight =
@@ -726,7 +728,11 @@ void RenderContext::flush(const FlushResources& flushResources)
}
setResourceSizes(allocs);
mapResourceBuffers(allocs);
m_impl->prepareToFlush(flushResources.currentFrameNumber,
flushResources.safeFrameNumber);
mapResourceBuffers(resourceRequirements);
for (const auto& flush : m_logicalFlushes)
{
@@ -753,7 +759,7 @@ void RenderContext::flush(const FlushResources& flushResources)
assert(m_triangleVertexData.elementsWritten() <=
totalFrameResourceCounts.maxTriangleVertexCount);
unmapResourceBuffers();
unmapResourceBuffers(resourceRequirements);
// Issue logical flushes to the backend.
for (const auto& flush : m_logicalFlushes)
@@ -1900,52 +1906,66 @@ void RenderContext::mapResourceBuffers(
m_triangleVertexData.hasRoomFor(mapCounts.triangleVertexBufferCount));
}
void RenderContext::unmapResourceBuffers()
void RenderContext::unmapResourceBuffers(
const ResourceAllocationCounts& mapCounts)
{
if (m_flushUniformData)
{
m_impl->unmapFlushUniformBuffer();
m_flushUniformData.reset();
m_flushUniformData.unmapElements(
m_impl.get(),
&RenderContextImpl::unmapFlushUniformBuffer,
mapCounts.flushUniformBufferCount);
}
if (m_imageDrawUniformData)
{
m_impl->unmapImageDrawUniformBuffer();
m_imageDrawUniformData.reset();
m_imageDrawUniformData.unmapElements(
m_impl.get(),
&RenderContextImpl::unmapImageDrawUniformBuffer,
mapCounts.imageDrawUniformBufferCount);
}
if (m_pathData)
{
m_impl->unmapPathBuffer();
m_pathData.reset();
m_pathData.unmapElements(m_impl.get(),
&RenderContextImpl::unmapPathBuffer,
mapCounts.pathBufferCount);
}
if (m_paintData)
{
m_impl->unmapPaintBuffer();
m_paintData.reset();
m_paintData.unmapElements(m_impl.get(),
&RenderContextImpl::unmapPaintBuffer,
mapCounts.paintBufferCount);
}
if (m_paintAuxData)
{
m_impl->unmapPaintAuxBuffer();
m_paintAuxData.reset();
m_paintAuxData.unmapElements(m_impl.get(),
&RenderContextImpl::unmapPaintAuxBuffer,
mapCounts.paintAuxBufferCount);
}
if (m_contourData)
{
m_impl->unmapContourBuffer();
m_contourData.reset();
m_contourData.unmapElements(m_impl.get(),
&RenderContextImpl::unmapContourBuffer,
mapCounts.contourBufferCount);
}
if (m_gradSpanData)
{
m_impl->unmapGradSpanBuffer();
m_gradSpanData.reset();
m_gradSpanData.unmapElements(m_impl.get(),
&RenderContextImpl::unmapGradSpanBuffer,
mapCounts.gradSpanBufferCount);
}
if (m_tessSpanData)
{
m_impl->unmapTessVertexSpanBuffer();
m_tessSpanData.reset();
m_tessSpanData.unmapElements(
m_impl.get(),
&RenderContextImpl::unmapTessVertexSpanBuffer,
mapCounts.tessSpanBufferCount);
}
if (m_triangleVertexData)
{
m_impl->unmapTriangleVertexBuffer();
m_triangleVertexData.reset();
m_triangleVertexData.unmapElements(
m_impl.get(),
&RenderContextImpl::unmapTriangleVertexBuffer,
mapCounts.triangleVertexBufferCount);
}
}

View File

@@ -132,48 +132,57 @@ void* RenderContextHelperImpl::mapTriangleVertexBuffer(size_t mapSizeInBytes)
return m_triangleBuffer->mapBuffer(mapSizeInBytes);
}
void RenderContextHelperImpl::unmapFlushUniformBuffer()
void RenderContextHelperImpl::unmapFlushUniformBuffer(size_t mapSizeInBytes)
{
assert(m_flushUniformBuffer->mapSizeInBytes() == mapSizeInBytes);
m_flushUniformBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapImageDrawUniformBuffer()
void RenderContextHelperImpl::unmapImageDrawUniformBuffer(size_t mapSizeInBytes)
{
assert(m_imageDrawUniformBuffer->mapSizeInBytes() == mapSizeInBytes);
m_imageDrawUniformBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapPathBuffer()
void RenderContextHelperImpl::unmapPathBuffer(size_t mapSizeInBytes)
{
assert(m_pathBuffer->mapSizeInBytes() == mapSizeInBytes);
m_pathBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapPaintBuffer()
void RenderContextHelperImpl::unmapPaintBuffer(size_t mapSizeInBytes)
{
assert(m_paintBuffer->mapSizeInBytes() == mapSizeInBytes);
m_paintBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapPaintAuxBuffer()
void RenderContextHelperImpl::unmapPaintAuxBuffer(size_t mapSizeInBytes)
{
assert(m_paintAuxBuffer->mapSizeInBytes() == mapSizeInBytes);
m_paintAuxBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapContourBuffer()
void RenderContextHelperImpl::unmapContourBuffer(size_t mapSizeInBytes)
{
assert(m_contourBuffer->mapSizeInBytes() == mapSizeInBytes);
m_contourBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapGradSpanBuffer()
void RenderContextHelperImpl::unmapGradSpanBuffer(size_t mapSizeInBytes)
{
assert(m_gradSpanBuffer->mapSizeInBytes() == mapSizeInBytes);
m_gradSpanBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapTessVertexSpanBuffer()
void RenderContextHelperImpl::unmapTessVertexSpanBuffer(size_t mapSizeInBytes)
{
assert(m_tessSpanBuffer->mapSizeInBytes() == mapSizeInBytes);
m_tessSpanBuffer->unmapAndSubmitBuffer();
}
void RenderContextHelperImpl::unmapTriangleVertexBuffer()
void RenderContextHelperImpl::unmapTriangleVertexBuffer(size_t mapSizeInBytes)
{
assert(m_triangleBuffer->mapSizeInBytes() == mapSizeInBytes);
m_triangleBuffer->unmapAndSubmitBuffer();
}
} // namespace rive::gpu

View File

@@ -377,28 +377,27 @@ public:
RenderBufferFlags renderBufferFlags,
size_t sizeInBytes) :
lite_rtti_override(renderBufferType, renderBufferFlags, sizeInBytes),
m_bufferPool(std::move(vk),
render_buffer_usage_flags(renderBufferType),
sizeInBytes)
m_bufferPool(make_rcp<vkutil::BufferPool>(
std::move(vk),
render_buffer_usage_flags(renderBufferType),
sizeInBytes))
{}
vkutil::Buffer* currentBuffer() { return m_bufferPool.currentBuffer(); }
vkutil::Buffer* currentBuffer() { return m_currentBuffer.get(); }
protected:
void* onMap() override
{
// A mesh buffer can't be mapped more than once in a frame or we could
// blow out GPU memory.
assert(m_bufferPool.currentBufferFrameNumber() !=
m_bufferPool.vulkanContext()->currentFrameNumber());
m_bufferPool.releaseCurrentBuffer();
return m_bufferPool.mapCurrentBuffer();
m_bufferPool->recycle(std::move(m_currentBuffer));
m_currentBuffer = m_bufferPool->acquire();
return m_currentBuffer->contents();
}
void onUnmap() override { m_bufferPool.unmapCurrentBuffer(); }
void onUnmap() override { m_currentBuffer->flushContents(); }
private:
vkutil::BufferPool m_bufferPool;
rcp<vkutil::BufferPool> m_bufferPool;
rcp<vkutil::Buffer> m_currentBuffer;
};
rcp<RenderBuffer> RenderContextVulkanImpl::makeRenderBuffer(
@@ -2064,8 +2063,7 @@ RenderContextVulkanImpl::RenderContextVulkanImpl(
m_gradSpanBufferPool(m_vk, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
m_tessSpanBufferPool(m_vk, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
m_triangleBufferPool(m_vk, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
m_descriptorSetPoolPool(
make_rcp<vkutil::ResourcePool<DescriptorSetPool>>(m_vk))
m_descriptorSetPoolPool(make_rcp<DescriptorSetPoolPool>(m_vk))
{
m_platformFeatures.supportsRasterOrdering =
m_vk->features.rasterizationOrderColorAttachmentAccess;
@@ -2441,6 +2439,17 @@ void RenderContextVulkanImpl::initGPUObjects()
RenderContextVulkanImpl::~RenderContextVulkanImpl()
{
// These should all have gotten recycled at the end of the last frame.
assert(m_flushUniformBuffer == nullptr);
assert(m_imageDrawUniformBuffer == nullptr);
assert(m_pathBuffer == nullptr);
assert(m_paintBuffer == nullptr);
assert(m_paintAuxBuffer == nullptr);
assert(m_contourBuffer == nullptr);
assert(m_gradSpanBuffer == nullptr);
assert(m_tessSpanBuffer == nullptr);
assert(m_triangleBuffer == nullptr);
// Tell the context we are entering our shutdown cycle. After this point,
// all resources will be deleted immediately upon their refCount reaching
// zero, as opposed to being kept alive for in-flight command buffers.
@@ -2571,24 +2580,31 @@ void RenderContextVulkanImpl::resizeCoverageBuffer(size_t sizeInBytes)
void RenderContextVulkanImpl::prepareToFlush(uint64_t nextFrameNumber,
uint64_t safeFrameNumber)
{
// These should all have gotten recycled at the end of the last frame.
assert(m_flushUniformBuffer == nullptr);
assert(m_imageDrawUniformBuffer == nullptr);
assert(m_pathBuffer == nullptr);
assert(m_paintBuffer == nullptr);
assert(m_paintAuxBuffer == nullptr);
assert(m_contourBuffer == nullptr);
assert(m_gradSpanBuffer == nullptr);
assert(m_tessSpanBuffer == nullptr);
assert(m_triangleBuffer == nullptr);
// Advance the context frame and delete resources that are no longer
// referenced by in-flight command buffers.
m_vk->advanceFrameNumber(nextFrameNumber, safeFrameNumber);
// Clean expired resources in our pool of descriptor set pools.
m_descriptorSetPoolPool->purgeExcessExpiredResources();
// Release the current buffers in our pools so we can get new ones for the
// next flush.
m_flushUniformBufferPool.releaseCurrentBuffer();
m_imageDrawUniformBufferPool.releaseCurrentBuffer();
m_pathBufferPool.releaseCurrentBuffer();
m_paintBufferPool.releaseCurrentBuffer();
m_paintAuxBufferPool.releaseCurrentBuffer();
m_contourBufferPool.releaseCurrentBuffer();
m_gradSpanBufferPool.releaseCurrentBuffer();
m_tessSpanBufferPool.releaseCurrentBuffer();
m_triangleBufferPool.releaseCurrentBuffer();
// Acquire buffers for the flush.
m_flushUniformBuffer = m_flushUniformBufferPool.acquire();
m_imageDrawUniformBuffer = m_imageDrawUniformBufferPool.acquire();
m_pathBuffer = m_pathBufferPool.acquire();
m_paintBuffer = m_paintBufferPool.acquire();
m_paintAuxBuffer = m_paintAuxBufferPool.acquire();
m_contourBuffer = m_contourBufferPool.acquire();
m_gradSpanBuffer = m_gradSpanBufferPool.acquire();
m_tessSpanBuffer = m_tessSpanBufferPool.acquire();
m_triangleBuffer = m_triangleBufferPool.acquire();
}
namespace descriptor_pool_limits
@@ -2607,8 +2623,8 @@ constexpr static uint32_t kMaxDescriptorSets = 3 + kMaxImageTextureUpdates;
} // namespace descriptor_pool_limits
RenderContextVulkanImpl::DescriptorSetPool::DescriptorSetPool(
rcp<VulkanContext> vk) :
m_vk(std::move(vk))
rcp<VulkanContext> vulkanContext) :
vkutil::Resource(std::move(vulkanContext))
{
VkDescriptorPoolSize descriptorPoolSizes[] = {
{
@@ -2646,7 +2662,7 @@ RenderContextVulkanImpl::DescriptorSetPool::DescriptorSetPool(
.pPoolSizes = descriptorPoolSizes,
};
VK_CHECK(m_vk->CreateDescriptorPool(m_vk->device,
VK_CHECK(vk()->CreateDescriptorPool(vk()->device,
&descriptorPoolCreateInfo,
nullptr,
&m_vkDescriptorPool));
@@ -2654,7 +2670,7 @@ RenderContextVulkanImpl::DescriptorSetPool::DescriptorSetPool(
RenderContextVulkanImpl::DescriptorSetPool::~DescriptorSetPool()
{
m_vk->DestroyDescriptorPool(m_vk->device, m_vkDescriptorPool, nullptr);
vk()->DestroyDescriptorPool(vk()->device, m_vkDescriptorPool, nullptr);
}
VkDescriptorSet RenderContextVulkanImpl::DescriptorSetPool::
@@ -2668,7 +2684,7 @@ VkDescriptorSet RenderContextVulkanImpl::DescriptorSetPool::
};
VkDescriptorSet descriptorSet;
VK_CHECK(m_vk->AllocateDescriptorSets(m_vk->device,
VK_CHECK(vk()->AllocateDescriptorSets(vk()->device,
&descriptorSetAllocateInfo,
&descriptorSet));
@@ -2677,7 +2693,24 @@ VkDescriptorSet RenderContextVulkanImpl::DescriptorSetPool::
void RenderContextVulkanImpl::DescriptorSetPool::reset()
{
m_vk->ResetDescriptorPool(m_vk->device, m_vkDescriptorPool, 0);
vk()->ResetDescriptorPool(vk()->device, m_vkDescriptorPool, 0);
}
rcp<RenderContextVulkanImpl::DescriptorSetPool> RenderContextVulkanImpl::
DescriptorSetPoolPool::acquire()
{
auto descriptorSetPool =
static_rcp_cast<DescriptorSetPool>(GPUResourcePool::acquire());
if (descriptorSetPool == nullptr)
{
descriptorSetPool = make_rcp<DescriptorSetPool>(
static_rcp_cast<VulkanContext>(m_manager));
}
else
{
descriptorSetPool->reset();
}
return descriptorSetPool;
}
vkutil::TextureView* RenderTargetVulkan::ensureOffscreenColorTextureView()
@@ -2803,7 +2836,8 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
auto commandBuffer =
reinterpret_cast<VkCommandBuffer>(desc.externalCommandBuffer);
rcp<DescriptorSetPool> descriptorSetPool = m_descriptorSetPoolPool->make();
rcp<DescriptorSetPool> descriptorSetPool =
m_descriptorSetPoolPool->acquire();
// Apply pending texture updates.
if (m_featherTexture->hasUpdates())
@@ -2837,7 +2871,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
},
{{
.buffer = *m_flushUniformBufferPool.currentBuffer(),
.buffer = *m_flushUniformBuffer,
.offset = desc.flushUniformDataOffsetInBytes,
.range = sizeof(gpu::FlushUniforms),
}});
@@ -2849,7 +2883,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
},
{{
.buffer = *m_imageDrawUniformBufferPool.currentBuffer(),
.buffer = *m_imageDrawUniformBuffer,
.offset = 0,
.range = sizeof(gpu::ImageDrawUniforms),
}});
@@ -2861,7 +2895,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
},
{{
.buffer = *m_pathBufferPool.currentBuffer(),
.buffer = *m_pathBuffer,
.offset = desc.firstPath * sizeof(gpu::PathData),
.range = VK_WHOLE_SIZE,
}});
@@ -2874,12 +2908,12 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
},
{
{
.buffer = *m_paintBufferPool.currentBuffer(),
.buffer = *m_paintBuffer,
.offset = desc.firstPaint * sizeof(gpu::PaintData),
.range = VK_WHOLE_SIZE,
},
{
.buffer = *m_paintAuxBufferPool.currentBuffer(),
.buffer = *m_paintAuxBuffer,
.offset = desc.firstPaintAux * sizeof(gpu::PaintAuxData),
.range = VK_WHOLE_SIZE,
},
@@ -2893,7 +2927,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
},
{{
.buffer = *m_contourBufferPool.currentBuffer(),
.buffer = *m_contourBuffer,
.offset = desc.firstContour * sizeof(gpu::ContourData),
.range = VK_WHOLE_SIZE,
}});
@@ -3013,7 +3047,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
m_vk->CmdSetScissor(commandBuffer, 0, 1, &renderArea);
VkBuffer gradSpanBuffer = *m_gradSpanBufferPool.currentBuffer();
VkBuffer gradSpanBuffer = *m_gradSpanBuffer;
VkDeviceSize gradSpanOffset =
desc.firstGradSpan * sizeof(gpu::GradientSpan);
m_vk->CmdBindVertexBuffers(commandBuffer,
@@ -3097,7 +3131,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
m_vk->CmdSetScissor(commandBuffer, 0, 1, &renderArea);
VkBuffer tessBuffer = *m_tessSpanBufferPool.currentBuffer();
VkBuffer tessBuffer = *m_tessSpanBuffer;
VkDeviceSize tessOffset =
desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan);
m_vk->CmdBindVertexBuffers(commandBuffer,
@@ -3726,7 +3760,9 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
{
// We ran out of room for image texture updates. Allocate a
// new pool.
descriptorSetPool = m_descriptorSetPoolPool->make();
m_descriptorSetPoolPool->recycle(
std::move(descriptorSetPool));
descriptorSetPool = m_descriptorSetPoolPool->acquire();
imageTextureUpdateCount = 0;
}
@@ -3881,7 +3917,7 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
case DrawType::interiorTriangulation:
case DrawType::atlasBlit:
{
VkBuffer buffer = *m_triangleBufferPool.currentBuffer();
VkBuffer buffer = *m_triangleBuffer;
m_vk->CmdBindVertexBuffers(commandBuffer,
0,
1,
@@ -4029,6 +4065,23 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
}
renderTarget->setTargetLastAccess(finalRenderTargetAccess);
m_descriptorSetPoolPool->recycle(std::move(descriptorSetPool));
if (desc.isFinalFlushOfFrame)
{
// Recycle buffers.
m_flushUniformBufferPool.recycle(std::move(m_flushUniformBuffer));
m_imageDrawUniformBufferPool.recycle(
std::move(m_imageDrawUniformBuffer));
m_pathBufferPool.recycle(std::move(m_pathBuffer));
m_paintBufferPool.recycle(std::move(m_paintBuffer));
m_paintAuxBufferPool.recycle(std::move(m_paintAuxBuffer));
m_contourBufferPool.recycle(std::move(m_contourBuffer));
m_gradSpanBufferPool.recycle(std::move(m_gradSpanBuffer));
m_tessSpanBufferPool.recycle(std::move(m_tessSpanBuffer));
m_triangleBufferPool.recycle(std::move(m_triangleBuffer));
}
}
void RenderContextVulkanImpl::hotloadShaders(

View File

@@ -10,18 +10,17 @@
namespace rive::gpu::vkutil
{
void vkutil::RenderingResource::onRefCntReachedZero() const
Resource::Resource(rcp<VulkanContext> vk) : GPUResource(std::move(vk)) {}
inline VulkanContext* Resource::vk() const
{
// VulkanContext will hold off on deleting "this" until any in-flight
// command buffers have finished (potentially) referencing our underlying
// Vulkan objects.
m_vk->onRenderingResourceReleased(this);
return static_cast<VulkanContext*>(m_manager.get());
}
Buffer::Buffer(rcp<VulkanContext> vk,
const VkBufferCreateInfo& info,
Mappability mappability) :
RenderingResource(std::move(vk)), m_mappability(mappability), m_info(info)
Resource(std::move(vk)), m_mappability(mappability), m_info(info)
{
m_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
init();
@@ -37,9 +36,9 @@ void Buffer::resizeImmediately(size_t sizeInBytes)
{
if (m_mappability != Mappability::none)
{
vmaUnmapMemory(m_vk->allocator(), m_vmaAllocation);
vmaUnmapMemory(vk()->allocator(), m_vmaAllocation);
}
vmaDestroyBuffer(m_vk->allocator(), m_vkBuffer, m_vmaAllocation);
vmaDestroyBuffer(vk()->allocator(), m_vkBuffer, m_vmaAllocation);
}
m_info.size = sizeInBytes;
init();
@@ -69,7 +68,7 @@ void Buffer::init()
.usage = VMA_MEMORY_USAGE_AUTO,
};
VK_CHECK(vmaCreateBuffer(m_vk->allocator(),
VK_CHECK(vmaCreateBuffer(vk()->allocator(),
&m_info,
&allocInfo,
&m_vkBuffer,
@@ -81,7 +80,7 @@ void Buffer::init()
// Leave the buffer constantly mapped and let the OS/drivers handle
// the rest.
VK_CHECK(
vmaMapMemory(m_vk->allocator(), m_vmaAllocation, &m_contents));
vmaMapMemory(vk()->allocator(), m_vmaAllocation, &m_contents));
}
else
{
@@ -98,7 +97,7 @@ void Buffer::init()
void Buffer::flushContents(size_t updatedSizeInBytes)
{
vmaFlushAllocation(m_vk->allocator(),
vmaFlushAllocation(vk()->allocator(),
m_vmaAllocation,
0,
updatedSizeInBytes);
@@ -106,16 +105,27 @@ void Buffer::flushContents(size_t updatedSizeInBytes)
void Buffer::invalidateContents(size_t updatedSizeInBytes)
{
vmaInvalidateAllocation(m_vk->allocator(),
vmaInvalidateAllocation(vk()->allocator(),
m_vmaAllocation,
0,
updatedSizeInBytes);
}
BufferPool::BufferPool(rcp<VulkanContext> vk,
VkBufferUsageFlags usageFlags,
size_t size) :
GPUResourcePool(std::move(vk), MAX_POOL_SIZE),
m_usageFlags(usageFlags),
m_targetSize(size)
{}
inline VulkanContext* BufferPool::vk() const
{
return static_cast<VulkanContext*>(m_manager.get());
}
void BufferPool::setTargetSize(size_t size)
{
assert(m_currentBuffer == nullptr); // Call releaseCurrentBuffer() first.
// Buffers always get bound, even if unused, so make sure they aren't empty
// and we get a valid Vulkan handle.
size = std::max<size_t>(size, 1);
@@ -130,71 +140,28 @@ void BufferPool::setTargetSize(size_t size)
m_targetSize = size;
}
vkutil::Buffer* BufferPool::currentBuffer()
rcp<vkutil::Buffer> BufferPool::acquire()
{
if (m_currentBuffer == nullptr)
auto buffer = static_rcp_cast<vkutil::Buffer>(GPUResourcePool::acquire());
if (buffer == nullptr)
{
if (!m_pool.empty() &&
m_pool.front().lastFrameNumber <= m_vk->safeFrameNumber())
{
// Recycle the oldest buffer in the pool.
m_currentBuffer = std::move(m_pool.front().buffer);
if (m_currentBuffer->info().size != m_targetSize)
buffer = vk()->makeBuffer(
{
m_currentBuffer->resizeImmediately(m_targetSize);
}
m_pool.pop_front();
// Trim the pool in case it's grown out of control (meaning it was
// advanced multiple times in a single frame).
constexpr static size_t POOL_MAX_COUNT = 8;
while (m_pool.size() > POOL_MAX_COUNT &&
m_pool.front().lastFrameNumber <= m_vk->safeFrameNumber())
{
m_pool.pop_front();
}
}
else
{
// There wasn't a free buffer in the pool. Create a new one.
m_currentBuffer =
m_vk->makeBuffer({.size = m_targetSize, .usage = m_usageFlags},
Mappability::writeOnly);
}
.size = m_targetSize,
.usage = m_usageFlags,
},
Mappability::writeOnly);
}
m_currentBufferFrameNumber = m_vk->currentFrameNumber();
return m_currentBuffer.get();
}
void* BufferPool::mapCurrentBuffer(size_t dirtySize)
{
m_pendingFlushSize = dirtySize;
return currentBuffer()->contents();
}
void BufferPool::unmapCurrentBuffer()
{
assert(m_pendingFlushSize > 0);
currentBuffer()->flushContents(m_pendingFlushSize);
m_pendingFlushSize = 0;
}
void BufferPool::releaseCurrentBuffer()
{
if (m_currentBuffer != nullptr)
else if (buffer->info().size != m_targetSize)
{
// Return the current buffer to the pool.
m_pool.emplace_back(std::move(m_currentBuffer),
m_currentBufferFrameNumber);
assert(m_currentBuffer == nullptr);
buffer->resizeImmediately(m_targetSize);
}
// The current buffer's frameNumber will update when it gets accessed.
m_currentBufferFrameNumber = 0;
return buffer;
}
Texture::Texture(rcp<VulkanContext> vk, const VkImageCreateInfo& info) :
RenderingResource(std::move(vk)), m_info(info)
Texture::Texture(rcp<VulkanContext> vulkanContext,
const VkImageCreateInfo& info) :
Resource(std::move(vulkanContext)), m_info(info)
{
m_info = info;
m_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
@@ -232,7 +199,7 @@ Texture::Texture(rcp<VulkanContext> vk, const VkImageCreateInfo& info) :
.usage = VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED,
};
if (vmaCreateImage(m_vk->allocator(),
if (vmaCreateImage(vk()->allocator(),
&m_info,
&allocInfo,
&m_vkImage,
@@ -247,7 +214,7 @@ Texture::Texture(rcp<VulkanContext> vk, const VkImageCreateInfo& info) :
.usage = VMA_MEMORY_USAGE_AUTO,
};
VK_CHECK(vmaCreateImage(m_vk->allocator(),
VK_CHECK(vmaCreateImage(vk()->allocator(),
&m_info,
&allocInfo,
&m_vkImage,
@@ -259,34 +226,34 @@ Texture::~Texture()
{
if (m_vmaAllocation != VK_NULL_HANDLE)
{
vmaDestroyImage(m_vk->allocator(), m_vkImage, m_vmaAllocation);
vmaDestroyImage(vk()->allocator(), m_vkImage, m_vmaAllocation);
}
}
TextureView::TextureView(rcp<VulkanContext> vk,
TextureView::TextureView(rcp<VulkanContext> vulkanContext,
rcp<Texture> textureRef,
const VkImageViewCreateInfo& info) :
RenderingResource(std::move(vk)),
Resource(std::move(vulkanContext)),
m_textureRefOrNull(std::move(textureRef)),
m_info(info)
{
assert(m_textureRefOrNull == nullptr || info.image == *m_textureRefOrNull);
m_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
VK_CHECK(
m_vk->CreateImageView(m_vk->device, &m_info, nullptr, &m_vkImageView));
vk()->CreateImageView(vk()->device, &m_info, nullptr, &m_vkImageView));
}
TextureView::~TextureView()
{
m_vk->DestroyImageView(m_vk->device, m_vkImageView, nullptr);
vk()->DestroyImageView(vk()->device, m_vkImageView, nullptr);
}
Framebuffer::Framebuffer(rcp<VulkanContext> vk,
Framebuffer::Framebuffer(rcp<VulkanContext> vulkanContext,
const VkFramebufferCreateInfo& info) :
RenderingResource(std::move(vk)), m_info(info)
Resource(std::move(vulkanContext)), m_info(info)
{
m_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
VK_CHECK(m_vk->CreateFramebuffer(m_vk->device,
VK_CHECK(vk()->CreateFramebuffer(vk()->device,
&m_info,
nullptr,
&m_vkFramebuffer));
@@ -294,6 +261,6 @@ Framebuffer::Framebuffer(rcp<VulkanContext> vk,
Framebuffer::~Framebuffer()
{
m_vk->DestroyFramebuffer(m_vk->device, m_vkFramebuffer, nullptr);
vk()->DestroyFramebuffer(vk()->device, m_vkFramebuffer, nullptr);
}
} // namespace rive::gpu::vkutil

View File

@@ -67,12 +67,7 @@ VulkanContext::VulkanContext(
"No suitable depth format supported!");
}
VulkanContext::~VulkanContext()
{
assert(m_currentFrameNumber == SHUTDOWN_FRAME_NUMBER);
assert(m_resourcePurgatory.empty());
vmaDestroyAllocator(m_vmaAllocator);
}
VulkanContext::~VulkanContext() { vmaDestroyAllocator(m_vmaAllocator); }
bool VulkanContext::isFormatSupportedWithFeatureFlags(
VkFormat format,
@@ -85,48 +80,6 @@ bool VulkanContext::isFormatSupportedWithFeatureFlags(
return properties.optimalTilingFeatures & featureFlags;
}
void VulkanContext::advanceFrameNumber(uint64_t nextFrameNumber,
uint64_t safeFrameNumber)
{
assert(nextFrameNumber >= m_currentFrameNumber);
assert(nextFrameNumber < SHUTDOWN_FRAME_NUMBER);
assert(safeFrameNumber >= m_safeFrameNumber);
assert(safeFrameNumber < nextFrameNumber);
m_currentFrameNumber = nextFrameNumber;
m_safeFrameNumber = safeFrameNumber;
// Delete all resources that are no longer referenced by an in-flight
// command buffer.
while (!m_resourcePurgatory.empty() &&
m_resourcePurgatory.front().lastFrameNumber <= m_safeFrameNumber)
{
m_resourcePurgatory.pop_front();
}
}
void VulkanContext::onRenderingResourceReleased(
const vkutil::RenderingResource* resource)
{
assert(resource->vulkanContext() == this);
if (m_currentFrameNumber != SHUTDOWN_FRAME_NUMBER)
{
// Hold this resource until it is no longer referenced by an in-flight
// command buffer.
m_resourcePurgatory.emplace_back(resource, m_currentFrameNumber);
}
else
{
// We're in a shutdown cycle. Delete immediately.
delete resource;
}
}
void VulkanContext::shutdown()
{
m_currentFrameNumber = SHUTDOWN_FRAME_NUMBER;
m_resourcePurgatory.clear();
}
rcp<vkutil::Buffer> VulkanContext::makeBuffer(const VkBufferCreateInfo& info,
vkutil::Mappability mappability)
{

View File

@@ -1 +0,0 @@
silvers/** filter=lfs diff=lfs merge=lfs -text

Binary file not shown.

View File

@@ -26,15 +26,15 @@ while [[ $# -gt 0 ]]; do
memory)
echo Will perform memory checks...
UTILITY='leaks --atExit --'
shift
shift # past argument
;;
release)
CONFIG=release
shift
shift # past argument
;;
rebaseline)
export REBASELINE_SILVERS=true
shift
shift # past argument
;;
*)
shift # past argument