Initial file moves and renames

Diffs=
25d423274 Initial file moves and renames (#7951)

Co-authored-by: rivessamr <suki@rive.app>
rivessamr
2024-08-27 22:26:30 +00:00
parent 5737f9017a
commit 1b24bf9f89
137 changed files with 1391 additions and 1383 deletions

View File

@@ -1 +1 @@
69cffe900faeb0241d20ea8ece3f5cfc5f1563a5
25d423274ff8e2894c874093b2d5ca86fc2895c2

View File

@@ -1,44 +0,0 @@
/*
* Copyright 2022 Rive
*/
#include "rive/pls/pls_factory.hpp"
#include "pls_paint.hpp"
#include "pls_path.hpp"
#include "rive/pls/pls_renderer.hpp"
namespace rive::pls
{
rcp<RenderShader> PLSFactory::makeLinearGradient(float sx,
float sy,
float ex,
float ey,
const ColorInt colors[], // [count]
const float stops[], // [count]
size_t count)
{
return PLSGradient::MakeLinear(sx, sy, ex, ey, colors, stops, count);
}
rcp<RenderShader> PLSFactory::makeRadialGradient(float cx,
float cy,
float radius,
const ColorInt colors[], // [count]
const float stops[], // [count]
size_t count)
{
return PLSGradient::MakeRadial(cx, cy, radius, colors, stops, count);
}
rcp<RenderPath> PLSFactory::makeRenderPath(RawPath& rawPath, FillRule fillRule)
{
return make_rcp<PLSPath>(fillRule, rawPath);
}
rcp<RenderPath> PLSFactory::makeEmptyRenderPath() { return make_rcp<PLSPath>(); }
rcp<RenderPaint> PLSFactory::makeRenderPaint() { return make_rcp<PLSPaint>(); }
} // namespace rive::pls

View File

@@ -2,13 +2,13 @@
The Rive Renderer is a vector and raster graphics renderer custom-built for Rive content, for animation, and for runtime.
This folder contains the renderer code and an example for how to interface with it directly. It contains the best in class concrete implementation of Rive's rendering abstraction layer, which we call the Rive Renderer.
This directory contains the renderer code and an example for how to interface with it directly. It contains the best in class concrete implementation of Rive's rendering abstraction layer, which we call the Rive Renderer.
## Clone the rive-runtime repo
```
git clone https://github.com/rive-app/rive-runtime.git
cd rive-runtime/pls
cd rive-runtime/renderer
```
## Build GLFW

View File

@@ -4,9 +4,9 @@
#pragma once
#include "rive/pls/pls.hpp"
#include "rive/renderer/gpu.hpp"
namespace rive::pls
namespace rive::gpu
{
// API-agnostic implementation of an abstract buffer ring. We use rings to ensure the GPU can render
// one frame in parallel while the CPU prepares the next frame.
@@ -86,4 +86,4 @@ protected:
void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override { return shadowBuffer(); }
void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {}
};
} // namespace rive::pls
} // namespace rive::gpu
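
A minimal sketch of the ring idea described in the BufferRing comment above. This is not the real BufferRing API, and the ring depth below is an assumption; the point is only that the CPU fills buffer N+1 while the GPU may still be reading buffer N.

```
#include <array>
#include <cstddef>

constexpr size_t kBufferRingSize = 3; // assumed ring depth; the real constant lives in gpu.hpp

template <typename Buffer> class SimpleBufferRing
{
public:
    // Buffer the CPU may write this frame; the GPU may still be reading older slots.
    Buffer& currentBuffer() { return m_buffers[m_idx]; }

    // Rotate after submitting the frame so the next CPU write targets a fresh slot.
    void advance() { m_idx = (m_idx + 1) % kBufferRingSize; }

private:
    std::array<Buffer, kBufferRingSize> m_buffers{};
    size_t m_idx = 0;
};
```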

View File

@@ -4,12 +4,12 @@
#pragma once
#include "rive/pls/d3d/d3d11.hpp"
#include "rive/pls/pls_render_context_helper_impl.hpp"
#include "rive/renderer/d3d/d3d11.hpp"
#include "rive/renderer/render_context_helper_impl.hpp"
#include <map>
#include <string>
namespace rive::pls
namespace rive::gpu
{
class PLSRenderContextD3DImpl;
@@ -119,7 +119,7 @@ private:
std::unique_ptr<BufferRing> makeUniformBufferRing(size_t capacityInBytes) override;
std::unique_ptr<BufferRing> makeStorageBufferRing(size_t capacityInBytes,
pls::StorageBufferStructure) override;
gpu::StorageBufferStructure) override;
std::unique_ptr<BufferRing> makeVertexBufferRing(size_t capacityInBytes) override;
std::unique_ptr<BufferRing> makeTextureTransferBufferRing(size_t capacityInBytes) override;
@@ -134,9 +134,9 @@ private:
UINT firstHighLevelStruct);
void setPipelineLayoutAndShaders(DrawType,
pls::ShaderFeatures,
pls::InterlockMode,
pls::ShaderMiscFlags pixelShaderMiscFlags);
gpu::ShaderFeatures,
gpu::InterlockMode,
gpu::ShaderMiscFlags pixelShaderMiscFlags);
const D3DCapabilities m_d3dCapabilities;
@@ -175,7 +175,7 @@ private:
ComPtr<ID3D11Buffer> m_patchVertexBuffer;
ComPtr<ID3D11Buffer> m_patchIndexBuffer;
// Vertex/index buffers for drawing image rects. (pls::InterlockMode::atomics only.)
// Vertex/index buffers for drawing image rects. (gpu::InterlockMode::atomics only.)
ComPtr<ID3D11Buffer> m_imageRectVertexBuffer;
ComPtr<ID3D11Buffer> m_imageRectIndexBuffer;
@@ -198,4 +198,4 @@ private:
ComPtr<ID3D11BlendState> m_srcOverBlendState;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -6,18 +6,18 @@
#include "rive/math/raw_path.hpp"
#include "rive/math/wangs_formula.hpp"
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_render_context.hpp"
#include "rive/pls/fixed_queue.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/render_context.hpp"
#include "rive/renderer/fixed_queue.hpp"
#include "rive/shapes/paint/stroke_cap.hpp"
#include "rive/shapes/paint/stroke_join.hpp"
#include "rive/refcnt.hpp"
namespace rive::pls
namespace rive::gpu
{
class PLSDraw;
class PLSPath;
class PLSPaint;
class RiveRenderPath;
class RiveRenderPaint;
class PLSRenderContext;
class PLSGradient;
@@ -47,28 +47,28 @@ public:
const Mat2D& matrix() const { return m_matrix; }
BlendMode blendMode() const { return m_blendMode; }
Type type() const { return m_type; }
pls::DrawContents drawContents() const { return m_drawContents; }
bool isStroked() const { return m_drawContents & pls::DrawContents::stroke; }
bool isEvenOddFill() const { return m_drawContents & pls::DrawContents::evenOddFill; }
bool isOpaque() const { return m_drawContents & pls::DrawContents::opaquePaint; }
gpu::DrawContents drawContents() const { return m_drawContents; }
bool isStroked() const { return m_drawContents & gpu::DrawContents::stroke; }
bool isEvenOddFill() const { return m_drawContents & gpu::DrawContents::evenOddFill; }
bool isOpaque() const { return m_drawContents & gpu::DrawContents::opaquePaint; }
uint32_t clipID() const { return m_clipID; }
bool hasClipRect() const { return m_clipRectInverseMatrix != nullptr; }
const pls::ClipRectInverseMatrix* clipRectInverseMatrix() const
const gpu::ClipRectInverseMatrix* clipRectInverseMatrix() const
{
return m_clipRectInverseMatrix;
}
pls::SimplePaintValue simplePaintValue() const { return m_simplePaintValue; }
gpu::SimplePaintValue simplePaintValue() const { return m_simplePaintValue; }
const PLSGradient* gradient() const { return m_gradientRef; }
// Clipping setup.
void setClipID(uint32_t clipID);
void setClipRect(const pls::ClipRectInverseMatrix* m) { m_clipRectInverseMatrix = m; }
void setClipRect(const gpu::ClipRectInverseMatrix* m) { m_clipRectInverseMatrix = m; }
// Used to allocate GPU resources for a collection of draws.
using ResourceCounters = PLSRenderContext::LogicalFlush::ResourceCounters;
const ResourceCounters& resourceCounts() const { return m_resourceCounts; }
// Linked list of all PLSDraws within a pls::DrawBatch.
// Linked list of all PLSDraws within a gpu::DrawBatch.
void setBatchInternalNeighbor(const PLSDraw* neighbor)
{
assert(m_batchInternalNeighbor == nullptr);
@@ -97,9 +97,9 @@ protected:
const Type m_type;
uint32_t m_clipID = 0;
const pls::ClipRectInverseMatrix* m_clipRectInverseMatrix = nullptr;
const gpu::ClipRectInverseMatrix* m_clipRectInverseMatrix = nullptr;
pls::DrawContents m_drawContents = pls::DrawContents::none;
gpu::DrawContents m_drawContents = gpu::DrawContents::none;
// Filled in by the subclass constructor.
ResourceCounters m_resourceCounts;
@@ -107,9 +107,9 @@ protected:
// Gradient data used by some draws. Stored in the base class so allocateGradientIfNeeded()
// doesn't have to be virtual.
const PLSGradient* m_gradientRef = nullptr;
pls::SimplePaintValue m_simplePaintValue;
gpu::SimplePaintValue m_simplePaintValue;
// Linked list of all PLSDraws within a pls::DrawBatch.
// Linked list of all PLSDraws within a gpu::DrawBatch.
const PLSDraw* m_batchInternalNeighbor = nullptr;
};
@@ -117,57 +117,58 @@ protected:
inline void PLSDrawReleaseRefs::operator()(PLSDraw* draw) { draw->releaseRefs(); }
// High level abstraction of a single path to be drawn (midpoint fan or interior triangulation).
class PLSPathDraw : public PLSDraw
class RiveRenderPathDraw : public PLSDraw
{
public:
// Creates either a normal path draw or an interior triangulation if the path is large enough.
static PLSDrawUniquePtr Make(PLSRenderContext*,
const Mat2D&,
rcp<const PLSPath>,
rcp<const RiveRenderPath>,
FillRule,
const PLSPaint*,
const RiveRenderPaint*,
RawPath* scratchPath);
FillRule fillRule() const { return m_fillRule; }
pls::PaintType paintType() const { return m_paintType; }
gpu::PaintType paintType() const { return m_paintType; }
float strokeRadius() const { return m_strokeRadius; }
pls::ContourDirections contourDirections() const { return m_contourDirections; }
gpu::ContourDirections contourDirections() const { return m_contourDirections; }
void pushToRenderContext(PLSRenderContext::LogicalFlush*) final;
void releaseRefs() override;
public:
PLSPathDraw(IAABB pathBounds,
const Mat2D&,
rcp<const PLSPath>,
FillRule,
const PLSPaint*,
Type,
pls::InterlockMode);
RiveRenderPathDraw(IAABB pathBounds,
const Mat2D&,
rcp<const RiveRenderPath>,
FillRule,
const RiveRenderPaint*,
Type,
gpu::InterlockMode);
virtual void onPushToRenderContext(PLSRenderContext::LogicalFlush*) = 0;
const PLSPath* const m_pathRef;
const FillRule m_fillRule; // Bc PLSPath fillRule can mutate during the artboard draw process.
const pls::PaintType m_paintType;
const RiveRenderPath* const m_pathRef;
const FillRule
m_fillRule; // Bc RiveRenderPath fillRule can mutate during the artboard draw process.
const gpu::PaintType m_paintType;
float m_strokeRadius = 0;
pls::ContourDirections m_contourDirections;
gpu::ContourDirections m_contourDirections;
// Used to guarantee m_pathRef doesn't change for the entire time we hold it.
RIVE_DEBUG_CODE(uint64_t m_rawPathMutationID;)
};
// Draws a path by fanning tessellation patches around the midpoint of each contour.
class MidpointFanPathDraw : public PLSPathDraw
class MidpointFanPathDraw : public RiveRenderPathDraw
{
public:
MidpointFanPathDraw(PLSRenderContext*,
IAABB pixelBounds,
const Mat2D&,
rcp<const PLSPath>,
rcp<const RiveRenderPath>,
FillRule,
const PLSPaint*);
const RiveRenderPaint*);
protected:
void onPushToRenderContext(PLSRenderContext::LogicalFlush*) override;
@@ -219,7 +220,7 @@ protected:
// Draws a path by triangulating the interior into non-overlapping triangles and tessellating the
// outer curves.
class InteriorTriangulationDraw : public PLSPathDraw
class InteriorTriangulationDraw : public RiveRenderPathDraw
{
public:
enum class TriangulatorAxis
@@ -232,9 +233,9 @@ public:
InteriorTriangulationDraw(PLSRenderContext*,
IAABB pixelBounds,
const Mat2D&,
rcp<const PLSPath>,
rcp<const RiveRenderPath>,
FillRule,
const PLSPaint*,
const RiveRenderPaint*,
RawPath* scratchPath,
TriangulatorAxis);
@@ -353,4 +354,4 @@ public:
protected:
const uint32_t m_previousClipID;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,9 +4,9 @@
#pragma once
#include "rive/pls/trivial_block_allocator.hpp"
#include "rive/renderer/trivial_block_allocator.hpp"
namespace rive::pls
namespace rive::gpu
{
// Fast, simple queue that operates on a block-allocated array. push_back() may only be called up to
// m_capacity times before the queue must be rewound.
@@ -69,4 +69,4 @@ private:
T* m_end = nullptr;
RIVE_DEBUG_CODE(size_t m_capacity = 0;)
};
} // namespace rive::pls
} // namespace rive::gpu
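
A hedged sketch of the fixed-queue behavior the comment above describes (not the real FixedQueue API): slots come from a preallocated array, the producer must not push more than the reserved capacity, and rewind() reuses the same allocation for the next batch.

```
#include <cassert>
#include <cstddef>
#include <vector>

template <typename T> class SimpleFixedQueue
{
public:
    void reserve(size_t capacity) { m_storage.resize(capacity); rewind(); }

    T& push_back(const T& value)
    {
        assert(m_size < m_storage.size()); // caller must respect the reserved capacity
        return m_storage[m_size++] = value;
    }

    T& operator[](size_t i) { return m_storage[i]; }
    size_t size() const { return m_size; }

    // Reuse the same allocation for the next batch of pushes.
    void rewind() { m_size = 0; }

private:
    std::vector<T> m_storage;
    size_t m_size = 0;
};
```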

View File

@@ -4,11 +4,11 @@
#pragma once
#include "rive/pls/gl/gles3.hpp"
#include "rive/renderer/gl/gles3.hpp"
#include "rive/refcnt.hpp"
#include "rive/shapes/paint/blend_mode.hpp"
namespace rive::pls
namespace rive::gpu
{
// Lightweight wrapper around common GL state.
class GLState : public RefCnt<GLState>
@@ -57,4 +57,4 @@ private:
bool boundPixelUnpackBufferID : 1;
} m_validState;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,7 +4,7 @@
#pragma once
#include "rive/pls/gl/gles3.hpp"
#include "rive/renderer/gl/gles3.hpp"
#include "rive/math/aabb.hpp"
#include <cstddef>
#include <utility>

View File

@@ -4,11 +4,11 @@
#pragma once
#include "rive/pls/pls.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/enum_bitset.hpp"
#include <iostream>
namespace rive::pls
namespace rive::gpu
{
// When using EXT_shader_pixel_local_storage, we have to emulate the render pass load/store actions
// using a shader. These bits define specific actions that can be turned on or off in that shader.
@@ -25,9 +25,9 @@ RIVE_MAKE_ENUM_BITSET(LoadStoreActionsEXT)
// Determines the specific load actions that need to be emulated for the given render pass, and
// unpacks the clear color, if required.
LoadStoreActionsEXT BuildLoadActionsEXT(const pls::FlushDescriptor&,
LoadStoreActionsEXT BuildLoadActionsEXT(const gpu::FlushDescriptor&,
std::array<float, 4>* clearColor4f);
// Appends pls_load_store_ext.glsl to the stream, with the appropriate #defines prepended.
// Appends load_store_ext.glsl to the stream, with the appropriate #defines prepended.
std::ostream& BuildLoadStoreEXTGLSL(std::ostream&, LoadStoreActionsEXT);
} // namespace rive::pls
} // namespace rive::gpu
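
To make the load/store emulation described above concrete, here is a hedged sketch of how a bitset of load actions could be turned into #defines prepended to the GLSL stream. The enum values and define names are illustrative, not the renderer's real LoadStoreActionsEXT bits or macros.

```
#include <cstdint>
#include <ostream>

enum class LoadActions : uint32_t
{
    none          = 0,
    clearColor    = 1 << 0, // illustrative bit names, not the real enum
    loadColor     = 1 << 1,
    clearCoverage = 1 << 2,
};

// Prepend one #define per enabled action; the caller then appends load_store_ext.glsl.
inline std::ostream& buildLoadStoreGLSL(std::ostream& glsl, uint32_t actions)
{
    if (actions & static_cast<uint32_t>(LoadActions::clearColor))
        glsl << "#define CLEAR_COLOR\n";
    if (actions & static_cast<uint32_t>(LoadActions::loadColor))
        glsl << "#define LOAD_COLOR\n";
    if (actions & static_cast<uint32_t>(LoadActions::clearCoverage))
        glsl << "#define CLEAR_COVERAGE\n";
    return glsl;
}
```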

View File

@@ -5,11 +5,11 @@
#pragma once
#include "rive/renderer.hpp"
#include "rive/pls/gl/gles3.hpp"
#include "rive/pls/pls.hpp"
#include "rive/renderer/gl/gles3.hpp"
#include "rive/renderer/gpu.hpp"
#include <array>
namespace rive::pls
namespace rive::gpu
{
class GLState;
@@ -28,7 +28,7 @@ protected:
void init(rcp<GLState>);
// Used by the android runtime to marshal buffers off to the GL thread for deletion.
std::array<GLuint, pls::kBufferRingSize> detachBuffers();
std::array<GLuint, gpu::kBufferRingSize> detachBuffers();
void* onMap() override;
void onUnmap() override;
@@ -41,9 +41,9 @@ private:
bool canMapBuffer() const;
const GLenum m_target;
std::array<GLuint, pls::kBufferRingSize> m_bufferIDs{};
std::array<GLuint, gpu::kBufferRingSize> m_bufferIDs{};
int m_submittedBufferIdx = -1;
std::unique_ptr<uint8_t[]> m_fallbackMappedMemory; // Used when canMapBuffer() is false.
rcp<GLState> m_state;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,15 +4,15 @@
#pragma once
#include "rive/pls/gl/gl_state.hpp"
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/pls/pls_render_context_helper_impl.hpp"
#include "rive/renderer/gl/gl_state.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include "rive/renderer/render_context_helper_impl.hpp"
#include <map>
namespace rive::pls
namespace rive::gpu
{
class PLSPath;
class PLSPaint;
class RiveRenderPath;
class RiveRenderPaint;
class PLSRenderTargetGL;
// OpenGL backend implementation of PLSRenderContextImpl.
@@ -74,26 +74,26 @@ private:
const FlushDescriptor&) = 0;
// Depending on how we handle PLS atomic resolves, the PLSImpl may require certain flags.
virtual pls::ShaderMiscFlags shaderMiscFlags(const pls::FlushDescriptor&,
pls::DrawType) const
virtual gpu::ShaderMiscFlags shaderMiscFlags(const gpu::FlushDescriptor&,
gpu::DrawType) const
{
return pls::ShaderMiscFlags::none;
return gpu::ShaderMiscFlags::none;
}
// Called before issuing a plsAtomicResolve draw, so the PLSImpl can make any necessary GL
// state changes.
virtual void setupAtomicResolve(PLSRenderContextGLImpl*, const pls::FlushDescriptor&) {}
virtual void setupAtomicResolve(PLSRenderContextGLImpl*, const gpu::FlushDescriptor&) {}
virtual void pushShaderDefines(pls::InterlockMode,
virtual void pushShaderDefines(gpu::InterlockMode,
std::vector<const char*>* defines) const = 0;
void ensureRasterOrderingEnabled(PLSRenderContextGLImpl*,
const pls::FlushDescriptor&,
const gpu::FlushDescriptor&,
bool enabled);
void barrier(const pls::FlushDescriptor& desc)
void barrier(const gpu::FlushDescriptor& desc)
{
assert(m_rasterOrderingEnabled == pls::TriState::no);
assert(m_rasterOrderingEnabled == gpu::TriState::no);
onBarrier(desc);
}
@@ -101,9 +101,9 @@ private:
private:
virtual void onEnableRasterOrdering(bool enabled) {}
virtual void onBarrier(const pls::FlushDescriptor& desc) {}
virtual void onBarrier(const gpu::FlushDescriptor& desc) {}
pls::TriState m_rasterOrderingEnabled = pls::TriState::unknown;
gpu::TriState m_rasterOrderingEnabled = gpu::TriState::unknown;
};
class PLSImplEXTNative;
@@ -133,10 +133,10 @@ private:
DrawShader(PLSRenderContextGLImpl* plsContextImpl,
GLenum shaderType,
pls::DrawType drawType,
gpu::DrawType drawType,
ShaderFeatures shaderFeatures,
pls::InterlockMode interlockMode,
pls::ShaderMiscFlags shaderMiscFlags);
gpu::InterlockMode interlockMode,
gpu::ShaderMiscFlags shaderMiscFlags);
~DrawShader() { glDeleteShader(m_id); }
@@ -155,10 +155,10 @@ private:
DrawProgram(const DrawProgram&) = delete;
DrawProgram& operator=(const DrawProgram&) = delete;
DrawProgram(PLSRenderContextGLImpl*,
pls::DrawType,
pls::ShaderFeatures,
pls::InterlockMode,
pls::ShaderMiscFlags);
gpu::DrawType,
gpu::ShaderFeatures,
gpu::InterlockMode,
gpu::ShaderMiscFlags);
~DrawProgram();
GLuint id() const { return m_id; }
@@ -173,7 +173,7 @@ private:
std::unique_ptr<BufferRing> makeUniformBufferRing(size_t capacityInBytes) override;
std::unique_ptr<BufferRing> makeStorageBufferRing(size_t capacityInBytes,
pls::StorageBufferStructure) override;
gpu::StorageBufferStructure) override;
std::unique_ptr<BufferRing> makeVertexBufferRing(size_t capacityInBytes) override;
std::unique_ptr<BufferRing> makeTextureTransferBufferRing(size_t capacityInBytes) override;
@@ -223,4 +223,4 @@ private:
const rcp<GLState> m_state;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,13 +4,13 @@
#pragma once
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_render_target.hpp"
#include "rive/pls/gl/gles3.hpp"
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/render_target.hpp"
#include "rive/renderer/gl/gles3.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include "utils/lite_rtti.hpp"
namespace rive::pls
namespace rive::gpu
{
class PLSRenderContextGLImpl;
@@ -25,7 +25,7 @@ public:
// Ensures backing textures for the internal PLS planes are allocated.
// Does not allocate an offscreen target texture.
// Does not allocate an "scratchColor" texture if InterlockMode is experimentalAtomics.
virtual void allocateInternalPLSTextures(pls::InterlockMode) = 0;
virtual void allocateInternalPLSTextures(gpu::InterlockMode) = 0;
// Specifies which PLS planes to enable when a render target is bound.
enum class DrawBufferMask
@@ -108,7 +108,7 @@ public:
{
bindInternalFramebuffer(target, DrawBufferMask::color);
}
void allocateInternalPLSTextures(pls::InterlockMode) final;
void allocateInternalPLSTextures(gpu::InterlockMode) final;
void bindInternalFramebuffer(GLenum target, DrawBufferMask) final;
void bindHeadlessFramebuffer(const GLCapabilities&) final;
void bindAsImageTextures(DrawBufferMask) final;
@@ -172,7 +172,7 @@ public:
void allocateOffscreenTargetTexture();
void bindDestinationFramebuffer(GLenum target) final;
void allocateInternalPLSTextures(pls::InterlockMode) final;
void allocateInternalPLSTextures(gpu::InterlockMode) final;
void bindInternalFramebuffer(GLenum target, DrawBufferMask) final;
void bindHeadlessFramebuffer(const GLCapabilities&) final;
void bindAsImageTextures(DrawBufferMask) final;
@@ -193,4 +193,4 @@ private:
// Created for m_textureRenderTarget if/when we can't render directly to the framebuffer.
glutils::Texture m_offscreenTargetTexture = glutils::Texture::Zero();
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -12,7 +12,7 @@
#include "rive/refcnt.hpp"
#include "rive/shapes/paint/blend_mode.hpp"
#include "rive/shapes/paint/color.hpp"
#include "rive/pls/trivial_block_allocator.hpp"
#include "rive/renderer/trivial_block_allocator.hpp"
namespace rive
{
@@ -31,7 +31,7 @@ class RenderBuffer;
//
// Batching strokes as well:
// https://docs.google.com/document/d/1CRKihkFjbd1bwT08ErMCP4fwSR7D4gnHvgdw_esY9GM/edit
namespace rive::pls
namespace rive::gpu
{
class PLSDraw;
class PLSGradient;
@@ -107,7 +107,7 @@ struct PlatformFeatures
// bottom-up or top-down?
bool atomicPLSMustBeInitializedAsDraw = false; // Backend cannot initialize PLS with typical
// clear/load APIs in atomic mode. Issue a
// "DrawType::plsAtomicInitialize" draw instead.
// "DrawType::gpuAtomicInitialize" draw instead.
uint8_t pathIDGranularity = 1; // Workaround for precision issues. Determines how far apart we
// space unique path IDs.
};
@@ -456,8 +456,8 @@ enum class DrawType : uint8_t
interiorTriangulation,
imageRect,
imageMesh,
plsAtomicInitialize, // Clear/init PLS data when we can't do it with existing clear/load APIs.
plsAtomicResolve, // Resolve PLS data to the final renderTarget color in atomic mode.
gpuAtomicInitialize, // Clear/init PLS data when we can't do it with existing clear/load APIs.
gpuAtomicResolve, // Resolve PLS data to the final renderTarget color in atomic mode.
stencilClipReset, // Clear or intersect (based on DrawContents) the stencil clip bit.
};
@@ -471,8 +471,8 @@ constexpr static bool DrawTypeIsImageDraw(DrawType drawType)
case DrawType::midpointFanPatches:
case DrawType::outerCurvePatches:
case DrawType::interiorTriangulation:
case DrawType::plsAtomicInitialize:
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicInitialize:
case DrawType::gpuAtomicResolve:
case DrawType::stencilClipReset:
return false;
}
@@ -578,7 +578,7 @@ enum class ShaderFeatures
RIVE_MAKE_ENUM_BITSET(ShaderFeatures)
constexpr static size_t kShaderFeatureCount = 6;
constexpr static ShaderFeatures kAllShaderFeatures =
static_cast<pls::ShaderFeatures>((1 << kShaderFeatureCount) - 1);
static_cast<gpu::ShaderFeatures>((1 << kShaderFeatureCount) - 1);
constexpr static ShaderFeatures kVertexShaderFeaturesMask = ShaderFeatures::ENABLE_CLIPPING |
ShaderFeatures::ENABLE_CLIP_RECT |
ShaderFeatures::ENABLE_ADVANCED_BLEND;
@@ -606,7 +606,7 @@ constexpr static ShaderFeatures ShaderFeaturesMaskFor(DrawType drawType,
{
case DrawType::imageRect:
case DrawType::imageMesh:
if (interlockMode != pls::InterlockMode::atomics)
if (interlockMode != gpu::InterlockMode::atomics)
{
mask = ShaderFeatures::ENABLE_CLIPPING | ShaderFeatures::ENABLE_CLIP_RECT |
ShaderFeatures::ENABLE_ADVANCED_BLEND |
@@ -619,11 +619,11 @@ constexpr static ShaderFeatures ShaderFeaturesMaskFor(DrawType drawType,
case DrawType::midpointFanPatches:
case DrawType::outerCurvePatches:
case DrawType::interiorTriangulation:
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicResolve:
mask = kAllShaderFeatures;
break;
case DrawType::plsAtomicInitialize:
assert(interlockMode == pls::InterlockMode::atomics);
case DrawType::gpuAtomicInitialize:
assert(interlockMode == gpu::InterlockMode::atomics);
mask = ShaderFeatures::ENABLE_CLIPPING | ShaderFeatures::ENABLE_ADVANCED_BLEND;
break;
case DrawType::stencilClipReset:
@@ -644,15 +644,15 @@ enum class ShaderMiscFlags : uint32_t
// need to read the color buffer when advanced blend is not used.
fixedFunctionColorBlend = 1 << 0,
// DrawType::plsAtomicInitialize only. Also store the color clear value to PLS when drawing a
// DrawType::gpuAtomicInitialize only. Also store the color clear value to PLS when drawing a
// clear, in addition to clearing the other PLS planes.
storeColorClear = 1 << 1,
// DrawType::plsAtomicInitialize only. Swizzle the existing framebuffer contents from BGRA to
// DrawType::gpuAtomicInitialize only. Swizzle the existing framebuffer contents from BGRA to
// RGBA. (For when this data had to get copied from a BGRA target.)
swizzleColorBGRAToRGBA = 1 << 2,
// DrawType::plsAtomicResolve only. Optimization for when rendering to an offscreen texture.
// DrawType::gpuAtomicResolve only. Optimization for when rendering to an offscreen texture.
//
// It renders the final "resolve" operation directly to the renderTarget in a single pass,
// instead of (1) resolving the offscreen texture, and then (2) copying the offscreen texture to
@@ -686,8 +686,8 @@ RIVE_MAKE_ENUM_BITSET(DrawContents)
// A nestedClip draw updates the clip buffer while simultaneously clipping against the outerClip
// that is currently in the clip buffer.
constexpr static pls::DrawContents kNestedClipUpdateMask =
(pls::DrawContents::activeClip | pls::DrawContents::clipUpdate);
constexpr static gpu::DrawContents kNestedClipUpdateMask =
(gpu::DrawContents::activeClip | gpu::DrawContents::clipUpdate);
// Low-level batch of geometry to submit to the GPU.
struct DrawBatch
@@ -802,7 +802,7 @@ struct FlushDescriptor
// Fence that will be signalled once "externalCommandBuffer" finishes executing the entire
// frame. (Null if isFinalFlushOfFrame is false.)
pls::CommandBufferCompletionFence* frameCompletionFence = nullptr;
gpu::CommandBufferCompletionFence* frameCompletionFence = nullptr;
bool hasTriangleVertices = false;
bool wireframe = false;
@@ -922,7 +922,7 @@ public:
private:
WRITEONLY float m_matrix[6];
WRITEONLY float m_strokeRadius; // "0" indicates that the path is filled, not stroked.
WRITEONLY uint32_t m_zIndex; // pls::InterlockMode::depthStencil only.
WRITEONLY uint32_t m_zIndex; // gpu::InterlockMode::depthStencil only.
};
static_assert(sizeof(PathData) == StorageBufferElementSizeInBytes(PathData::kBufferStructure) * 2);
static_assert(256 % sizeof(PathData) == 0);
@@ -972,7 +972,7 @@ public:
const PLSTexture*,
const ClipRectInverseMatrix*,
const PLSRenderTarget*,
const pls::PlatformFeatures&);
const gpu::PlatformFeatures&);
private:
WRITEONLY float m_matrix[6]; // Maps _fragCoord to paint coordinates.
@@ -1052,7 +1052,7 @@ private:
WRITEONLY float m_clipRectInverseMatrix[6];
WRITEONLY uint32_t m_clipID;
WRITEONLY uint32_t m_blendMode;
WRITEONLY uint32_t m_zIndex; // pls::InterlockMode::depthStencil only.
WRITEONLY uint32_t m_zIndex; // gpu::InterlockMode::depthStencil only.
// Uniform blocks must be multiples of 256 bytes in size.
WRITEONLY uint8_t m_padTo256Bytes[256 - 68];
@@ -1163,4 +1163,4 @@ enum class TriState
yes,
unknown
};
} // namespace rive::pls
} // namespace rive::gpu
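
A small worked example of the ShaderFeatures bitset arithmetic in the header above. The bit positions below are stand-ins; the real enum defines six ENABLE_* feature bits, and the per-draw mask is hypothetical.

```
#include <cstdint>

constexpr uint32_t ENABLE_CLIPPING       = 1u << 0; // stand-in bit assignments
constexpr uint32_t ENABLE_CLIP_RECT      = 1u << 1;
constexpr uint32_t ENABLE_ADVANCED_BLEND = 1u << 2;

constexpr uint32_t kShaderFeatureCount = 6;
constexpr uint32_t kAllShaderFeatures  = (1u << kShaderFeatureCount) - 1; // 0b111111 == 63
static_assert(kAllShaderFeatures == 63, "six feature bits, all set");

// ShaderFeaturesMaskFor() ANDs away bits a given DrawType can never use, so the backend
// never compiles shader variants for impossible feature combinations. For a hypothetical
// draw type that only supports clipping, clip rects, and advanced blends:
constexpr uint32_t kExampleDrawTypeMask =
    ENABLE_CLIPPING | ENABLE_CLIP_RECT | ENABLE_ADVANCED_BLEND;
static_assert((kAllShaderFeatures & kExampleDrawTypeMask) == 0b111,
              "frame features masked down to the three supported bits");
```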

View File

@@ -6,9 +6,9 @@
#include "rive/refcnt.hpp"
#include "rive/renderer.hpp"
#include "rive/pls/pls_render_context_impl.hpp"
#include "rive/renderer/render_context_impl.hpp"
namespace rive::pls
namespace rive::gpu
{
class PLSTexture : public RefCnt<PLSTexture>
{
@@ -64,4 +64,4 @@ protected:
private:
rcp<PLSTexture> m_texture;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,7 +4,7 @@
#pragma once
#include "rive/pls/pls_render_context_helper_impl.hpp"
#include "rive/renderer/render_context_helper_impl.hpp"
#include <map>
#include <mutex>
@@ -12,7 +12,7 @@
#import <Metal/Metal.h>
#endif
namespace rive::pls
namespace rive::gpu
{
class BackgroundShaderCompiler;
@@ -160,18 +160,18 @@ private:
void prepareToMapBuffers() override;
// Creates a MTLRenderCommandEncoder and sets the common state for PLS draws.
id<MTLRenderCommandEncoder> makeRenderPassForDraws(const pls::FlushDescriptor&,
id<MTLRenderCommandEncoder> makeRenderPassForDraws(const gpu::FlushDescriptor&,
MTLRenderPassDescriptor*,
id<MTLCommandBuffer>,
pls::ShaderMiscFlags baselineMiscFlags);
gpu::ShaderMiscFlags baselineMiscFlags);
// Returns the specific DrawPipeline for the given feature set, if it has been compiled. If it
// has not finished compiling yet, this method may return a (potentially slower) DrawPipeline
// that can draw a superset of the given features.
const DrawPipeline* findCompatibleDrawPipeline(pls::DrawType,
pls::ShaderFeatures,
pls::InterlockMode,
pls::ShaderMiscFlags);
const DrawPipeline* findCompatibleDrawPipeline(gpu::DrawType,
gpu::ShaderFeatures,
gpu::InterlockMode,
gpu::ShaderMiscFlags);
void flush(const FlushDescriptor&) override;
@@ -199,7 +199,7 @@ private:
id<MTLBuffer> m_pathPatchVertexBuffer;
id<MTLBuffer> m_pathPatchIndexBuffer;
// Vertex/index buffers for drawing image rects. (pls::InterlockMode::atomics only.)
// Vertex/index buffers for drawing image rects. (gpu::InterlockMode::atomics only.)
id<MTLBuffer> m_imageRectVertexBuffer;
id<MTLBuffer> m_imageRectIndexBuffer;
@@ -208,4 +208,4 @@ private:
std::mutex m_bufferRingLocks[kBufferRingSize];
int m_bufferRingIdx = 0;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -5,10 +5,10 @@
#pragma once
#include "rive/math/vec2d.hpp"
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_factory.hpp"
#include "rive/pls/pls_render_target.hpp"
#include "rive/pls/trivial_block_allocator.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/rive_render_factory.hpp"
#include "rive/renderer/render_target.hpp"
#include "rive/renderer/trivial_block_allocator.hpp"
#include "rive/shapes/paint/color.hpp"
#include <array>
#include <unordered_map>
@@ -21,7 +21,7 @@ namespace rive
class RawPath;
} // namespace rive
namespace rive::pls
namespace rive::gpu
{
class GradientLibrary;
class IntersectionBoard;
@@ -32,9 +32,9 @@ class MidpointFanPathDraw;
class StencilClipReset;
class PLSDraw;
class PLSGradient;
class PLSPaint;
class PLSPath;
class PLSPathDraw;
class RiveRenderPaint;
class RiveRenderPath;
class RiveRenderPathDraw;
class PLSRenderContextImpl;
// Used as a key for complex gradients.
@@ -66,7 +66,7 @@ struct PLSDrawReleaseRefs
};
using PLSDrawUniquePtr = std::unique_ptr<PLSDraw, PLSDrawReleaseRefs>;
// Top-level, API agnostic rendering context for PLSRenderer. This class manages all the GPU
// Top-level, API agnostic rendering context for RiveRenderer. This class manages all the GPU
// buffers, context state, and other resources required for Rive's pixel local storage path
// rendering algorithm.
//
@@ -86,7 +86,7 @@ using PLSDrawUniquePtr = std::unique_ptr<PLSDraw, PLSDrawReleaseRefs>;
// }
// }
// context->flush();
class PLSRenderContext : public PLSFactory
class PLSRenderContext : public RiveRenderFactory
{
public:
PLSRenderContext(std::unique_ptr<PLSRenderContextImpl>);
@@ -95,7 +95,7 @@ public:
PLSRenderContextImpl* impl() { return m_impl.get(); }
template <typename T> T* static_impl_cast() { return static_cast<T*>(m_impl.get()); }
const pls::PlatformFeatures& platformFeatures() const;
const gpu::PlatformFeatures& platformFeatures() const;
// Options for controlling how and where a frame is rendered.
struct FrameDescriptor
@@ -138,7 +138,7 @@ public:
// as rectangular paths with an image paint.
bool frameSupportsImagePaintForPaths() const;
const pls::InterlockMode frameInterlockMode() const { return m_frameInterlockMode; }
const gpu::InterlockMode frameInterlockMode() const { return m_frameInterlockMode; }
// Generates a unique clip ID that is guaranteed to not exist in the current clip buffer, and
// assigns a contentBounds to it.
@@ -207,7 +207,7 @@ public:
void* externalCommandBuffer = nullptr;
// Fence that will be signalled once "externalCommandBuffer" finishes executing.
pls::CommandBufferCompletionFence* frameCompletionFence = nullptr;
gpu::CommandBufferCompletionFence* frameCompletionFence = nullptr;
};
// Submits all GPU commands that have been built up since beginFrame().
@@ -249,13 +249,13 @@ public:
return m_perFrameAllocator.make<T>(std::forward<Args>(args)...);
}
// Backend-specific PLSFactory implementation.
// Backend-specific RiveRenderFactory implementation.
rcp<RenderBuffer> makeRenderBuffer(RenderBufferType, RenderBufferFlags, size_t) override;
rcp<RenderImage> decodeImage(Span<const uint8_t>) override;
private:
friend class PLSDraw;
friend class PLSPathDraw;
friend class RiveRenderPathDraw;
friend class MidpointFanPathDraw;
friend class InteriorTriangulationDraw;
friend class ImageRectDraw;
@@ -322,8 +322,8 @@ private:
// Per-frame state.
FrameDescriptor m_frameDescriptor;
pls::InterlockMode m_frameInterlockMode;
pls::ShaderFeatures m_frameShaderFeaturesMask;
gpu::InterlockMode m_frameInterlockMode;
gpu::ShaderFeatures m_frameShaderFeaturesMask;
RIVE_DEBUG_CODE(bool m_didBeginFrame = false;)
// Clipping state.
@@ -333,18 +333,18 @@ private:
std::vector<int64_t> m_indirectDrawList;
std::unique_ptr<IntersectionBoard> m_intersectionBoard;
WriteOnlyMappedMemory<pls::FlushUniforms> m_flushUniformData;
WriteOnlyMappedMemory<pls::PathData> m_pathData;
WriteOnlyMappedMemory<pls::PaintData> m_paintData;
WriteOnlyMappedMemory<pls::PaintAuxData> m_paintAuxData;
WriteOnlyMappedMemory<pls::ContourData> m_contourData;
WriteOnlyMappedMemory<gpu::FlushUniforms> m_flushUniformData;
WriteOnlyMappedMemory<gpu::PathData> m_pathData;
WriteOnlyMappedMemory<gpu::PaintData> m_paintData;
WriteOnlyMappedMemory<gpu::PaintAuxData> m_paintAuxData;
WriteOnlyMappedMemory<gpu::ContourData> m_contourData;
// Simple gradients get written by the CPU.
WriteOnlyMappedMemory<pls::TwoTexelRamp> m_simpleColorRampsData;
WriteOnlyMappedMemory<gpu::TwoTexelRamp> m_simpleColorRampsData;
// Complex gradients get rendered by the GPU.
WriteOnlyMappedMemory<pls::GradientSpan> m_gradSpanData;
WriteOnlyMappedMemory<pls::TessVertexSpan> m_tessSpanData;
WriteOnlyMappedMemory<pls::TriangleVertex> m_triangleVertexData;
WriteOnlyMappedMemory<pls::ImageDrawUniforms> m_imageDrawUniformData;
WriteOnlyMappedMemory<gpu::GradientSpan> m_gradSpanData;
WriteOnlyMappedMemory<gpu::TessVertexSpan> m_tessSpanData;
WriteOnlyMappedMemory<gpu::TriangleVertex> m_triangleVertexData;
WriteOnlyMappedMemory<gpu::ImageDrawUniforms> m_imageDrawUniformData;
// Simple allocator for trivially-destructible data that needs to persist until the current
// frame has completed. All memory in this allocator is dropped at the end of the every frame.
@@ -383,10 +383,10 @@ private:
// Resets the CPU-side STL containers so they don't have unbounded growth.
void resetContainers();
// Access this flush's pls::FlushDescriptor (which is not valid until layoutResources()).
// Access this flush's gpu::FlushDescriptor (which is not valid until layoutResources()).
// NOTE: Some fields in the FlushDescriptor (tessVertexSpanCount, hasTriangleVertices,
// drawList, and combinedShaderFeatures) do not become valid until after writeResources().
const pls::FlushDescriptor& desc()
const gpu::FlushDescriptor& desc()
{
assert(m_hasDoneLayout);
return m_flushDesc;
@@ -479,7 +479,7 @@ private:
// issue a logical flush and try again.
[[nodiscard]] bool allocateGradient(const PLSGradient*,
ResourceCounters*,
pls::ColorRampLocation*);
gpu::ColorRampLocation*);
// Carves out space for this specific flush within the total frame's resource buffers and
// lays out the flush-specific resource textures. Updates the total frame running conters
@@ -497,14 +497,14 @@ private:
// Pushes a record to the GPU for the given path, which will be referenced by future calls
// to pushContour() and pushCubic().
void pushPath(PLSPathDraw*, pls::PatchType, uint32_t tessVertexCount);
void pushPath(RiveRenderPathDraw*, gpu::PatchType, uint32_t tessVertexCount);
// Pushes a contour record to the GPU for the given contour, which references the
// most-recently pushed path and will be referenced by future calls to pushCubic().
//
// The first curve of the contour will be pre-padded with 'paddingVertexCount' tessellation
// vertices, colocated at T=0. The caller must use this argument to align the end of the
// contour on a boundary of the patch size. (See pls::PaddingToAlignUp().)
// contour on a boundary of the patch size. (See gpu::PaddingToAlignUp().)
void pushContour(Vec2D midpoint, bool closed, uint32_t paddingVertexCount);
// Appends a cubic curve and join to the most-recently pushed contour, and reserves the
@@ -571,7 +571,7 @@ private:
uint32_t contourIDWithFlags);
// Functionally equivalent to "pushMirroredTessellationSpans(); pushTessellationSpans();",
// but packs each forward and mirrored pair into a single pls::TessVertexSpan.
// but packs each forward and mirrored pair into a single gpu::TessVertexSpan.
RIVE_ALWAYS_INLINE void pushMirroredAndForwardTessellationSpans(
const Vec2D pts[4],
Vec2D joinTangent,
@@ -583,10 +583,13 @@ private:
// Either appends a new drawBatch to m_drawList or merges into m_drawList.tail().
// Updates the batch's ShaderFeatures according to the passed parameters.
DrawBatch& pushPathDraw(PLSPathDraw*, DrawType, uint32_t vertexCount, uint32_t baseVertex);
DrawBatch& pushPathDraw(RiveRenderPathDraw*,
DrawType,
uint32_t vertexCount,
uint32_t baseVertex);
DrawBatch& pushDraw(PLSDraw*,
DrawType,
pls::PaintType,
gpu::PaintType,
uint32_t elementCount,
uint32_t baseElement);
@@ -599,7 +602,7 @@ private:
// Simple gradients have one stop at t=0 and one stop at t=1. They're implemented with 2
// texels.
std::unordered_map<uint64_t, uint32_t> m_simpleGradients; // [color0, color1] -> texelsIdx.
std::vector<pls::TwoTexelRamp> m_pendingSimpleGradientWrites;
std::vector<gpu::TwoTexelRamp> m_pendingSimpleGradientWrites;
// Complex gradients have stop(s) between t=0 and t=1. In theory they should be scaled to a
// ramp where every stop lands exactly on a pixel center, but for now we just always scale
@@ -610,7 +613,7 @@ private:
std::vector<ClipInfo> m_clips;
// High-level draw list. These get built into a low-level list of pls::DrawBatch objects
// High-level draw list. These get built into a low-level list of gpu::DrawBatch objects
// during writeResources().
std::vector<PLSDrawUniquePtr> m_plsDraws;
IAABB m_combinedDrawBounds;
@@ -626,15 +629,15 @@ private:
uint32_t m_outerCubicTessVertexIdx;
uint32_t m_midpointFanTessVertexIdx;
pls::FlushDescriptor m_flushDesc;
pls::GradTextureLayout m_gradTextureLayout; // Not determined until writeResources().
gpu::FlushDescriptor m_flushDesc;
gpu::GradTextureLayout m_gradTextureLayout; // Not determined until writeResources().
BlockAllocatedLinkedList<DrawBatch> m_drawList;
pls::ShaderFeatures m_combinedShaderFeatures;
gpu::ShaderFeatures m_combinedShaderFeatures;
// Most recent path and contour state.
bool m_currentPathIsStroked;
pls::ContourDirections m_currentPathContourDirections;
gpu::ContourDirections m_currentPathContourDirections;
uint32_t m_currentPathID;
uint32_t m_currentContourID;
uint32_t m_currentContourPaddingVertexCount; // Padding to add to the first curve.
@@ -653,4 +656,4 @@ private:
std::vector<std::unique_ptr<LogicalFlush>> m_logicalFlushes;
};
} // namespace rive::pls
} // namespace rive::gpu
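
A minimal sketch, with assumptions noted, of the usage pattern hinted at in the header comment above, spelled with the names introduced by this rename: a PLSRenderContext owns the GPU resources, a RiveRenderer (formerly PLSRenderer) records draws, and flush() submits the frame. The FrameDescriptor setup and the artboard drawing call are illustrative only.

```
#include "rive/artboard.hpp"
#include "rive/renderer/render_context.hpp"
#include "rive/renderer/rive_renderer.hpp"

void drawOneFrame(rive::gpu::PLSRenderContext* context, rive::Artboard* artboard)
{
    rive::gpu::PLSRenderContext::FrameDescriptor frameDescriptor;
    // ... fill in render target size, clear color, etc. ...
    context->beginFrame(frameDescriptor);

    rive::gpu::RiveRenderer renderer(context);
    artboard->draw(&renderer); // any rive::Renderer-based drawing goes here

    context->flush(); // submits all GPU commands built up since beginFrame()
}
```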

View File

@@ -4,11 +4,11 @@
#pragma once
#include "rive/pls/pls_render_context_impl.hpp"
#include "rive/pls/buffer_ring.hpp"
#include "rive/renderer/render_context_impl.hpp"
#include "rive/renderer/buffer_ring.hpp"
#include <chrono>
namespace rive::pls
namespace rive::gpu
{
// PLSRenderContextImpl that uses BufferRing to manage GPU resources.
class PLSRenderContextHelperImpl : public PLSRenderContextImpl
@@ -18,10 +18,10 @@ public:
void resizeFlushUniformBuffer(size_t sizeInBytes) override;
void resizeImageDrawUniformBuffer(size_t sizeInBytes) override;
void resizePathBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override;
void resizePaintBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override;
void resizePaintAuxBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override;
void resizeContourBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override;
void resizePathBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override;
void resizePaintBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override;
void resizePaintAuxBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override;
void resizeContourBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override;
void resizeSimpleColorRampsBuffer(size_t sizeInBytes) override;
void resizeGradSpanBuffer(size_t sizeInBytes) override;
void resizeTessVertexSpanBuffer(size_t sizeInBytes) override;
@@ -74,7 +74,7 @@ protected:
virtual std::unique_ptr<BufferRing> makeUniformBufferRing(size_t capacityInBytes) = 0;
virtual std::unique_ptr<BufferRing> makeStorageBufferRing(size_t capacityInBytes,
pls::StorageBufferStructure) = 0;
gpu::StorageBufferStructure) = 0;
virtual std::unique_ptr<BufferRing> makeVertexBufferRing(size_t capacityInBytes) = 0;
virtual std::unique_ptr<BufferRing> makeTextureTransferBufferRing(size_t capacityInBytes) = 0;
@@ -91,4 +91,4 @@ private:
std::unique_ptr<BufferRing> m_triangleBuffer;
std::chrono::steady_clock::time_point m_localEpoch = std::chrono::steady_clock::now();
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,9 +4,9 @@
#pragma once
#include "rive/pls/pls_render_context.hpp"
#include "rive/renderer/render_context.hpp"
namespace rive::pls
namespace rive::gpu
{
class PLSTexture;
@@ -33,10 +33,10 @@ public:
// buffer as a storage buffer.
virtual void resizeFlushUniformBuffer(size_t sizeInBytes) = 0;
virtual void resizeImageDrawUniformBuffer(size_t sizeInBytes) = 0;
virtual void resizePathBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0;
virtual void resizePaintBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0;
virtual void resizePaintAuxBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0;
virtual void resizeContourBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0;
virtual void resizePathBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0;
virtual void resizePaintBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0;
virtual void resizePaintAuxBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0;
virtual void resizeContourBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0;
virtual void resizeSimpleColorRampsBuffer(size_t sizeInBytes) = 0;
virtual void resizeGradSpanBuffer(size_t sizeInBytes) = 0;
virtual void resizeTessVertexSpanBuffer(size_t sizeInBytes) = 0;
@@ -87,7 +87,7 @@ public:
// 3. Execute the draw list. (The Rive renderer shaders read the gradient and tessellation
// textures in order to do path rendering.)
//
virtual void flush(const pls::FlushDescriptor&) = 0;
virtual void flush(const gpu::FlushDescriptor&) = 0;
// Steady clock, used to determine when we should trim our resource allocations.
virtual double secondsNow() const = 0;
@@ -95,4 +95,4 @@ public:
protected:
PlatformFeatures m_platformFeatures;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -9,7 +9,7 @@
#include "rive/math/aabb.hpp"
#include "rive/math/simd.hpp"
namespace rive::pls
namespace rive::gpu
{
// Wraps a backend-specific buffer that PLSRenderContext draws into.
class PLSRenderTarget : public RefCnt<PLSRenderTarget>
@@ -32,4 +32,4 @@ private:
uint32_t m_width;
uint32_t m_height;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -6,10 +6,10 @@
#include "rive/factory.hpp"
namespace rive::pls
namespace rive::gpu
{
// Partial rive::Factory implementation for the PLS objects that are backend-agnostic.
class PLSFactory : public Factory
class RiveRenderFactory : public Factory
{
public:
rcp<RenderShader> makeLinearGradient(float sx,
@@ -33,4 +33,4 @@ public:
rcp<RenderPaint> makeRenderPaint() override;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -6,9 +6,9 @@
#include "rive/math/raw_path.hpp"
#include "rive/renderer.hpp"
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_draw.hpp"
#include "rive/pls/pls_render_context.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/draw.hpp"
#include "rive/renderer/render_context.hpp"
#include <vector>
namespace rive
@@ -16,18 +16,18 @@ namespace rive
class GrInnerFanTriangulator;
};
namespace rive::pls
namespace rive::gpu
{
class PLSPath;
class PLSPaint;
class RiveRenderPath;
class RiveRenderPaint;
class PLSRenderContext;
// Renderer implementation for Rive's pixel local storage renderer.
class PLSRenderer : public Renderer
class RiveRenderer : public Renderer
{
public:
PLSRenderer(PLSRenderContext*);
~PLSRenderer() override;
RiveRenderer(PLSRenderContext*);
~RiveRenderer() override;
void save() override;
void restore() override;
@@ -54,8 +54,8 @@ public:
#endif
private:
void clipRectImpl(AABB, const PLSPath* originalPath);
void clipPathImpl(const PLSPath*);
void clipRectImpl(AABB, const RiveRenderPath* originalPath);
void clipPathImpl(const RiveRenderPath*);
// Clips and pushes the given draw to m_context. If the clipped draw is too complex to be
// supported by the GPU buffers, even after a logical flush, then nothing is drawn.
@@ -73,7 +73,7 @@ private:
size_t clipStackHeight = 0;
AABB clipRect;
Mat2D clipRectMatrix;
const pls::ClipRectInverseMatrix* clipRectInverseMatrix = nullptr;
const gpu::ClipRectInverseMatrix* clipRectInverseMatrix = nullptr;
bool clipIsEmpty = false;
};
std::vector<RenderState> m_stack{1};
@@ -81,17 +81,18 @@ private:
struct ClipElement
{
ClipElement() = default;
ClipElement(const Mat2D&, const PLSPath*, FillRule);
ClipElement(const Mat2D&, const RiveRenderPath*, FillRule);
~ClipElement();
void reset(const Mat2D&, const PLSPath*, FillRule);
bool isEquivalent(const Mat2D&, const PLSPath*) const;
void reset(const Mat2D&, const RiveRenderPath*, FillRule);
bool isEquivalent(const Mat2D&, const RiveRenderPath*) const;
Mat2D matrix;
uint64_t rawPathMutationID;
AABB pathBounds;
rcp<const PLSPath> path;
FillRule fillRule; // Bc PLSPath fillRule can mutate during the artboard draw process.
rcp<const RiveRenderPath> path;
FillRule
fillRule; // Bc RiveRenderPath fillRule can mutate during the artboard draw process.
uint32_t clipID;
};
std::vector<ClipElement> m_clipStack;
@@ -101,9 +102,9 @@ private:
std::vector<PLSDrawUniquePtr> m_internalDrawBatch;
// Path of the rectangle [0, 0, 1, 1]. Used to draw images.
rcp<PLSPath> m_unitRectPath;
rcp<RiveRenderPath> m_unitRectPath;
// Used to build coarse path interiors for the "interior triangulation" algorithm.
RawPath m_scratchPath;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,14 +4,14 @@
#pragma once
#include "rive/pls/pls_render_context_impl.hpp"
#include "rive/pls/vulkan/vulkan_context.hpp"
#include "rive/renderer/render_context_impl.hpp"
#include "rive/renderer/vulkan/vulkan_context.hpp"
#include <chrono>
#include <map>
#include <vulkan/vulkan.h>
#include <deque>
namespace rive::pls
namespace rive::gpu
{
class PLSTextureVulkanImpl;
@@ -70,10 +70,10 @@ private:
rcp<vkutil::Texture> m_offscreenColorTexture; // Used when m_targetTextureView does not have
// VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
rcp<vkutil::Texture> m_coverageTexture; // pls::InterlockMode::rasterOrdering.
rcp<vkutil::Texture> m_coverageTexture; // gpu::InterlockMode::rasterOrdering.
rcp<vkutil::Texture> m_clipTexture;
rcp<vkutil::Texture> m_scratchColorTexture;
rcp<vkutil::Texture> m_coverageAtomicTexture; // pls::InterlockMode::atomics.
rcp<vkutil::Texture> m_coverageAtomicTexture; // gpu::InterlockMode::atomics.
rcp<vkutil::TextureView> m_offscreenColorTextureView;
rcp<vkutil::TextureView> m_coverageTextureView;
@@ -128,7 +128,7 @@ private:
void unmap##Name() override { m_name.flushMappedContentsAt(m_bufferRingIdx); }
#define IMPLEMENT_PLS_STRUCTURED_BUFFER(Name, m_name) \
void resize##Name(size_t sizeInBytes, pls::StorageBufferStructure) override \
void resize##Name(size_t sizeInBytes, gpu::StorageBufferStructure) override \
{ \
m_name.setTargetSize(sizeInBytes); \
} \
@@ -240,11 +240,11 @@ private:
rcp<vkutil::Buffer> m_imageRectVertexBuffer;
rcp<vkutil::Buffer> m_imageRectIndexBuffer;
rcp<pls::CommandBufferCompletionFence> m_frameCompletionFences[pls::kBufferRingSize];
rcp<gpu::CommandBufferCompletionFence> m_frameCompletionFences[gpu::kBufferRingSize];
int m_bufferRingIdx = -1;
// Pool of DescriptorSetPools that have been fully released. These can be
// recycled once their expirationFrameIdx is reached.
std::deque<vkutil::ZombieResource<DescriptorSetPool>> m_descriptorSetPoolPool;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -5,19 +5,19 @@
#pragma once
#include "rive/refcnt.hpp"
#include "rive/pls/pls.hpp"
#include "rive/renderer/gpu.hpp"
#include <cassert>
#include <stdio.h>
#include <stdlib.h>
#include <vulkan/vulkan.h>
#include <vk_mem_alloc.h>
namespace rive::pls
namespace rive::gpu
{
class VulkanContext;
} // namespace rive::pls
} // namespace rive::gpu
namespace rive::pls::vkutil
namespace rive::gpu::vkutil
{
inline static void vk_check(VkResult res, const char* file, int line)
{
@@ -28,7 +28,7 @@ inline static void vk_check(VkResult res, const char* file, int line)
}
}
#define VK_CHECK(x) ::rive::pls::vkutil::vk_check(x, __FILE__, __LINE__)
#define VK_CHECK(x) ::rive::gpu::vkutil::vk_check(x, __FILE__, __LINE__)
constexpr static VkColorComponentFlags kColorWriteMaskRGBA =
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT |
@@ -69,7 +69,7 @@ private:
template <typename T> struct ZombieResource
{
ZombieResource(T* resource_, uint64_t lastFrameUsed) :
resource(resource_), expirationFrameIdx(lastFrameUsed + pls::kBufferRingSize)
resource(resource_), expirationFrameIdx(lastFrameUsed + gpu::kBufferRingSize)
{
assert(resource_->debugging_refcnt() == 0);
}
@@ -102,7 +102,7 @@ public:
void flushMappedContents(size_t updatedSizeInBytes);
private:
friend class ::rive::pls::VulkanContext;
friend class ::rive::gpu::VulkanContext;
Buffer(rcp<VulkanContext>, const VkBufferCreateInfo&, Mappability);
@@ -156,7 +156,7 @@ public:
private:
size_t m_targetSize;
size_t m_pendingFlushSize = 0;
rcp<vkutil::Buffer> m_buffers[pls::kBufferRingSize];
rcp<vkutil::Buffer> m_buffers[gpu::kBufferRingSize];
};
class Texture : public RenderingResource
@@ -169,7 +169,7 @@ public:
const VkImage* vkImageAddressOf() const { return &m_vkImage; }
private:
friend class ::rive::pls::VulkanContext;
friend class ::rive::gpu::VulkanContext;
Texture(rcp<VulkanContext>, const VkImageCreateInfo&);
@@ -190,7 +190,7 @@ public:
const VkImageView* vkImageViewAddressOf() const { return &m_vkImageView; }
private:
friend class ::rive::pls::VulkanContext;
friend class ::rive::gpu::VulkanContext;
TextureView(rcp<VulkanContext>,
rcp<Texture> textureRef,
@@ -212,7 +212,7 @@ public:
operator VkFramebuffer() const { return m_vkFramebuffer; }
private:
friend class ::rive::pls::VulkanContext;
friend class ::rive::gpu::VulkanContext;
Framebuffer(rcp<VulkanContext>, const VkFramebufferCreateInfo&);
@@ -277,4 +277,4 @@ inline VkClearColorValue color_clear_r32ui(uint32_t value)
ret.uint32[0] = value;
return ret;
}
} // namespace rive::pls::vkutil
} // namespace rive::gpu::vkutil
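
A hedged sketch of the deferred-destruction idea behind ZombieResource above: a GPU resource last used in frame N only becomes safe to free once kBufferRingSize more frames have begun. The real code keeps these entries in a purgatory deque on VulkanContext and purges it in onNewFrameBegun(); the types and the ring size below are assumptions.

```
#include <cstdint>
#include <deque>
#include <memory>

constexpr uint64_t kBufferRingSize = 3; // assumed ring depth

template <typename Resource> struct Zombie
{
    std::unique_ptr<Resource> resource;
    uint64_t expirationFrameIdx; // lastFrameUsed + kBufferRingSize
};

template <typename Resource> class Purgatory
{
public:
    // Retire a resource the GPU may still be using; it stays alive until it expires.
    void retire(std::unique_ptr<Resource> r, uint64_t lastFrameUsed)
    {
        m_zombies.push_back({std::move(r), lastFrameUsed + kBufferRingSize});
    }

    // Called at the start of frame "currentFrameIdx"; frees everything that has expired.
    void purge(uint64_t currentFrameIdx)
    {
        while (!m_zombies.empty() && m_zombies.front().expirationFrameIdx <= currentFrameIdx)
            m_zombies.pop_front();
    }

private:
    std::deque<Zombie<Resource>> m_zombies;
};
```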

View File

@@ -4,10 +4,10 @@
#pragma once
#include "rive/pls/vulkan/vkutil.hpp"
#include "rive/renderer/vulkan/vkutil.hpp"
#include <deque>
namespace rive::pls
namespace rive::gpu
{
// Specifies the Vulkan API version and which relevant features have been enabled.
// The client should ensure the features get enabled if they are supported.
@@ -102,7 +102,7 @@ public:
// Called at the beginning of a new frame. This is where we purge
// m_resourcePurgatory, so the client is responsible to guarantee that all
// command buffers from frame "N + 1 - pls::kBufferRingSize" have finished
// command buffers from frame "N + 1 - gpu::kBufferRingSize" have finished
// executing before calling this method.
void onNewFrameBegun();
@@ -168,4 +168,4 @@ private:
uint64_t m_currentFrameIdx = 0;
bool m_shutdown = false; // Indicates that we are in a shutdown cycle.
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,13 +4,13 @@
#pragma once
#include "rive/pls/pls_render_context_helper_impl.hpp"
#include "rive/pls/webgpu/em_js_handle.hpp"
#include "rive/pls/gl/load_store_actions_ext.hpp"
#include "rive/renderer/render_context_helper_impl.hpp"
#include "rive/renderer/webgpu/em_js_handle.hpp"
#include "rive/renderer/gl/load_store_actions_ext.hpp"
#include <map>
#include <webgpu/webgpu_cpp.h>
namespace rive::pls
namespace rive::gpu
{
class PLSRenderContextWebGPUVulkan;
@@ -70,7 +70,7 @@ public:
wgpu::Device,
wgpu::Queue,
const ContextOptions&,
const pls::PlatformFeatures& baselinePlatformFeatures = {});
const gpu::PlatformFeatures& baselinePlatformFeatures = {});
virtual ~PLSRenderContextWebGPUImpl();
@@ -89,7 +89,7 @@ protected:
PLSRenderContextWebGPUImpl(wgpu::Device device,
wgpu::Queue queue,
const ContextOptions&,
const pls::PlatformFeatures& baselinePlatformFeatures);
const gpu::PlatformFeatures& baselinePlatformFeatures);
// Create the BindGroupLayout that binds the PLS attachments as textures. This is not necessary
// on all implementations.
@@ -100,7 +100,7 @@ protected:
}
// Create a standard PLS "draw" pipeline for the current implementation.
virtual wgpu::RenderPipeline makePLSDrawPipeline(rive::pls::DrawType drawType,
virtual wgpu::RenderPipeline makePLSDrawPipeline(rive::gpu::DrawType drawType,
wgpu::TextureFormat framebufferFormat,
wgpu::ShaderModule vertexShader,
wgpu::ShaderModule fragmentShader,
@@ -126,7 +126,7 @@ private:
std::unique_ptr<BufferRing> makeUniformBufferRing(size_t capacityInBytes) override;
std::unique_ptr<BufferRing> makeStorageBufferRing(size_t capacityInBytes,
pls::StorageBufferStructure) override;
gpu::StorageBufferStructure) override;
std::unique_ptr<BufferRing> makeVertexBufferRing(size_t capacityInBytes) override;
std::unique_ptr<BufferRing> makeTextureTransferBufferRing(size_t capacityInBytes) override;
@@ -182,4 +182,4 @@ private:
wgpu::Texture m_nullImagePaintTexture; // Bound when there is not an image paint.
wgpu::TextureView m_nullImagePaintTextureView;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,7 +2,7 @@
#include <vector>
#include "rive/pls/pls_render_context.hpp"
#include "rive/renderer/render_context.hpp"
struct GLFWwindow;
@@ -25,12 +25,12 @@ public:
virtual ~FiddleContext() {}
virtual float dpiScale(GLFWwindow*) const = 0;
virtual rive::Factory* factory() = 0;
virtual rive::pls::PLSRenderContext* plsContextOrNull() = 0;
virtual rive::pls::PLSRenderTarget* plsRenderTargetOrNull() = 0;
virtual rive::gpu::PLSRenderContext* plsContextOrNull() = 0;
virtual rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() = 0;
virtual void onSizeChanged(GLFWwindow*, int width, int height, uint32_t sampleCount) {}
virtual void toggleZoomWindow() = 0;
virtual std::unique_ptr<rive::Renderer> makeRenderer(int width, int height) = 0;
virtual void begin(const rive::pls::PLSRenderContext::FrameDescriptor&) = 0;
virtual void begin(const rive::gpu::PLSRenderContext::FrameDescriptor&) = 0;
virtual void flushPLSContext() = 0; // Called by end()
virtual void end(GLFWwindow*, std::vector<uint8_t>* pixelData = nullptr) = 0;
virtual void tick(){};

View File

@@ -6,9 +6,9 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeD3DPLS(FiddleContextOptions) {
#else
#include "rive/pls/pls_renderer.hpp"
#include "rive/pls/d3d/pls_render_context_d3d_impl.hpp"
#include "rive/pls/d3d/d3d11.hpp"
#include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/d3d/render_context_d3d_impl.hpp"
#include "rive/renderer/d3d/d3d11.hpp"
#include <array>
#include <dxgi1_2.h>
@@ -18,7 +18,7 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeD3DPLS(FiddleContextOptions) {
#include <GLFW/glfw3native.h>
using namespace rive;
using namespace rive::pls;
using namespace rive::gpu;
class FiddleContextD3DPLS : public FiddleContext
{
@@ -37,9 +37,9 @@ public:
rive::Factory* factory() override { return m_plsContext.get(); }
rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override
{
@@ -69,10 +69,10 @@ public:
std::unique_ptr<Renderer> makeRenderer(int width, int height) override
{
return std::make_unique<PLSRenderer>(m_plsContext.get());
return std::make_unique<RiveRenderer>(m_plsContext.get());
}
void begin(const rive::pls::PLSRenderContext::FrameDescriptor& frameDescriptor) override
void begin(const rive::gpu::PLSRenderContext::FrameDescriptor& frameDescriptor) override
{
m_plsContext->beginFrame(frameDescriptor);
}

View File

@@ -12,15 +12,15 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeDawnPLS(FiddleContextOptions o
#include "dawn/native/DawnNative.h"
#include "dawn/dawn_proc.h"
#include "rive/pls/pls_factory.hpp"
#include "rive/pls/pls_renderer.hpp"
#include "rive/pls/webgpu/pls_render_context_webgpu_impl.hpp"
#include "rive/renderer/rive_render_factory.hpp"
#include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp"
#include <array>
#include <thread>
using namespace rive;
using namespace rive::pls;
using namespace rive::gpu;
static void print_device_error(WGPUErrorType errorType, const char* message, void*)
{
@@ -173,9 +173,9 @@ public:
Factory* factory() override { return m_plsContext.get(); }
rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override
{
@@ -214,7 +214,7 @@ public:
std::unique_ptr<Renderer> makeRenderer(int width, int height) override
{
return std::make_unique<PLSRenderer>(m_plsContext.get());
return std::make_unique<RiveRenderer>(m_plsContext.get());
}
void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override

View File

@@ -7,10 +7,10 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeGLPLS() { return nullptr; }
#else
#include "path_fiddle.hpp"
#include "rive/pls/gl/gles3.hpp"
#include "rive/pls/pls_renderer.hpp"
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/renderer/gl/gles3.hpp"
#include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#ifdef RIVE_WEBGL
#include <emscripten/emscripten.h>
@@ -21,7 +21,7 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeGLPLS() { return nullptr; }
#include "GLFW/glfw3.h"
using namespace rive;
using namespace rive::pls;
using namespace rive::gpu;
#ifdef RIVE_DESKTOP_GL
#ifdef DEBUG
@@ -211,9 +211,9 @@ public:
rive::Factory* factory() override { return m_plsContext.get(); }
rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override
{
@@ -222,7 +222,7 @@ public:
std::unique_ptr<Renderer> makeRenderer(int width, int height) override
{
return std::make_unique<PLSRenderer>(m_plsContext.get());
return std::make_unique<RiveRenderer>(m_plsContext.get());
}
void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override
@@ -288,9 +288,9 @@ public:
rive::Factory* factory() override { return &m_factory; }
rive::pls::PLSRenderContext* plsContextOrNull() override { return nullptr; }
rive::gpu::PLSRenderContext* plsContextOrNull() override { return nullptr; }
rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return nullptr; }
rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return nullptr; }
std::unique_ptr<Renderer> makeRenderer(int width, int height) override
{

View File

@@ -1,9 +1,9 @@
#include "fiddle_context.hpp"
#include "rive/pls/pls_renderer.hpp"
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/pls/metal/pls_render_context_metal_impl.h"
#include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include "rive/renderer/metal/render_context_metal_impl.h"
#import <Metal/Metal.h>
#import <QuartzCore/CAMetalLayer.h>
@@ -13,7 +13,7 @@
#include "GLFW/glfw3native.h"
using namespace rive;
using namespace rive::pls;
using namespace rive::gpu;
class FiddleContextMetalPLS : public FiddleContext
{
@@ -45,9 +45,9 @@ public:
Factory* factory() override { return m_plsContext.get(); }
rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override
{
@@ -73,7 +73,7 @@ public:
std::unique_ptr<Renderer> makeRenderer(int width, int height) override
{
return std::make_unique<PLSRenderer>(m_plsContext.get());
return std::make_unique<RiveRenderer>(m_plsContext.get());
}
void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override

View File

@@ -15,8 +15,8 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeVulkanPLS(FiddleContextOptions
#include "rive_vk_bootstrap/rive_vk_bootstrap.hpp"
#include "rive_vk_bootstrap/vulkan_fence_pool.hpp"
#include "rive/pls/pls_renderer.hpp"
#include "rive/pls/vulkan/pls_render_context_vulkan_impl.hpp"
#include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/vulkan/render_context_vulkan_impl.hpp"
#include <GLFW/glfw3.h>
#include <GLFW/glfw3native.h>
#include <vulkan/vulkan.h>
@@ -24,11 +24,11 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeVulkanPLS(FiddleContextOptions
#include <vk_mem_alloc.h>
using namespace rive;
using namespace rive::pls;
using namespace rive::gpu;
// +1 because PLS doesn't wait for the previous fence until partway through flush.
// (After which we need to acquire a new image from the swapchain.)
static constexpr int kResourcePoolSize = pls::kBufferRingSize + 1;
static constexpr int kResourcePoolSize = gpu::kBufferRingSize + 1;
class FiddleContextVulkanPLS : public FiddleContext
{
@@ -144,9 +144,9 @@ public:
Factory* factory() override { return m_plsContext.get(); }
rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); }
rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); }
void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override
{
@@ -232,7 +232,7 @@ public:
std::unique_ptr<Renderer> makeRenderer(int width, int height) override
{
return std::make_unique<PLSRenderer>(m_plsContext.get());
return std::make_unique<RiveRenderer>(m_plsContext.get());
}
void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override

View File

@@ -400,13 +400,13 @@ int main(int argc, const char** argv)
}
else if (!strcmp(argv[i], "--sw") || !strcmp(argv[i], "--swiftshader"))
{
// Use the swiftshader built by packages/runtime/pls/make_swiftshader.sh
// Use the swiftshader built by packages/runtime/renderer/make_swiftshader.sh
set_environment_variable("VK_ICD_FILENAMES", kSwiftShaderICD);
api = API::vulkan;
}
else if (!strcmp(argv[i], "--swatomic") || !strcmp(argv[i], "--swiftshaderatomic"))
{
// Use the swiftshader built by packages/runtime/pls/make_swiftshader.sh
// Use the swiftshader built by packages/runtime/renderer/make_swiftshader.sh
set_environment_variable("VK_ICD_FILENAMES", kSwiftShaderICD);
api = API::vulkan;
s_forceAtomicMode = true;

View File

@@ -81,7 +81,7 @@ filter({})
local pls_generated_headers = RIVE_BUILD_OUT .. '/include'
local pls_shaders_absolute_dir = path.getabsolute(pls_generated_headers .. '/generated/shaders')
local makecommand = 'make -C '
.. path.getabsolute('renderer/shaders')
.. path.getabsolute('src/shaders')
.. ' OUT='
.. pls_shaders_absolute_dir
@@ -154,20 +154,20 @@ do
includedirs({
'include',
'glad',
'renderer',
'src',
'../include',
pls_generated_headers,
})
flags({ 'FatalWarnings' })
files({ 'renderer/*.cpp', 'renderer/decoding/*.cpp' })
files({ 'src/*.cpp', 'renderer/decoding/*.cpp' })
if _OPTIONS['with_vulkan'] then
externalincludedirs({
vulkan_headers .. '/include',
vulkan_memory_allocator .. '/include',
})
files({ 'renderer/vulkan/*.cpp' })
files({ 'src/vulkan/*.cpp' })
end
filter({ 'toolset:not msc' })
@@ -203,20 +203,20 @@ do
filter({ 'system:not ios' })
do
files({
'renderer/gl/gl_state.cpp',
'renderer/gl/gl_utils.cpp',
'renderer/gl/load_store_actions_ext.cpp',
'renderer/gl/pls_render_buffer_gl_impl.cpp',
'renderer/gl/pls_render_context_gl_impl.cpp',
'renderer/gl/pls_render_target_gl.cpp',
'src/gl/gl_state.cpp',
'src/gl/gl_utils.cpp',
'src/gl/load_store_actions_ext.cpp',
'src/gl/render_buffer_gl_impl.cpp',
'src/gl/render_context_gl_impl.cpp',
'src/gl/render_target_gl.cpp',
})
end
filter({ 'system:windows or macosx or linux' })
do
files({
'renderer/gl/pls_impl_webgl.cpp', -- Emulate WebGL with ANGLE.
'renderer/gl/pls_impl_rw_texture.cpp',
'src/gl/pls_impl_webgl.cpp', -- Emulate WebGL with ANGLE.
'src/gl/pls_impl_rw_texture.cpp',
'glad/glad.c',
'glad/glad_custom.c',
}) -- GL loader library for ANGLE.
@@ -225,15 +225,15 @@ do
filter('system:android')
do
files({
'renderer/gl/load_gles_extensions.cpp',
'renderer/gl/pls_impl_ext_native.cpp',
'renderer/gl/pls_impl_framebuffer_fetch.cpp',
'src/gl/load_gles_extensions.cpp',
'src/gl/pls_impl_ext_native.cpp',
'src/gl/pls_impl_framebuffer_fetch.cpp',
})
end
filter({ 'system:macosx or ios', 'options:not nop-obj-c' })
do
files({ 'renderer/metal/*.mm' })
files({ 'src/metal/*.mm' })
buildoptions({ '-fobjc-arc' })
end
@@ -249,14 +249,14 @@ do
filter({ 'options:with-webgpu or with-dawn' })
do
files({
'renderer/webgpu/**.cpp',
'renderer/gl/load_store_actions_ext.cpp',
'src/webgpu/**.cpp',
'src/gl/load_store_actions_ext.cpp',
})
end
filter({ 'options:nop-obj-c' })
do
files({ 'renderer/metal/pls_metal_nop.cpp' })
files({ 'src/metal/metal_nop.cpp' })
end
filter({ 'options:not no-rive-decoders' })
@@ -268,11 +268,11 @@ do
filter('system:windows')
do
architecture('x64')
files({ 'renderer/d3d/*.cpp' })
files({ 'src/d3d/*.cpp' })
end
filter('system:emscripten')
do
files({ 'renderer/gl/pls_impl_webgl.cpp' })
files({ 'src/gl/pls_impl_webgl.cpp' })
end
end

View File

@@ -4,7 +4,7 @@ if not _OPTIONS['with_vulkan'] then
end
if not vulkan_headers or not vulkan_memory_allocator then
error('Please `dofile` packages/runtime/pls/premake5_pls_renderer.lua first.')
error('Please `dofile` packages/runtime/renderer/premake5_pls_renderer.lua first.')
end
local dependency = require('dependency')

View File

@@ -3,7 +3,7 @@
*/
#include <VkBootstrap.h>
#include "rive/pls/vulkan/vulkan_context.hpp"
#include "rive/renderer/vulkan/vulkan_context.hpp"
namespace rive_vkb
{
@@ -34,11 +34,11 @@ VKAPI_ATTR VkBool32 VKAPI_CALL default_debug_callback(VkDebugUtilsMessageSeverit
// Select a GPU, using 'gpuNameFilter' or 'getenv("RIVE_GPU")', otherwise
// preferring discrete. Abort if the filter matches more than one name.
std::tuple<vkb::PhysicalDevice, rive::pls::VulkanFeatures> select_physical_device(
std::tuple<vkb::PhysicalDevice, rive::gpu::VulkanFeatures> select_physical_device(
vkb::PhysicalDeviceSelector& selector,
const char* gpuNameFilter = nullptr);
inline std::tuple<vkb::PhysicalDevice, rive::pls::VulkanFeatures> select_physical_device(
inline std::tuple<vkb::PhysicalDevice, rive::gpu::VulkanFeatures> select_physical_device(
vkb::Instance instance,
const char* gpuNameFilter = nullptr)
{

View File

@@ -4,10 +4,10 @@
#pragma once
#include "rive/pls/pls.hpp"
#include "rive/pls/vulkan/vulkan_context.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/vulkan/vulkan_context.hpp"
namespace rive::pls
namespace rive::gpu
{
class VulkanFence;
@@ -90,4 +90,4 @@ inline rcp<VulkanFence> VulkanFencePool::makeFence()
assert(fence->debugging_refcnt() == 1);
return fence;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -86,7 +86,7 @@ static const char* physical_device_type_name(VkPhysicalDeviceType type)
// Select a GPU name if it contains the substring 'filter' or '$RIVE_GPU'.
// Return false if 'filter' and '$RIVE_GPU' are both null.
// Abort if the filter matches more than one name.
std::tuple<vkb::PhysicalDevice, rive::pls::VulkanFeatures> select_physical_device(
std::tuple<vkb::PhysicalDevice, rive::gpu::VulkanFeatures> select_physical_device(
vkb::PhysicalDeviceSelector& selector,
const char* gpuNameFilter)
{
@@ -153,7 +153,7 @@ std::tuple<vkb::PhysicalDevice, rive::pls::VulkanFeatures> select_physical_devic
rive_vkb::physical_device_type_name(physicalDevice.properties.deviceType),
physicalDevice.properties.deviceName);
rive::pls::VulkanFeatures plsVulkanFeatures;
rive::gpu::VulkanFeatures plsVulkanFeatures;
physicalDevice.enable_features_if_present({
.independentBlend = VK_TRUE,
.fillModeNonSolid = VK_TRUE,

View File

@@ -2,9 +2,9 @@
* Copyright 2023 Rive
*/
#include "rive/pls/d3d/pls_render_context_d3d_impl.hpp"
#include "rive/renderer/d3d/render_context_d3d_impl.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/image.hpp"
#include "shaders/constants.glsl"
#include <D3DCompiler.h>
@@ -32,7 +32,7 @@ constexpr static UINT kImageRectVertexDataSlot = 2;
constexpr static UINT kImageMeshVertexDataSlot = 3;
constexpr static UINT kImageMeshUVDataSlot = 4;
namespace rive::pls
namespace rive::gpu
{
ComPtr<ID3D11Texture2D> make_simple_2d_texture(ID3D11Device* gpu,
DXGI_FORMAT format,
@@ -267,9 +267,9 @@ PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr<ID3D11Device> gpu,
nullptr,
&m_tessellatePixelShader));
m_tessSpanIndexBuffer = makeSimpleImmutableBuffer(sizeof(pls::kTessSpanIndices),
m_tessSpanIndexBuffer = makeSimpleImmutableBuffer(sizeof(gpu::kTessSpanIndices),
D3D11_BIND_INDEX_BUFFER,
pls::kTessSpanIndices);
gpu::kTessSpanIndices);
}
// Set up the path patch rendering buffers.
@@ -281,13 +281,13 @@ PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr<ID3D11Device> gpu,
m_patchIndexBuffer =
makeSimpleImmutableBuffer(sizeof(patchIndices), D3D11_BIND_INDEX_BUFFER, patchIndices);
// Set up the imageRect rendering buffers. (pls::InterlockMode::atomics only.)
m_imageRectVertexBuffer = makeSimpleImmutableBuffer(sizeof(pls::kImageRectVertices),
// Set up the imageRect rendering buffers. (gpu::InterlockMode::atomics only.)
m_imageRectVertexBuffer = makeSimpleImmutableBuffer(sizeof(gpu::kImageRectVertices),
D3D11_BIND_VERTEX_BUFFER,
pls::kImageRectVertices);
m_imageRectIndexBuffer = makeSimpleImmutableBuffer(sizeof(pls::kImageRectIndices),
gpu::kImageRectVertices);
m_imageRectIndexBuffer = makeSimpleImmutableBuffer(sizeof(gpu::kImageRectIndices),
D3D11_BIND_INDEX_BUFFER,
pls::kImageRectIndices);
gpu::kImageRectIndices);
// Create buffers for uniforms.
{
@@ -295,16 +295,16 @@ PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr<ID3D11Device> gpu,
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
desc.ByteWidth = sizeof(pls::FlushUniforms);
desc.StructureByteStride = sizeof(pls::FlushUniforms);
desc.ByteWidth = sizeof(gpu::FlushUniforms);
desc.StructureByteStride = sizeof(gpu::FlushUniforms);
VERIFY_OK(m_gpu->CreateBuffer(&desc, nullptr, m_flushUniforms.ReleaseAndGetAddressOf()));
desc.ByteWidth = sizeof(DrawUniforms);
desc.StructureByteStride = sizeof(DrawUniforms);
VERIFY_OK(m_gpu->CreateBuffer(&desc, nullptr, m_drawUniforms.ReleaseAndGetAddressOf()));
desc.ByteWidth = sizeof(pls::ImageDrawUniforms);
desc.StructureByteStride = sizeof(pls::ImageDrawUniforms);
desc.ByteWidth = sizeof(gpu::ImageDrawUniforms);
desc.StructureByteStride = sizeof(gpu::ImageDrawUniforms);
VERIFY_OK(
m_gpu->CreateBuffer(&desc, nullptr, m_imageDrawUniforms.ReleaseAndGetAddressOf()));
}
@@ -677,12 +677,12 @@ std::unique_ptr<BufferRing> PLSRenderContextD3DImpl::makeUniformBufferRing(size_
std::unique_ptr<BufferRing> PLSRenderContextD3DImpl::makeStorageBufferRing(
size_t capacityInBytes,
pls::StorageBufferStructure bufferStructure)
gpu::StorageBufferStructure bufferStructure)
{
return capacityInBytes != 0 ? std::make_unique<StructuredBufferRingD3D>(
this,
capacityInBytes,
pls::StorageBufferElementSizeInBytes(bufferStructure))
gpu::StorageBufferElementSizeInBytes(bufferStructure))
: nullptr;
}
@@ -927,7 +927,7 @@ ID3D11ShaderResourceView* PLSRenderContextD3DImpl::replaceStructuredBufferSRV(
// Shaders access our storage buffers as arrays of basic types, as opposed to structures. Our
// SRV therefore needs to be indexed by the underlying basic type, not the high level structure.
constexpr static UINT kUnderlyingTypeSizeInBytes =
pls::StorageBufferElementSizeInBytes(HighLevelStruct::kBufferStructure);
gpu::StorageBufferElementSizeInBytes(HighLevelStruct::kBufferStructure);
static_assert(sizeof(HighLevelStruct) % kUnderlyingTypeSizeInBytes == 0);
constexpr static UINT kStructIndexMultiplier =
sizeof(HighLevelStruct) / kUnderlyingTypeSizeInBytes;
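The index math above is easier to see with concrete numbers. The struct size below is hypothetical and purely illustrative; the real sizes of PathData, PaintData, and the other high-level structs are not shown in this diff.
```
#include <cstddef>

// Hypothetical: a 32-byte struct whose storage-buffer layout is uint32x4 (16-byte elements).
constexpr size_t kHypotheticalStructSize = 32;    // assumed sizeof(HighLevelStruct)
constexpr size_t kUnderlyingTypeSizeInBytes = 16; // uint32x4 element = 4 * sizeof(uint32_t)
static_assert(kHypotheticalStructSize % kUnderlyingTypeSizeInBytes == 0);
constexpr size_t kStructIndexMultiplier =
    kHypotheticalStructSize / kUnderlyingTypeSizeInBytes; // == 2
// So the SRV element offset for struct index 'firstStruct' is firstStruct * 2.
static_assert(kStructIndexMultiplier == 2);
```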
@@ -938,14 +938,14 @@ ID3D11ShaderResourceView* PLSRenderContextD3DImpl::replaceStructuredBufferSRV(
}
void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
pls::ShaderFeatures shaderFeatures,
pls::InterlockMode interlockMode,
pls::ShaderMiscFlags pixelShaderMiscFlags)
gpu::ShaderFeatures shaderFeatures,
gpu::InterlockMode interlockMode,
gpu::ShaderMiscFlags pixelShaderMiscFlags)
{
uint32_t vertexShaderKey = pls::ShaderUniqueKey(drawType,
uint32_t vertexShaderKey = gpu::ShaderUniqueKey(drawType,
shaderFeatures & kVertexShaderFeaturesMask,
interlockMode,
pls::ShaderMiscFlags::none);
gpu::ShaderMiscFlags::none);
auto vertexEntry = m_drawVertexShaders.find(vertexShaderKey);
uint32_t pixelShaderKey =
@@ -965,7 +965,7 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
}
if (m_d3dCapabilities.supportsRasterizerOrderedViews)
{
if ((interlockMode == pls::InterlockMode::rasterOrdering &&
if ((interlockMode == gpu::InterlockMode::rasterOrdering &&
drawType != DrawType::interiorTriangulation) ||
drawType == DrawType::imageMesh)
{
@@ -980,11 +980,11 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
{
s << "#define " << GLSL_ENABLE_MIN_16_PRECISION << '\n';
}
if (pixelShaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend)
if (pixelShaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend)
{
s << "#define " << GLSL_FIXED_FUNCTION_COLOR_BLEND << '\n';
}
if (pixelShaderMiscFlags & pls::ShaderMiscFlags::coalescedResolveAndTransfer)
if (pixelShaderMiscFlags & gpu::ShaderMiscFlags::coalescedResolveAndTransfer)
{
s << "#define " << GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER << '\n';
s << "#define " << GLSL_COLOR_PLANE_IDX_OVERRIDE << ' '
@@ -1000,7 +1000,7 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
s << "#define " << GLSL_DRAW_INTERIOR_TRIANGLES << '\n';
break;
case DrawType::imageRect:
assert(interlockMode == pls::InterlockMode::atomics);
assert(interlockMode == gpu::InterlockMode::atomics);
s << "#define " << GLSL_DRAW_IMAGE << '\n';
s << "#define " << GLSL_DRAW_IMAGE_RECT << '\n';
break;
@@ -1008,12 +1008,12 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
s << "#define " << GLSL_DRAW_IMAGE << '\n';
s << "#define " << GLSL_DRAW_IMAGE_MESH << '\n';
break;
case DrawType::plsAtomicResolve:
assert(interlockMode == pls::InterlockMode::atomics);
case DrawType::gpuAtomicResolve:
assert(interlockMode == gpu::InterlockMode::atomics);
s << "#define " << GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS << '\n';
s << "#define " << GLSL_RESOLVE_PLS << '\n';
break;
case DrawType::plsAtomicInitialize:
case DrawType::gpuAtomicInitialize:
case DrawType::stencilClipReset:
RIVE_UNREACHABLE();
}
@@ -1028,33 +1028,33 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
{
case DrawType::midpointFanPatches:
case DrawType::outerCurvePatches:
s << pls::glsl::draw_path_common << '\n';
s << (interlockMode == pls::InterlockMode::rasterOrdering ? pls::glsl::draw_path
: pls::glsl::atomic_draw)
s << gpu::glsl::draw_path_common << '\n';
s << (interlockMode == gpu::InterlockMode::rasterOrdering ? gpu::glsl::draw_path
: gpu::glsl::atomic_draw)
<< '\n';
break;
case DrawType::interiorTriangulation:
s << pls::glsl::draw_path_common << '\n';
s << (interlockMode == pls::InterlockMode::rasterOrdering ? pls::glsl::draw_path
: pls::glsl::atomic_draw)
s << gpu::glsl::draw_path_common << '\n';
s << (interlockMode == gpu::InterlockMode::rasterOrdering ? gpu::glsl::draw_path
: gpu::glsl::atomic_draw)
<< '\n';
break;
case DrawType::imageRect:
assert(interlockMode == pls::InterlockMode::atomics);
s << pls::glsl::atomic_draw << '\n';
assert(interlockMode == gpu::InterlockMode::atomics);
s << gpu::glsl::atomic_draw << '\n';
break;
case DrawType::imageMesh:
s << (interlockMode == pls::InterlockMode::rasterOrdering
? pls::glsl::draw_image_mesh
: pls::glsl::atomic_draw)
s << (interlockMode == gpu::InterlockMode::rasterOrdering
? gpu::glsl::draw_image_mesh
: gpu::glsl::atomic_draw)
<< '\n';
break;
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicResolve:
case DrawType::stencilClipReset:
assert(interlockMode == pls::InterlockMode::atomics);
s << pls::glsl::atomic_draw << '\n';
assert(interlockMode == gpu::InterlockMode::atomics);
s << gpu::glsl::atomic_draw << '\n';
break;
case DrawType::plsAtomicInitialize:
case DrawType::gpuAtomicInitialize:
RIVE_UNREACHABLE();
}
@@ -1124,10 +1124,10 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
0};
vertexAttribCount = 2;
break;
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicResolve:
vertexAttribCount = 0;
break;
case DrawType::plsAtomicInitialize:
case DrawType::gpuAtomicInitialize:
case DrawType::stencilClipReset:
RIVE_UNREACHABLE();
}
@@ -1216,22 +1216,22 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
// All programs use the same storage buffers.
ID3D11ShaderResourceView* storageBufferBufferSRVs[] = {
desc.pathCount > 0 ? replaceStructuredBufferSRV<pls::PathData>(
desc.pathCount > 0 ? replaceStructuredBufferSRV<gpu::PathData>(
pathBufferRing(),
desc.pathCount,
math::lossless_numeric_cast<UINT>(desc.firstPath))
: nullptr,
desc.pathCount > 0 ? replaceStructuredBufferSRV<pls::PaintData>(
desc.pathCount > 0 ? replaceStructuredBufferSRV<gpu::PaintData>(
paintBufferRing(),
desc.pathCount,
math::lossless_numeric_cast<UINT>(desc.firstPaint))
: nullptr,
desc.pathCount > 0 ? replaceStructuredBufferSRV<pls::PaintAuxData>(
desc.pathCount > 0 ? replaceStructuredBufferSRV<gpu::PaintAuxData>(
paintAuxBufferRing(),
desc.pathCount,
math::lossless_numeric_cast<UINT>(desc.firstPaintAux))
: nullptr,
desc.contourCount > 0 ? replaceStructuredBufferSRV<pls::ContourData>(
desc.contourCount > 0 ? replaceStructuredBufferSRV<gpu::ContourData>(
contourBufferRing(),
desc.contourCount,
math::lossless_numeric_cast<UINT>(desc.firstContour))
@@ -1243,7 +1243,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
m_gpuContext->VSSetShaderResources(PATH_BUFFER_IDX,
std::size(storageBufferBufferSRVs),
storageBufferBufferSRVs);
if (desc.interlockMode == pls::InterlockMode::atomics)
if (desc.interlockMode == gpu::InterlockMode::atomics)
{
// Atomic mode accesses the paint buffers from the pixel shader.
m_gpuContext->PSSetShaderResources(PAINT_BUFFER_IDX, 2, storageBufferBufferSRVs + 1);
@@ -1334,7 +1334,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
m_gpuContext->OMSetRenderTargets(1, m_tessTextureRTV.GetAddressOf(), NULL);
m_gpuContext->DrawIndexedInstanced(
std::size(pls::kTessSpanIndices),
std::size(gpu::kTessSpanIndices),
desc.tessVertexSpanCount,
0,
0,
@@ -1353,7 +1353,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
!(desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND);
switch (desc.colorLoadAction)
{
case pls::LoadAction::clear:
case gpu::LoadAction::clear:
if (renderDirectToRasterPipeline)
{
float clearColor4f[4];
@@ -1369,11 +1369,11 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
}
else
{
UINT clearColorui[4] = {pls::SwizzleRiveColorToRGBA(desc.clearColor)};
UINT clearColorui[4] = {gpu::SwizzleRiveColorToRGBA(desc.clearColor)};
m_gpuContext->ClearUnorderedAccessViewUint(renderTarget->targetUAV(), clearColorui);
}
break;
case pls::LoadAction::preserveRenderTarget:
case gpu::LoadAction::preserveRenderTarget:
if (!renderDirectToRasterPipeline && !renderTarget->targetTextureSupportsUAV())
{
// We're rendering to an offscreen UAV and preserving the target. Copy the target
@@ -1384,10 +1384,10 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
desc.renderTargetUpdateBounds);
}
break;
case pls::LoadAction::dontCare:
case gpu::LoadAction::dontCare:
break;
}
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
constexpr static UINT kZero[4]{};
m_gpuContext->ClearUnorderedAccessViewUint(renderTarget->clipUAV(), kZero);
@@ -1402,9 +1402,9 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
m_patchVertexBuffer.Get(),
desc.hasTriangleVertices ? submitted_buffer(triangleBufferRing()) : NULL,
m_imageRectVertexBuffer.Get()};
UINT vertexStrides[3] = {sizeof(pls::PatchVertex),
sizeof(pls::TriangleVertex),
sizeof(pls::ImageRectVertex)};
UINT vertexStrides[3] = {sizeof(gpu::PatchVertex),
sizeof(gpu::TriangleVertex),
sizeof(gpu::ImageRectVertex)};
UINT vertexOffsets[3] = {0, 0, 0};
static_assert(kPatchVertexDataSlot == 0);
static_assert(kTriangleVertexDataSlot == 1);
@@ -1428,7 +1428,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
ID3D11UnorderedAccessView* plsUAVs[] = {
renderDirectToRasterPipeline ? NULL : renderTarget->targetUAV(),
renderTarget->clipUAV(),
desc.interlockMode == pls::InterlockMode::rasterOrdering
desc.interlockMode == gpu::InterlockMode::rasterOrdering
? renderTarget->scratchColorUAV()
: NULL, // Atomic mode doesn't use the scratchColor.
renderTarget->coverageUAV(),
@@ -1460,7 +1460,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
const char* const imageDrawUniformData = heap_buffer_contents(imageDrawUniformBufferRing());
bool renderPassHasCoalescedResolveAndTransfer =
desc.interlockMode == pls::InterlockMode::atomics && !renderDirectToRasterPipeline &&
desc.interlockMode == gpu::InterlockMode::atomics && !renderDirectToRasterPipeline &&
!renderTarget->targetTextureSupportsUAV();
for (const DrawBatch& batch : *desc.drawList)
@@ -1471,16 +1471,16 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
}
DrawType drawType = batch.drawType;
auto shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics
auto shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics
? desc.combinedShaderFeatures
: batch.shaderFeatures;
auto pixelShaderMiscFlags =
drawType == pls::DrawType::plsAtomicResolve && renderPassHasCoalescedResolveAndTransfer
? pls::ShaderMiscFlags::coalescedResolveAndTransfer
: pls::ShaderMiscFlags::none;
drawType == gpu::DrawType::gpuAtomicResolve && renderPassHasCoalescedResolveAndTransfer
? gpu::ShaderMiscFlags::coalescedResolveAndTransfer
: gpu::ShaderMiscFlags::none;
if (renderDirectToRasterPipeline)
{
pixelShaderMiscFlags |= pls::ShaderMiscFlags::fixedFunctionColorBlend;
pixelShaderMiscFlags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend;
}
setPipelineLayoutAndShaders(drawType,
shaderFeatures,
@@ -1530,7 +1530,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
imageDrawUniformData + batch.imageDrawDataOffset,
0,
0);
m_gpuContext->DrawIndexed(std::size(pls::kImageRectIndices), 0, 0);
m_gpuContext->DrawIndexed(std::size(gpu::kImageRectIndices), 0, 0);
break;
case DrawType::imageMesh:
{
@@ -1560,8 +1560,8 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
m_gpuContext->DrawIndexed(batch.elementCount, batch.baseElement, 0);
break;
}
case DrawType::plsAtomicResolve:
assert(desc.interlockMode == pls::InterlockMode::atomics);
case DrawType::gpuAtomicResolve:
assert(desc.interlockMode == gpu::InterlockMode::atomics);
m_gpuContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
m_gpuContext->RSSetState(m_backCulledRasterState[0].Get());
if (renderPassHasCoalescedResolveAndTransfer)
@@ -1595,13 +1595,13 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
}
m_gpuContext->Draw(4, 0);
break;
case DrawType::plsAtomicInitialize:
case DrawType::gpuAtomicInitialize:
case DrawType::stencilClipReset:
RIVE_UNREACHABLE();
}
}
if (desc.interlockMode == pls::InterlockMode::rasterOrdering &&
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering &&
!renderTarget->targetTextureSupportsUAV())
{
// We rendered to an offscreen UAV and did not resolve to the renderTarget. Copy back to the
@@ -1614,4 +1614,4 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc)
desc.renderTargetUpdateBounds);
}
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,17 +2,17 @@
* Copyright 2023 Rive
*/
#include "rive/pls/pls_draw.hpp"
#include "rive/renderer/draw.hpp"
#include "gr_inner_fan_triangulator.hpp"
#include "path_utils.hpp"
#include "pls_path.hpp"
#include "pls_paint.hpp"
#include "rive_render_path.hpp"
#include "rive_render_paint.hpp"
#include "rive/math/wangs_formula.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/image.hpp"
#include "shaders/constants.glsl"
namespace rive::pls
namespace rive::gpu
{
namespace
{
@@ -288,7 +288,7 @@ PLSDraw::PLSDraw(IAABB pixelBounds,
{
if (m_blendMode != BlendMode::srcOver)
{
m_drawContents |= pls::DrawContents::advancedBlend;
m_drawContents |= gpu::DrawContents::advancedBlend;
}
}
@@ -298,15 +298,15 @@ void PLSDraw::setClipID(uint32_t clipID)
// For clipUpdates, m_clipID refers to the ID we are writing to the stencil buffer (NOT the ID
// we are clipping against). It therefore doesn't affect the activeClip flag in that case.
if (!(m_drawContents & pls::DrawContents::clipUpdate))
if (!(m_drawContents & gpu::DrawContents::clipUpdate))
{
if (m_clipID != 0)
{
m_drawContents |= pls::DrawContents::activeClip;
m_drawContents |= gpu::DrawContents::activeClip;
}
else
{
m_drawContents &= ~pls::DrawContents::activeClip;
m_drawContents &= ~gpu::DrawContents::activeClip;
}
}
}
@@ -324,17 +324,17 @@ void PLSDraw::releaseRefs()
safe_unref(m_gradientRef);
}
PLSDrawUniquePtr PLSPathDraw::Make(PLSRenderContext* context,
const Mat2D& matrix,
rcp<const PLSPath> path,
FillRule fillRule,
const PLSPaint* paint,
RawPath* scratchPath)
PLSDrawUniquePtr RiveRenderPathDraw::Make(PLSRenderContext* context,
const Mat2D& matrix,
rcp<const RiveRenderPath> path,
FillRule fillRule,
const RiveRenderPaint* paint,
RawPath* scratchPath)
{
assert(path != nullptr);
assert(paint != nullptr);
AABB mappedBounds;
if (context->frameInterlockMode() == pls::InterlockMode::atomics)
if (context->frameInterlockMode() == gpu::InterlockMode::atomics)
{
// In atomic mode, find a tighter bounding box in order to maximize reordering.
mappedBounds = matrix.mapBoundingBox(path->getRawPath().points().data(),
@@ -369,9 +369,9 @@ PLSDrawUniquePtr PLSPathDraw::Make(PLSRenderContext* context,
// it.
const AABB& localBounds = path->getBounds();
// FIXME! Implement interior triangulation in depthStencil mode.
if (context->frameInterlockMode() != pls::InterlockMode::depthStencil &&
if (context->frameInterlockMode() != gpu::InterlockMode::depthStencil &&
path->getRawPath().verbs().count() < 1000 &&
pls::FindTransformedArea(localBounds, matrix) > 512 * 512)
gpu::FindTransformedArea(localBounds, matrix) > 512 * 512)
{
return PLSDrawUniquePtr(context->make<InteriorTriangulationDraw>(
context,
@@ -394,13 +394,13 @@ PLSDrawUniquePtr PLSPathDraw::Make(PLSRenderContext* context,
paint));
}
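For a sense of scale in the heuristic above: 512 * 512 is 262,144 square pixels, so (given fewer than 1000 verbs and a non-depthStencil frame) a path whose transformed bounds cover roughly 600x600 pixels takes the interior-triangulation route, while a 64x64 icon falls back to midpoint fans. The concrete areas below are invented for illustration; only the 512 * 512 threshold comes from the code.
```
constexpr float kAreaThreshold = 512.f * 512.f; // 262144 px^2, the threshold used above
constexpr float kLargePathArea = 600.f * 600.f; // 360000 px^2 -> interior triangulation
constexpr float kSmallIconArea = 64.f * 64.f;   // 4096 px^2   -> midpoint fan path
static_assert(kLargePathArea > kAreaThreshold);
static_assert(kSmallIconArea <= kAreaThreshold);
```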
PLSPathDraw::PLSPathDraw(IAABB pixelBounds,
const Mat2D& matrix,
rcp<const PLSPath> path,
FillRule fillRule,
const PLSPaint* paint,
Type type,
pls::InterlockMode frameInterlockMode) :
RiveRenderPathDraw::RiveRenderPathDraw(IAABB pixelBounds,
const Mat2D& matrix,
rcp<const RiveRenderPath> path,
FillRule fillRule,
const RiveRenderPaint* paint,
Type type,
gpu::InterlockMode frameInterlockMode) :
PLSDraw(pixelBounds, matrix, paint->getBlendMode(), ref_rcp(paint->getImageTexture()), type),
m_pathRef(path.release()),
m_fillRule(paint->getIsStroked() ? FillRule::nonZero : fillRule),
@@ -411,39 +411,39 @@ PLSPathDraw::PLSPathDraw(IAABB pixelBounds,
assert(paint != nullptr);
if (m_blendMode == BlendMode::srcOver && paint->getIsOpaque())
{
m_drawContents |= pls::DrawContents::opaquePaint;
m_drawContents |= gpu::DrawContents::opaquePaint;
}
if (paint->getIsStroked())
{
m_drawContents |= pls::DrawContents::stroke;
m_drawContents |= gpu::DrawContents::stroke;
m_strokeRadius = paint->getThickness() * .5f;
// Ensure stroke radius is nonzero. (In PLS, zero radius means the path is filled.)
m_strokeRadius = fmaxf(m_strokeRadius, std::numeric_limits<float>::min());
assert(!std::isnan(m_strokeRadius)); // These should get culled in PLSRenderer::drawPath().
assert(!std::isnan(m_strokeRadius)); // These should get culled in RiveRenderer::drawPath().
assert(m_strokeRadius > 0);
}
else if (m_fillRule == FillRule::evenOdd)
{
m_drawContents |= pls::DrawContents::evenOddFill;
m_drawContents |= gpu::DrawContents::evenOddFill;
}
if (paint->getType() == pls::PaintType::clipUpdate)
if (paint->getType() == gpu::PaintType::clipUpdate)
{
m_drawContents |= pls::DrawContents::clipUpdate;
m_drawContents |= gpu::DrawContents::clipUpdate;
if (paint->getSimpleValue().outerClipID != 0)
{
m_drawContents |= pls::DrawContents::activeClip;
m_drawContents |= gpu::DrawContents::activeClip;
}
}
if (isStroked())
{
// Stroke triangles are always forward.
m_contourDirections = pls::ContourDirections::forward;
m_contourDirections = gpu::ContourDirections::forward;
}
else if (frameInterlockMode != pls::InterlockMode::depthStencil)
else if (frameInterlockMode != gpu::InterlockMode::depthStencil)
{
// atomic and rasterOrdering fills need reverse AND forward triangles.
m_contourDirections = pls::ContourDirections::reverseAndForward;
m_contourDirections = gpu::ContourDirections::reverseAndForward;
}
else if (m_fillRule != FillRule::evenOdd)
{
@@ -452,13 +452,13 @@ PLSPathDraw::PLSPathDraw(IAABB pixelBounds,
// counterclockwise triangles twice and clockwise only once.
float matrixDeterminant = matrix[0] * matrix[3] - matrix[2] * matrix[1];
m_contourDirections = m_pathRef->getCoarseArea() * matrixDeterminant >= 0
? pls::ContourDirections::forward
: pls::ContourDirections::reverse;
? gpu::ContourDirections::forward
: gpu::ContourDirections::reverse;
}
else
{
// "evenOdd" depthStencil fils just get drawn twice, so any direction is fine.
m_contourDirections = pls::ContourDirections::forward;
m_contourDirections = gpu::ContourDirections::forward;
}
m_simplePaintValue = paint->getSimpleValue();
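The determinant test above accounts for mirroring: a flipped transform reverses the path's on-screen winding, so the precomputed coarse area alone is not enough. A minimal numeric check, using an assumed horizontal-flip matrix and a hypothetical path with positive coarse area:
```
// Indices follow the expression above: det = m[0]*m[3] - m[2]*m[1].
constexpr float m[4] = {-1.f, 0.f, 0.f, 1.f};    // assumed horizontal flip
constexpr float det = m[0] * m[3] - m[2] * m[1]; // == -1
constexpr float coarseArea = 1.f;                // hypothetical winding sign
static_assert(coarseArea * det < 0);             // -> gpu::ContourDirections::reverse
```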
@@ -468,7 +468,7 @@ PLSPathDraw::PLSPathDraw(IAABB pixelBounds,
assert(isStroked() == (strokeRadius() > 0));
}
void PLSPathDraw::pushToRenderContext(PLSRenderContext::LogicalFlush* flush)
void RiveRenderPathDraw::pushToRenderContext(PLSRenderContext::LogicalFlush* flush)
{
// Make sure the rawPath in our path reference hasn't changed since we began holding!
assert(m_rawPathMutationID == m_pathRef->getRawPathMutationID());
@@ -489,7 +489,7 @@ void PLSPathDraw::pushToRenderContext(PLSRenderContext::LogicalFlush* flush)
}
}
void PLSPathDraw::releaseRefs()
void RiveRenderPathDraw::releaseRefs()
{
PLSDraw::releaseRefs();
RIVE_DEBUG_CODE(m_pathRef->unlockRawPathMutations();)
@@ -499,16 +499,16 @@ void PLSPathDraw::releaseRefs()
MidpointFanPathDraw::MidpointFanPathDraw(PLSRenderContext* context,
IAABB pixelBounds,
const Mat2D& matrix,
rcp<const PLSPath> path,
rcp<const RiveRenderPath> path,
FillRule fillRule,
const PLSPaint* paint) :
PLSPathDraw(pixelBounds,
matrix,
std::move(path),
fillRule,
paint,
Type::midpointFanPath,
context->frameInterlockMode())
const RiveRenderPaint* paint) :
RiveRenderPathDraw(pixelBounds,
matrix,
std::move(path),
fillRule,
paint,
Type::midpointFanPath,
context->frameInterlockMode())
{
if (isStroked())
{
@@ -999,11 +999,11 @@ MidpointFanPathDraw::MidpointFanPathDraw(PLSRenderContext* context,
m_resourceCounts.contourCount = contourCount;
// maxTessellatedSegmentCount does not get doubled when we emit both forward and mirrored
// contours because the forward and mirrored pair both get packed into a single
// pls::TessVertexSpan.
// gpu::TessVertexSpan.
m_resourceCounts.maxTessellatedSegmentCount =
lineCount + unpaddedCurveCount + emptyStrokeCountForCaps;
m_resourceCounts.midpointFanTessVertexCount =
m_contourDirections == pls::ContourDirections::reverseAndForward ? tessVertexCount * 2
m_contourDirections == gpu::ContourDirections::reverseAndForward ? tessVertexCount * 2
: tessVertexCount;
}
}
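To make the counting rule above concrete (all counts are invented, and the empty-stroke-cap term is omitted): a filled contour with 2 lines, 10 curves, and 120 tessellation vertices in reverseAndForward mode reserves 12 tessellated segments but 240 midpoint-fan vertices, because each forward/mirrored pair shares one gpu::TessVertexSpan while the vertices themselves are emitted twice.
```
#include <cstddef>

constexpr size_t lineCount = 2, curveCount = 10, tessVertexCount = 120; // hypothetical contour
constexpr bool reverseAndForward = true; // gpu::ContourDirections::reverseAndForward
constexpr size_t maxTessellatedSegmentCount = lineCount + curveCount; // 12, not doubled
constexpr size_t midpointFanTessVertexCount =
    reverseAndForward ? tessVertexCount * 2 : tessVertexCount; // 240, doubled
static_assert(maxTessellatedSegmentCount == 12 && midpointFanTessVertexCount == 240);
```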
@@ -1342,18 +1342,18 @@ void MidpointFanPathDraw::pushEmulatedStrokeCapAsJoinBeforeCubic(
InteriorTriangulationDraw::InteriorTriangulationDraw(PLSRenderContext* context,
IAABB pixelBounds,
const Mat2D& matrix,
rcp<const PLSPath> path,
rcp<const RiveRenderPath> path,
FillRule fillRule,
const PLSPaint* paint,
const RiveRenderPaint* paint,
RawPath* scratchPath,
TriangulatorAxis triangulatorAxis) :
PLSPathDraw(pixelBounds,
matrix,
std::move(path),
fillRule,
paint,
Type::interiorTriangulationPath,
context->frameInterlockMode())
RiveRenderPathDraw(pixelBounds,
matrix,
std::move(path),
fillRule,
paint,
Type::interiorTriangulationPath,
context->frameInterlockMode())
{
assert(!isStroked());
assert(m_strokeRadius == 0);
@@ -1367,7 +1367,7 @@ InteriorTriangulationDraw::InteriorTriangulationDraw(PLSRenderContext* context,
void InteriorTriangulationDraw::onPushToRenderContext(PLSRenderContext::LogicalFlush* flush)
{
processPath(PathOp::submitOuterCubics, nullptr, nullptr, TriangulatorAxis::dontCare, flush);
if (flush->desc().interlockMode == pls::InterlockMode::atomics)
if (flush->desc().interlockMode == gpu::InterlockMode::atomics)
{
// We need a barrier between the outer cubics and interior triangles in atomic mode.
flush->pushBarrier();
@@ -1525,12 +1525,12 @@ void InteriorTriangulationDraw::processPath(PathOp op,
m_resourceCounts.contourCount = contourCount;
// maxTessellatedSegmentCount does not get doubled when we emit both forward and
// mirrored contours because the forward and mirrored pair both get packed into a single
// pls::TessVertexSpan.
// gpu::TessVertexSpan.
m_resourceCounts.maxTessellatedSegmentCount = patchCount;
// outerCubic patches emit their tessellated geometry twice: once forward and once
// mirrored.
m_resourceCounts.outerCubicTessVertexCount =
m_contourDirections == pls::ContourDirections::reverseAndForward
m_contourDirections == gpu::ContourDirections::reverseAndForward
? patchCount * kOuterCurvePatchSegmentSpan * 2
: patchCount * kOuterCurvePatchSegmentSpan;
m_resourceCounts.maxTriangleVertexCount = m_triangulator->maxVertexCount();
@@ -1628,10 +1628,10 @@ StencilClipReset::StencilClipReset(PLSRenderContext* context,
switch (resetAction)
{
case ResetAction::intersectPreviousClip:
m_drawContents |= pls::DrawContents::activeClip;
m_drawContents |= gpu::DrawContents::activeClip;
[[fallthrough]];
case ResetAction::clearPreviousClip:
m_drawContents |= pls::DrawContents::clipUpdate;
m_drawContents |= gpu::DrawContents::clipUpdate;
break;
}
m_resourceCounts.maxTriangleVertexCount = 6;
@@ -1641,4 +1641,4 @@ void StencilClipReset::pushToRenderContext(PLSRenderContext::LogicalFlush* flush
{
flush->pushStencilClipReset(this);
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -7,7 +7,7 @@
#include "rive/math/simd.hpp"
#include "rive/math/vec2d.hpp"
namespace rive::pls
namespace rive::gpu
{
// Optimized SIMD helper for evaluating a single cubic at many points.
class EvalCubic
@@ -52,4 +52,4 @@ private:
float4 m_C;
float4 m_P0;
};
} // namespace rive::pls
} // namespace rive::gpu
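For reference, the computation a helper like this vectorizes is plain power-basis (Horner) evaluation of a cubic. Below is a scalar, single-coordinate sketch using the standard cubic Bezier expansion; it is not code from this commit, and EvalCubic itself processes four values per float4 lane.
```
// One coordinate of a cubic Bezier p0..p3 in power-basis form:
// B(t) = ((a*t + b)*t + c)*t + p0
struct CubicCoeffs
{
    float a, b, c, p0;
};

inline CubicCoeffs make_cubic_coeffs(float p0, float p1, float p2, float p3)
{
    return {
        p3 - p0 + 3 * (p1 - p2), // t^3 coefficient
        3 * (p0 - 2 * p1 + p2),  // t^2 coefficient
        3 * (p1 - p0),           // t^1 coefficient
        p0,                      // constant term
    };
}

inline float eval_cubic(const CubicCoeffs& k, float t)
{
    return ((k.a * t + k.b) * t + k.c) * t + k.p0; // Horner form, one lane of the SIMD version
}
```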

View File

@@ -2,11 +2,11 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/gl_state.hpp"
#include "rive/renderer/gl/gl_state.hpp"
#include "shaders/constants.glsl"
namespace rive::pls
namespace rive::gpu
{
void GLState::invalidate()
{
@@ -260,4 +260,4 @@ void GLState::deleteBuffer(GLuint bufferID)
if (m_validState.boundPixelUnpackBufferID && m_boundPixelUnpackBufferID == bufferID)
m_boundPixelUnpackBufferID = 0;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,7 +2,7 @@
* Copyright 2022 Rive
*/
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include <stdio.h>
#include <sstream>
@@ -87,7 +87,7 @@ GLuint CompileShader(GLuint type,
{
shaderSource << "#define " << defines[i] << " true\n";
}
shaderSource << rive::pls::glsl::glsl << "\n";
shaderSource << rive::gpu::glsl::glsl << "\n";
for (size_t i = 0; i < numInputSources; ++i)
{
shaderSource << inputSources[i] << "\n";

View File

@@ -2,7 +2,7 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/gles3.hpp"
#include "rive/renderer/gl/gles3.hpp"
#include <EGL/egl.h>

View File

@@ -2,13 +2,13 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/load_store_actions_ext.hpp"
#include "rive/renderer/gl/load_store_actions_ext.hpp"
#include "generated/shaders/pls_load_store_ext.glsl.hpp"
namespace rive::pls
namespace rive::gpu
{
LoadStoreActionsEXT BuildLoadActionsEXT(const pls::FlushDescriptor& desc,
LoadStoreActionsEXT BuildLoadActionsEXT(const gpu::FlushDescriptor& desc,
std::array<float, 4>* clearColor4f)
{
LoadStoreActionsEXT actions = LoadStoreActionsEXT::clearCoverage;
@@ -21,7 +21,7 @@ LoadStoreActionsEXT BuildLoadActionsEXT(const pls::FlushDescriptor& desc,
{
actions |= LoadStoreActionsEXT::loadColor;
}
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
actions |= LoadStoreActionsEXT::clearClip;
}
@@ -51,7 +51,7 @@ std::ostream& BuildLoadStoreEXTGLSL(std::ostream& shader, LoadStoreActionsEXT ac
{
addDefine(GLSL_CLEAR_CLIP);
}
shader << pls::glsl::pls_load_store_ext;
shader << gpu::glsl::pls_load_store_ext;
return shader;
}
} // namespace rive::pls
} // namespace rive::gpu
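The two helpers in this file are meant to be used together: build the action flags from the flush descriptor, then emit the matching GLSL. A minimal sketch, with the flush descriptor assumed to be supplied by the caller and error handling omitted:
```
#include <array>
#include <sstream>
#include <string>
#include "rive/renderer/gl/load_store_actions_ext.hpp"

std::string build_load_store_glsl(const rive::gpu::FlushDescriptor& desc)
{
    std::array<float, 4> clearColor4f; // filled in when the flush clears; unused in this sketch
    rive::gpu::LoadStoreActionsEXT actions = rive::gpu::BuildLoadActionsEXT(desc, &clearColor4f);
    std::ostringstream glsl;
    rive::gpu::BuildLoadStoreEXTGLSL(glsl, actions); // appends pls_load_store_ext with matching defines
    return glsl.str();
}
```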

View File

@@ -2,17 +2,17 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/pls/gl/load_store_actions_ext.hpp"
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/renderer/gl/load_store_actions_ext.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include "rive/math/simd.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include <sstream>
#include "generated/shaders/pls_load_store_ext.exports.h"
namespace rive::pls
namespace rive::gpu
{
// Wraps an EXT_shader_pixel_local_storage load/store program, described by a set of
// LoadStoreActions.
@@ -24,7 +24,7 @@ public:
PLSLoadStoreProgram(LoadStoreActionsEXT actions,
GLuint vertexShader,
pls::ShaderFeatures combinedShaderFeatures,
gpu::ShaderFeatures combinedShaderFeatures,
rcp<GLState> state) :
m_state(std::move(state))
{
@@ -34,7 +34,7 @@ public:
std::ostringstream glsl;
glsl << "#version 300 es\n";
glsl << "#define " GLSL_FRAGMENT "\n";
if (combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
glsl << "#define " GLSL_ENABLE_CLIPPING "\n";
}
@@ -131,16 +131,16 @@ public:
glDisable(GL_SHADER_PIXEL_LOCAL_STORAGE_EXT);
}
void pushShaderDefines(pls::InterlockMode, std::vector<const char*>* defines) const override
void pushShaderDefines(gpu::InterlockMode, std::vector<const char*>* defines) const override
{
defines->push_back(GLSL_PLS_IMPL_EXT_NATIVE);
}
private:
const PLSLoadStoreProgram& findLoadStoreProgram(LoadStoreActionsEXT actions,
pls::ShaderFeatures combinedShaderFeatures)
gpu::ShaderFeatures combinedShaderFeatures)
{
bool hasClipping = combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING;
bool hasClipping = combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING;
uint32_t programKey =
(static_cast<uint32_t>(actions) << 1) | static_cast<uint32_t>(hasClipping);
@@ -180,4 +180,4 @@ std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::MakePLS
{
return std::make_unique<PLSImplEXTNative>(capabilities);
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,15 +2,15 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include "shaders/constants.glsl"
#include "generated/shaders/glsl.exports.h"
namespace rive::pls
namespace rive::gpu
{
using DrawBufferMask = PLSRenderTargetGL::DrawBufferMask;
@@ -52,11 +52,11 @@ public:
GLuint coverageClear[4]{desc.coverageClearValue};
auto fbFetchBuffers = DrawBufferMask::color;
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
fbFetchBuffers |= DrawBufferMask::clip;
}
if (desc.interlockMode == pls::InterlockMode::rasterOrdering)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering)
{
fbFetchBuffers |= DrawBufferMask::coverage | DrawBufferMask::scratchColor;
}
@@ -85,7 +85,7 @@ public:
UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
glClearBufferfv(GL_COLOR, COLOR_PLANE_IDX, clearColor4f);
}
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
constexpr static uint32_t kZero[4]{};
glClearBufferuiv(GL_COLOR, CLIP_PLANE_IDX, kZero);
@@ -95,13 +95,13 @@ public:
glClearBufferuiv(GL_COLOR, COVERAGE_PLANE_IDX, coverageClear);
}
if (desc.interlockMode == pls::InterlockMode::atomics &&
!(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIP_RECT))
if (desc.interlockMode == gpu::InterlockMode::atomics &&
!(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIP_RECT))
{
plsContextImpl->state()->setBlendEquation(BlendMode::srcOver);
}
if (desc.interlockMode == pls::InterlockMode::atomics)
if (desc.interlockMode == gpu::InterlockMode::atomics)
{
glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
}
@@ -109,7 +109,7 @@ public:
void deactivatePixelLocalStorage(PLSRenderContextGLImpl*, const FlushDescriptor& desc) override
{
if (desc.interlockMode == pls::InterlockMode::atomics)
if (desc.interlockMode == gpu::InterlockMode::atomics)
{
glMemoryBarrierByRegion(GL_ALL_BARRIER_BITS);
}
@@ -131,11 +131,11 @@ public:
}
}
void pushShaderDefines(pls::InterlockMode interlockMode,
void pushShaderDefines(gpu::InterlockMode interlockMode,
std::vector<const char*>* defines) const override
{
defines->push_back(GLSL_PLS_IMPL_FRAMEBUFFER_FETCH);
if (interlockMode == pls::InterlockMode::atomics)
if (interlockMode == gpu::InterlockMode::atomics)
{
defines->push_back(GLSL_USING_PLS_STORAGE_TEXTURES);
}
@@ -157,13 +157,13 @@ public:
}
}
void onBarrier(const pls::FlushDescriptor& desc) override
void onBarrier(const gpu::FlushDescriptor& desc) override
{
if (m_capabilities.QCOM_shader_framebuffer_fetch_noncoherent)
{
glFramebufferFetchBarrierQCOM();
}
if (desc.interlockMode == pls::InterlockMode::atomics)
if (desc.interlockMode == gpu::InterlockMode::atomics)
{
glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
}
@@ -178,4 +178,4 @@ std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::
{
return std::make_unique<PLSImplFramebufferFetch>(extensions);
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,27 +2,27 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include "shaders/constants.glsl"
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include "generated/shaders/glsl.exports.h"
namespace rive::pls
namespace rive::gpu
{
using DrawBufferMask = PLSRenderTargetGL::DrawBufferMask;
static bool needs_atomic_fixed_function_color_blend(const pls::FlushDescriptor& desc)
static bool needs_atomic_fixed_function_color_blend(const gpu::FlushDescriptor& desc)
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
return !(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
return !(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND);
}
static bool needs_coalesced_atomic_resolve_and_transfer(const pls::FlushDescriptor& desc)
static bool needs_coalesced_atomic_resolve_and_transfer(const gpu::FlushDescriptor& desc)
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
return (desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND) &&
lite_rtti_cast<FramebufferRenderTargetGL*>(
static_cast<PLSRenderTargetGL*>(desc.renderTarget)) != nullptr;
@@ -42,7 +42,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
auto renderTarget = static_cast<PLSRenderTargetGL*>(desc.renderTarget);
renderTarget->allocateInternalPLSTextures(desc.interlockMode);
if (desc.interlockMode == pls::InterlockMode::atomics &&
if (desc.interlockMode == gpu::InterlockMode::atomics &&
needs_atomic_fixed_function_color_blend(desc))
{
plsContextImpl->state()->setBlendEquation(BlendMode::srcOver);
@@ -53,7 +53,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
// We're targeting an external FBO but can't render to it directly. Make sure to
// allocate and attach an offscreen target texture.
framebufferRenderTarget->allocateOffscreenTargetTexture();
if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget)
if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget)
{
// Copy the framebuffer's contents to our offscreen texture.
framebufferRenderTarget->bindDestinationFramebuffer(GL_READ_FRAMEBUFFER);
@@ -65,7 +65,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
// Clear the necessary textures.
auto rwTexBuffers = DrawBufferMask::coverage;
if (desc.interlockMode == pls::InterlockMode::rasterOrdering)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering)
{
rwTexBuffers |= DrawBufferMask::color | DrawBufferMask::scratchColor;
}
@@ -73,12 +73,12 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
{
rwTexBuffers |= DrawBufferMask::color;
}
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
rwTexBuffers |= DrawBufferMask::clip;
}
renderTarget->bindInternalFramebuffer(GL_FRAMEBUFFER, rwTexBuffers);
if (desc.colorLoadAction == pls::LoadAction::clear &&
if (desc.colorLoadAction == gpu::LoadAction::clear &&
(rwTexBuffers & DrawBufferMask::color))
{
// If the color buffer is not a storage texture, we will clear it once the main
@@ -87,7 +87,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
glClearBufferfv(GL_COLOR, COLOR_PLANE_IDX, clearColor4f);
}
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
constexpr static GLuint kZeroClear[4]{};
glClearBufferuiv(GL_COLOR, CLIP_PLANE_IDX, kZeroClear);
@@ -99,14 +99,14 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
switch (desc.interlockMode)
{
case pls::InterlockMode::rasterOrdering:
case gpu::InterlockMode::rasterOrdering:
// rasterOrdering mode renders by storing to an image texture. Bind a framebuffer
// with no color attachments.
renderTarget->bindHeadlessFramebuffer(plsContextImpl->m_capabilities);
break;
case pls::InterlockMode::atomics:
case gpu::InterlockMode::atomics:
renderTarget->bindDestinationFramebuffer(GL_FRAMEBUFFER);
if (desc.colorLoadAction == pls::LoadAction::clear &&
if (desc.colorLoadAction == gpu::LoadAction::clear &&
!(rwTexBuffers & DrawBufferMask::color))
{
// We're rendering directly to the main framebuffer. Clear it now.
@@ -131,29 +131,29 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
}
pls::ShaderMiscFlags shaderMiscFlags(const pls::FlushDescriptor& desc,
pls::DrawType drawType) const final
gpu::ShaderMiscFlags shaderMiscFlags(const gpu::FlushDescriptor& desc,
gpu::DrawType drawType) const final
{
auto flags = pls::ShaderMiscFlags::none;
if (desc.interlockMode == pls::InterlockMode::atomics)
auto flags = gpu::ShaderMiscFlags::none;
if (desc.interlockMode == gpu::InterlockMode::atomics)
{
if (needs_atomic_fixed_function_color_blend(desc))
{
flags |= pls::ShaderMiscFlags::fixedFunctionColorBlend;
flags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend;
}
if (drawType == pls::DrawType::plsAtomicResolve &&
if (drawType == gpu::DrawType::gpuAtomicResolve &&
needs_coalesced_atomic_resolve_and_transfer(desc))
{
flags |= pls::ShaderMiscFlags::coalescedResolveAndTransfer;
flags |= gpu::ShaderMiscFlags::coalescedResolveAndTransfer;
}
}
return flags;
}
void setupAtomicResolve(PLSRenderContextGLImpl* plsContextImpl,
const pls::FlushDescriptor& desc) override
const gpu::FlushDescriptor& desc) override
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
if (needs_coalesced_atomic_resolve_and_transfer(desc))
{
// Turn the color mask back on now that we're about to resolve.
@@ -167,7 +167,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
// atomic mode never needs to copy anything here because it transfers the offscreen texture
// during resolve.
if (desc.interlockMode == pls::InterlockMode::rasterOrdering)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering)
{
if (auto framebufferRenderTarget = lite_rtti_cast<FramebufferRenderTargetGL*>(
static_cast<PLSRenderTargetGL*>(desc.renderTarget)))
@@ -183,13 +183,13 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::
}
}
void pushShaderDefines(pls::InterlockMode, std::vector<const char*>* defines) const override
void pushShaderDefines(gpu::InterlockMode, std::vector<const char*>* defines) const override
{
defines->push_back(GLSL_PLS_IMPL_STORAGE_TEXTURE);
defines->push_back(GLSL_USING_PLS_STORAGE_TEXTURES);
}
void onBarrier(const pls::FlushDescriptor&) override
void onBarrier(const gpu::FlushDescriptor&) override
{
return glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
}
@@ -199,4 +199,4 @@ std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::MakePLS
{
return std::make_unique<PLSImplRWTexture>();
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,10 +2,10 @@
* Copyright 2022 Rive
*/
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/pls/gl/gl_utils.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/renderer/gl/gl_utils.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include "shaders/constants.glsl"
#include "generated/shaders/glsl.exports.h"
@@ -142,19 +142,19 @@ void glProvokingVertexANGLE(GLenum provokeMode)
}
#endif // RIVE_WEBGL
namespace rive::pls
namespace rive::gpu
{
using DrawBufferMask = PLSRenderTargetGL::DrawBufferMask;
static GLenum webgl_load_op(pls::LoadAction loadAction)
static GLenum webgl_load_op(gpu::LoadAction loadAction)
{
switch (loadAction)
{
case pls::LoadAction::clear:
case gpu::LoadAction::clear:
return GL_LOAD_OP_CLEAR_ANGLE;
case pls::LoadAction::preserveRenderTarget:
case gpu::LoadAction::preserveRenderTarget:
return GL_LOAD_OP_LOAD_ANGLE;
case pls::LoadAction::dontCare:
case gpu::LoadAction::dontCare:
return GL_LOAD_OP_ZERO_ANGLE;
}
RIVE_UNREACHABLE();
@@ -197,7 +197,7 @@ class PLSRenderContextGLImpl::PLSImplWebGL : public PLSRenderContextGLImpl::PLSI
UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
glFramebufferPixelLocalClearValuefvANGLE(COLOR_PLANE_IDX, clearColor4f);
}
GLenum clipLoadAction = (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
GLenum clipLoadAction = (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
? GL_LOAD_OP_ZERO_ANGLE
: GL_DONT_CARE;
GLenum loadOps[4] = {webgl_load_op(desc.colorLoadAction),
@@ -235,7 +235,7 @@ class PLSRenderContextGLImpl::PLSImplWebGL : public PLSRenderContextGLImpl::PLSI
}
}
void pushShaderDefines(pls::InterlockMode, std::vector<const char*>* defines) const override
void pushShaderDefines(gpu::InterlockMode, std::vector<const char*>* defines) const override
{
defines->push_back(GLSL_PLS_IMPL_ANGLE);
}
@@ -245,4 +245,4 @@ std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::MakePLS
{
return std::make_unique<PLSImplWebGL>();
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,11 +2,11 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/pls_render_buffer_gl_impl.hpp"
#include "rive/renderer/gl/render_buffer_gl_impl.hpp"
#include "rive/pls/gl/gl_state.hpp"
#include "rive/renderer/gl/gl_state.hpp"
namespace rive::pls
namespace rive::gpu
{
PLSRenderBufferGLImpl::PLSRenderBufferGLImpl(RenderBufferType type,
RenderBufferFlags flags,
@@ -41,7 +41,7 @@ void PLSRenderBufferGLImpl::init(rcp<GLState> state)
assert(!m_bufferIDs[0]);
m_state = std::move(state);
int bufferCount =
(flags() & RenderBufferFlags::mappedOnceAtInitialization) ? 1 : pls::kBufferRingSize;
(flags() & RenderBufferFlags::mappedOnceAtInitialization) ? 1 : gpu::kBufferRingSize;
glGenBuffers(bufferCount, m_bufferIDs.data());
m_state->bindVAO(0);
for (int i = 0; i < bufferCount; ++i)
@@ -55,7 +55,7 @@ void PLSRenderBufferGLImpl::init(rcp<GLState> state)
}
}
std::array<GLuint, pls::kBufferRingSize> PLSRenderBufferGLImpl::detachBuffers()
std::array<GLuint, gpu::kBufferRingSize> PLSRenderBufferGLImpl::detachBuffers()
{
auto detachedBuffers = m_bufferIDs;
m_bufferIDs.fill(0);
@@ -64,7 +64,7 @@ std::array<GLuint, pls::kBufferRingSize> PLSRenderBufferGLImpl::detachBuffers()
void* PLSRenderBufferGLImpl::onMap()
{
m_submittedBufferIdx = (m_submittedBufferIdx + 1) % pls::kBufferRingSize;
m_submittedBufferIdx = (m_submittedBufferIdx + 1) % gpu::kBufferRingSize;
if (!canMapBuffer())
{
if (!m_fallbackMappedMemory)
@@ -123,4 +123,4 @@ bool PLSRenderBufferGLImpl::canMapBuffer() const
return !(flags() & RenderBufferFlags::mappedOnceAtInitialization);
#endif
}
} // namespace rive::pls
} // namespace rive::gpu
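
The buffer-ring rotation in onMap() above is the core of how the CPU writes one buffer while the GPU still reads another. A self-contained sketch of the rotation, assuming a ring size of 3 purely for illustration (the real value is gpu::kBufferRingSize):

```
#include <array>

constexpr int kRingSize = 3; // illustrative; not necessarily gpu::kBufferRingSize

struct BufferRingSketch
{
    std::array<unsigned int, kRingSize> bufferIDs{}; // e.g. GL buffer names
    int submittedIdx = -1;

    // Advance to the next buffer in the ring before mapping it for this frame's data.
    // The previously submitted buffer stays untouched while the GPU consumes it.
    unsigned int nextBufferToMap()
    {
        submittedIdx = (submittedIdx + 1) % kRingSize;
        return bufferIDs[submittedIdx];
    }
};
```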

View File

@@ -2,12 +2,12 @@
* Copyright 2022 Rive
*/
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "rive/pls/gl/pls_render_buffer_gl_impl.hpp"
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/pls/pls_draw.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/gl/render_buffer_gl_impl.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include "rive/renderer/draw.hpp"
#include "rive/renderer/image.hpp"
#include "shaders/constants.glsl"
#include "generated/shaders/advanced_blend.glsl.hpp"
@@ -24,7 +24,7 @@
#ifdef RIVE_WEBGL
// In an effort to save space on web, and since web doesn't have ES 3.1 level support, don't include
// the atomic sources.
namespace rive::pls::glsl
namespace rive::gpu::glsl
{
const char atomic_draw[] = "";
}
@@ -61,7 +61,7 @@ EM_JS(void,
// GL_TEXTURE0 as a scratch texture index.
constexpr static int kPLSTexIdxOffset = 1;
namespace rive::pls
namespace rive::gpu
{
PLSRenderContextGLImpl::PLSRenderContextGLImpl(const char* rendererString,
GLCapabilities capabilities,
@@ -160,8 +160,8 @@ PLSRenderContextGLImpl::PLSRenderContextGLImpl(const char* rendererString,
m_state->bindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_tessSpanIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
sizeof(pls::kTessSpanIndices),
pls::kTessSpanIndices,
sizeof(gpu::kTessSpanIndices),
gpu::kTessSpanIndices,
GL_STATIC_DRAW);
m_state->bindVAO(m_drawVAO);
@@ -198,17 +198,17 @@ PLSRenderContextGLImpl::PLSRenderContextGLImpl(const char* rendererString,
m_state->bindBuffer(GL_ARRAY_BUFFER, m_imageRectVertexBuffer);
glBufferData(GL_ARRAY_BUFFER,
sizeof(pls::kImageRectVertices),
pls::kImageRectVertices,
sizeof(gpu::kImageRectVertices),
gpu::kImageRectVertices,
GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(pls::ImageRectVertex), nullptr);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(gpu::ImageRectVertex), nullptr);
m_state->bindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_imageRectIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
sizeof(pls::kImageRectIndices),
pls::kImageRectIndices,
sizeof(gpu::kImageRectIndices),
gpu::kImageRectIndices,
GL_STATIC_DRAW);
}
@@ -401,45 +401,45 @@ protected:
};
// GL internalformat to use for a texture that polyfills a storage buffer.
static GLenum storage_texture_internalformat(pls::StorageBufferStructure bufferStructure)
static GLenum storage_texture_internalformat(gpu::StorageBufferStructure bufferStructure)
{
switch (bufferStructure)
{
case pls::StorageBufferStructure::uint32x4:
case gpu::StorageBufferStructure::uint32x4:
return GL_RGBA32UI;
case pls::StorageBufferStructure::uint32x2:
case gpu::StorageBufferStructure::uint32x2:
return GL_RG32UI;
case pls::StorageBufferStructure::float32x4:
case gpu::StorageBufferStructure::float32x4:
return GL_RGBA32F;
}
RIVE_UNREACHABLE();
}
// GL format to use for a texture that polyfills a storage buffer.
static GLenum storage_texture_format(pls::StorageBufferStructure bufferStructure)
static GLenum storage_texture_format(gpu::StorageBufferStructure bufferStructure)
{
switch (bufferStructure)
{
case pls::StorageBufferStructure::uint32x4:
case gpu::StorageBufferStructure::uint32x4:
return GL_RGBA_INTEGER;
case pls::StorageBufferStructure::uint32x2:
case gpu::StorageBufferStructure::uint32x2:
return GL_RG_INTEGER;
case pls::StorageBufferStructure::float32x4:
case gpu::StorageBufferStructure::float32x4:
return GL_RGBA;
}
RIVE_UNREACHABLE();
}
// GL type to use for a texture that polyfills a storage buffer.
static GLenum storage_texture_type(pls::StorageBufferStructure bufferStructure)
static GLenum storage_texture_type(gpu::StorageBufferStructure bufferStructure)
{
switch (bufferStructure)
{
case pls::StorageBufferStructure::uint32x4:
case gpu::StorageBufferStructure::uint32x4:
return GL_UNSIGNED_INT;
case pls::StorageBufferStructure::uint32x2:
case gpu::StorageBufferStructure::uint32x2:
return GL_UNSIGNED_INT;
case pls::StorageBufferStructure::float32x4:
case gpu::StorageBufferStructure::float32x4:
return GL_FLOAT;
}
RIVE_UNREACHABLE();
@@ -449,7 +449,7 @@ class StorageBufferRingGLImpl : public BufferRingGLImpl
{
public:
StorageBufferRingGLImpl(size_t capacityInBytes,
pls::StorageBufferStructure bufferStructure,
gpu::StorageBufferStructure bufferStructure,
rcp<GLState> state) :
BufferRingGLImpl(
// If we don't support storage buffers, instead make a pixel-unpack buffer that
@@ -472,24 +472,24 @@ public:
}
protected:
const pls::StorageBufferStructure m_bufferStructure;
const gpu::StorageBufferStructure m_bufferStructure;
};
class TexelBufferRingWebGL : public BufferRing
{
public:
TexelBufferRingWebGL(size_t capacityInBytes,
pls::StorageBufferStructure bufferStructure,
gpu::StorageBufferStructure bufferStructure,
rcp<GLState> state) :
BufferRing(pls::StorageTextureBufferSize(capacityInBytes, bufferStructure)),
BufferRing(gpu::StorageTextureBufferSize(capacityInBytes, bufferStructure)),
m_bufferStructure(bufferStructure),
m_state(std::move(state))
{
auto [width, height] = pls::StorageTextureSize(capacityInBytes, m_bufferStructure);
auto [width, height] = gpu::StorageTextureSize(capacityInBytes, m_bufferStructure);
GLenum internalformat = storage_texture_internalformat(m_bufferStructure);
glGenTextures(pls::kBufferRingSize, m_textures);
glGenTextures(gpu::kBufferRingSize, m_textures);
glActiveTexture(GL_TEXTURE0);
for (size_t i = 0; i < pls::kBufferRingSize; ++i)
for (size_t i = 0; i < gpu::kBufferRingSize; ++i)
{
glBindTexture(GL_TEXTURE_2D, m_textures[i]);
glTexStorage2D(GL_TEXTURE_2D, 1, internalformat, width, height);
@@ -498,7 +498,7 @@ public:
glBindTexture(GL_TEXTURE_2D, 0);
}
~TexelBufferRingWebGL() { glDeleteTextures(pls::kBufferRingSize, m_textures); }
~TexelBufferRingWebGL() { glDeleteTextures(gpu::kBufferRingSize, m_textures); }
void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override { return shadowBuffer(); }
void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {}
@@ -508,7 +508,7 @@ public:
size_t offsetSizeInBytes) const
{
auto [updateWidth, updateHeight] =
pls::StorageTextureSize(bindingSizeInBytes, m_bufferStructure);
gpu::StorageTextureSize(bindingSizeInBytes, m_bufferStructure);
m_state->bindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glActiveTexture(GL_TEXTURE0 + kPLSTexIdxOffset + bindingIdx);
glBindTexture(GL_TEXTURE_2D, m_textures[submittedBufferIdx()]);
@@ -524,9 +524,9 @@ public:
}
protected:
const pls::StorageBufferStructure m_bufferStructure;
const gpu::StorageBufferStructure m_bufferStructure;
const rcp<GLState> m_state;
GLuint m_textures[pls::kBufferRingSize];
GLuint m_textures[gpu::kBufferRingSize];
};
std::unique_ptr<BufferRing> PLSRenderContextGLImpl::makeUniformBufferRing(size_t capacityInBytes)
@@ -536,7 +536,7 @@ std::unique_ptr<BufferRing> PLSRenderContextGLImpl::makeUniformBufferRing(size_t
std::unique_ptr<BufferRing> PLSRenderContextGLImpl::makeStorageBufferRing(
size_t capacityInBytes,
pls::StorageBufferStructure bufferStructure)
gpu::StorageBufferStructure bufferStructure)
{
if (capacityInBytes == 0)
{
@@ -611,13 +611,13 @@ void PLSRenderContextGLImpl::resizeTessellationTexture(uint32_t width, uint32_t
PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContextImpl,
GLenum shaderType,
pls::DrawType drawType,
gpu::DrawType drawType,
ShaderFeatures shaderFeatures,
pls::InterlockMode interlockMode,
pls::ShaderMiscFlags shaderMiscFlags)
gpu::InterlockMode interlockMode,
gpu::ShaderMiscFlags shaderMiscFlags)
{
#ifdef DISABLE_PLS_ATOMICS
if (interlockMode == pls::InterlockMode::atomics)
if (interlockMode == gpu::InterlockMode::atomics)
{
// Don't draw anything in atomic mode if support for it isn't compiled in.
return;
@@ -629,12 +629,12 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex
{
plsContextImpl->m_plsImpl->pushShaderDefines(interlockMode, &defines);
}
if (interlockMode == pls::InterlockMode::atomics)
if (interlockMode == gpu::InterlockMode::atomics)
{
// Atomics are currently always done on storage textures.
defines.push_back(GLSL_USING_PLS_STORAGE_TEXTURES);
}
if (shaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend)
if (shaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend)
{
defines.push_back(GLSL_FIXED_FUNCTION_COLOR_BLEND);
}
@@ -644,8 +644,8 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex
if (shaderFeatures & feature)
{
assert((kVertexShaderFeaturesMask & feature) || shaderType == GL_FRAGMENT_SHADER);
if (interlockMode == pls::InterlockMode::depthStencil &&
feature == pls::ShaderFeatures::ENABLE_ADVANCED_BLEND &&
if (interlockMode == gpu::InterlockMode::depthStencil &&
feature == gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND &&
plsContextImpl->m_capabilities.KHR_blend_equation_advanced_coherent)
{
defines.push_back(GLSL_ENABLE_KHR_BLEND);
@@ -656,7 +656,7 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex
}
}
}
if (interlockMode == pls::InterlockMode::depthStencil)
if (interlockMode == gpu::InterlockMode::depthStencil)
{
defines.push_back(GLSL_USING_DEPTH_STENCIL);
}
@@ -679,8 +679,8 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex
}
switch (drawType)
{
case pls::DrawType::midpointFanPatches:
case pls::DrawType::outerCurvePatches:
case gpu::DrawType::midpointFanPatches:
case gpu::DrawType::outerCurvePatches:
if (shaderType == GL_VERTEX_SHADER)
{
defines.push_back(GLSL_ENABLE_INSTANCE_INDEX);
@@ -690,46 +690,46 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex
}
}
defines.push_back(GLSL_DRAW_PATH);
sources.push_back(pls::glsl::draw_path_common);
sources.push_back(interlockMode == pls::InterlockMode::atomics ? pls::glsl::atomic_draw
: pls::glsl::draw_path);
sources.push_back(gpu::glsl::draw_path_common);
sources.push_back(interlockMode == gpu::InterlockMode::atomics ? gpu::glsl::atomic_draw
: gpu::glsl::draw_path);
break;
case pls::DrawType::stencilClipReset:
assert(interlockMode == pls::InterlockMode::depthStencil);
sources.push_back(pls::glsl::stencil_draw);
case gpu::DrawType::stencilClipReset:
assert(interlockMode == gpu::InterlockMode::depthStencil);
sources.push_back(gpu::glsl::stencil_draw);
break;
case pls::DrawType::interiorTriangulation:
case gpu::DrawType::interiorTriangulation:
defines.push_back(GLSL_DRAW_INTERIOR_TRIANGLES);
sources.push_back(pls::glsl::draw_path_common);
sources.push_back(interlockMode == pls::InterlockMode::atomics ? pls::glsl::atomic_draw
: pls::glsl::draw_path);
sources.push_back(gpu::glsl::draw_path_common);
sources.push_back(interlockMode == gpu::InterlockMode::atomics ? gpu::glsl::atomic_draw
: gpu::glsl::draw_path);
break;
case pls::DrawType::imageRect:
assert(interlockMode == pls::InterlockMode::atomics);
case gpu::DrawType::imageRect:
assert(interlockMode == gpu::InterlockMode::atomics);
defines.push_back(GLSL_DRAW_IMAGE);
defines.push_back(GLSL_DRAW_IMAGE_RECT);
sources.push_back(pls::glsl::atomic_draw);
sources.push_back(gpu::glsl::atomic_draw);
break;
case pls::DrawType::imageMesh:
case gpu::DrawType::imageMesh:
defines.push_back(GLSL_DRAW_IMAGE);
defines.push_back(GLSL_DRAW_IMAGE_MESH);
sources.push_back(interlockMode == pls::InterlockMode::atomics
? pls::glsl::atomic_draw
: pls::glsl::draw_image_mesh);
sources.push_back(interlockMode == gpu::InterlockMode::atomics
? gpu::glsl::atomic_draw
: gpu::glsl::draw_image_mesh);
break;
case pls::DrawType::plsAtomicResolve:
assert(interlockMode == pls::InterlockMode::atomics);
case gpu::DrawType::gpuAtomicResolve:
assert(interlockMode == gpu::InterlockMode::atomics);
defines.push_back(GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS);
defines.push_back(GLSL_RESOLVE_PLS);
if (shaderMiscFlags & pls::ShaderMiscFlags::coalescedResolveAndTransfer)
if (shaderMiscFlags & gpu::ShaderMiscFlags::coalescedResolveAndTransfer)
{
assert(shaderType == GL_FRAGMENT_SHADER);
defines.push_back(GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER);
}
sources.push_back(pls::glsl::atomic_draw);
sources.push_back(gpu::glsl::atomic_draw);
break;
case pls::DrawType::plsAtomicInitialize:
assert(interlockMode == pls::InterlockMode::atomics);
case gpu::DrawType::gpuAtomicInitialize:
assert(interlockMode == gpu::InterlockMode::atomics);
RIVE_UNREACHABLE();
}
if (plsContextImpl->m_capabilities.ARB_bindless_texture)
@@ -750,10 +750,10 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex
}
PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsContextImpl,
pls::DrawType drawType,
pls::ShaderFeatures shaderFeatures,
pls::InterlockMode interlockMode,
pls::ShaderMiscFlags fragmentShaderMiscFlags) :
gpu::DrawType drawType,
gpu::ShaderFeatures shaderFeatures,
gpu::InterlockMode interlockMode,
gpu::ShaderMiscFlags fragmentShaderMiscFlags) :
m_fragmentShader(plsContextImpl,
GL_FRAGMENT_SHADER,
drawType,
@@ -765,10 +765,10 @@ PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsCont
// Not every vertex shader is unique. Cache them by just the vertex features and reuse when
// possible.
ShaderFeatures vertexShaderFeatures = shaderFeatures & kVertexShaderFeaturesMask;
uint32_t vertexShaderKey = pls::ShaderUniqueKey(drawType,
uint32_t vertexShaderKey = gpu::ShaderUniqueKey(drawType,
vertexShaderFeatures,
interlockMode,
pls::ShaderMiscFlags::none);
gpu::ShaderMiscFlags::none);
const DrawShader& vertexShader = plsContextImpl->m_vertexShaders
.try_emplace(vertexShaderKey,
plsContextImpl,
@@ -776,7 +776,7 @@ PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsCont
drawType,
vertexShaderFeatures,
interlockMode,
pls::ShaderMiscFlags::none)
gpu::ShaderMiscFlags::none)
.first->second;
m_id = glCreateProgram();
@@ -811,8 +811,8 @@ PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsCont
glUniform1i(glGetUniformLocation(m_id, GLSL_gradTexture), kPLSTexIdxOffset + GRAD_TEXTURE_IDX);
glUniform1i(glGetUniformLocation(m_id, GLSL_imageTexture),
kPLSTexIdxOffset + IMAGE_TEXTURE_IDX);
if (interlockMode == pls::InterlockMode::depthStencil &&
(shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) &&
if (interlockMode == gpu::InterlockMode::depthStencil &&
(shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) &&
!plsContextImpl->m_capabilities.KHR_blend_equation_advanced_coherent)
{
glUniform1i(glGetUniformLocation(m_id, GLSL_dstColorTexture),
@@ -855,18 +855,18 @@ static void bind_storage_buffer(const GLCapabilities& capabilities,
void PLSRenderContextGLImpl::PLSImpl::ensureRasterOrderingEnabled(
PLSRenderContextGLImpl* plsContextImpl,
const pls::FlushDescriptor& desc,
const gpu::FlushDescriptor& desc,
bool enabled)
{
assert(!enabled || supportsRasterOrdering(plsContextImpl->m_capabilities));
auto rasterOrderState = enabled ? pls::TriState::yes : pls::TriState::no;
auto rasterOrderState = enabled ? gpu::TriState::yes : gpu::TriState::no;
if (m_rasterOrderingEnabled != rasterOrderState)
{
onEnableRasterOrdering(enabled);
m_rasterOrderingEnabled = rasterOrderState;
// We only need a barrier when turning raster ordering OFF, because PLS already inserts the
// necessary barriers after draws when it's disabled.
if (m_rasterOrderingEnabled == pls::TriState::no)
if (m_rasterOrderingEnabled == gpu::TriState::no)
{
onBarrier(desc);
}
@@ -956,7 +956,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
FLUSH_UNIFORM_BUFFER_IDX,
gl_buffer_id(flushUniformBufferRing()),
desc.flushUniformDataOffsetInBytes,
sizeof(pls::FlushUniforms));
sizeof(gpu::FlushUniforms));
// All programs use the same storage buffers.
if (desc.pathCount > 0)
@@ -964,20 +964,20 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
bind_storage_buffer(m_capabilities,
pathBufferRing(),
PATH_BUFFER_IDX,
desc.pathCount * sizeof(pls::PathData),
desc.firstPath * sizeof(pls::PathData));
desc.pathCount * sizeof(gpu::PathData),
desc.firstPath * sizeof(gpu::PathData));
bind_storage_buffer(m_capabilities,
paintBufferRing(),
PAINT_BUFFER_IDX,
desc.pathCount * sizeof(pls::PaintData),
desc.firstPaint * sizeof(pls::PaintData));
desc.pathCount * sizeof(gpu::PaintData),
desc.firstPaint * sizeof(gpu::PaintData));
bind_storage_buffer(m_capabilities,
paintAuxBufferRing(),
PAINT_AUX_BUFFER_IDX,
desc.pathCount * sizeof(pls::PaintAuxData),
desc.firstPaintAux * sizeof(pls::PaintAuxData));
desc.pathCount * sizeof(gpu::PaintAuxData),
desc.firstPaintAux * sizeof(gpu::PaintAuxData));
}
if (desc.contourCount > 0)
@@ -985,8 +985,8 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
bind_storage_buffer(m_capabilities,
contourBufferRing(),
CONTOUR_BUFFER_IDX,
desc.contourCount * sizeof(pls::ContourData),
desc.firstContour * sizeof(pls::ContourData));
desc.contourCount * sizeof(gpu::ContourData),
desc.firstContour * sizeof(gpu::ContourData));
}
// Render the complex color ramps into the gradient texture.
@@ -1000,7 +1000,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
4,
GL_UNSIGNED_INT,
0,
reinterpret_cast<const void*>(desc.firstComplexGradSpan * sizeof(pls::GradientSpan)));
reinterpret_cast<const void*>(desc.firstComplexGradSpan * sizeof(gpu::GradientSpan)));
glViewport(0, desc.complexGradRowsTop, kGradTextureWidth, desc.complexGradRowsHeight);
glBindFramebuffer(GL_FRAMEBUFFER, m_colorRampFBO);
m_state->bindProgram(m_colorRampProgram);
@@ -1046,7 +1046,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
m_state->bindBuffer(GL_ARRAY_BUFFER, gl_buffer_id(tessSpanBufferRing()));
m_state->bindVAO(m_tessellateVAO);
m_state->setCullFace(GL_BACK);
size_t tessSpanOffsetInBytes = desc.firstTessVertexSpan * sizeof(pls::TessVertexSpan);
size_t tessSpanOffsetInBytes = desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan);
for (GLuint i = 0; i < 3; ++i)
{
glVertexAttribPointer(i,
@@ -1062,13 +1062,13 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
GL_UNSIGNED_INT,
sizeof(TessVertexSpan),
reinterpret_cast<const void*>(tessSpanOffsetInBytes + offsetof(TessVertexSpan, x0x1)));
glViewport(0, 0, pls::kTessTextureWidth, desc.tessDataHeight);
glViewport(0, 0, gpu::kTessTextureWidth, desc.tessDataHeight);
glBindFramebuffer(GL_FRAMEBUFFER, m_tessellateFBO);
m_state->bindProgram(m_tessellateProgram);
GLenum colorAttachment0 = GL_COLOR_ATTACHMENT0;
glInvalidateFramebuffer(GL_FRAMEBUFFER, 1, &colorAttachment0);
glDrawElementsInstanced(GL_TRIANGLES,
std::size(pls::kTessSpanIndices),
std::size(gpu::kTessSpanIndices),
GL_UNSIGNED_SHORT,
0,
desc.tessVertexSpanCount);
@@ -1079,13 +1079,13 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
// (ANGLE_shader_pixel_local_storage doesn't allow shader compilation while active.)
for (const DrawBatch& batch : *desc.drawList)
{
auto shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics
auto shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics
? desc.combinedShaderFeatures
: batch.shaderFeatures;
auto fragmentShaderMiscFlags = m_plsImpl != nullptr
? m_plsImpl->shaderMiscFlags(desc, batch.drawType)
: pls::ShaderMiscFlags::none;
uint32_t fragmentShaderKey = pls::ShaderUniqueKey(batch.drawType,
: gpu::ShaderMiscFlags::none;
uint32_t fragmentShaderKey = gpu::ShaderUniqueKey(batch.drawType,
shaderFeatures,
desc.interlockMode,
fragmentShaderMiscFlags);
@@ -1117,7 +1117,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
auto msaaResolveAction = PLSRenderTargetGL::MSAAResolveAction::automatic;
std::array<GLenum, 3> msaaDepthStencilColor;
if (desc.interlockMode != pls::InterlockMode::depthStencil)
if (desc.interlockMode != gpu::InterlockMode::depthStencil)
{
assert(desc.msaaSampleCount == 0);
m_plsImpl->activatePixelLocalStorage(this, desc);
@@ -1126,7 +1126,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
{
// Render with MSAA in depthStencil mode.
assert(desc.msaaSampleCount > 0);
bool preserveRenderTarget = desc.colorLoadAction == pls::LoadAction::preserveRenderTarget;
bool preserveRenderTarget = desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget;
bool isFBO0;
msaaResolveAction = renderTarget->bindMSAAFramebuffer(
this,
@@ -1150,7 +1150,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
msaaDepthStencilColor.data());
GLbitfield buffersToClear = GL_STENCIL_BUFFER_BIT | GL_DEPTH_BUFFER_BIT;
if (desc.colorLoadAction == pls::LoadAction::clear)
if (desc.colorLoadAction == gpu::LoadAction::clear)
{
float cc[4];
UnpackColorToRGBA32F(desc.clearColor, cc);
@@ -1162,7 +1162,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
glEnable(GL_STENCIL_TEST);
glEnable(GL_DEPTH_TEST);
if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND)
if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND)
{
if (m_capabilities.KHR_blend_equation_advanced_coherent)
{
@@ -1187,13 +1187,13 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
continue;
}
auto shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics
auto shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics
? desc.combinedShaderFeatures
: batch.shaderFeatures;
auto fragmentShaderMiscFlags = m_plsImpl != nullptr
? m_plsImpl->shaderMiscFlags(desc, batch.drawType)
: pls::ShaderMiscFlags::none;
uint32_t fragmentShaderKey = pls::ShaderUniqueKey(batch.drawType,
: gpu::ShaderMiscFlags::none;
uint32_t fragmentShaderKey = gpu::ShaderUniqueKey(batch.drawType,
shaderFeatures,
desc.interlockMode,
fragmentShaderMiscFlags);
@@ -1211,14 +1211,14 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
glBindTexture(GL_TEXTURE_2D, imageTextureGL->textureID());
}
if (desc.interlockMode == pls::InterlockMode::depthStencil)
if (desc.interlockMode == gpu::InterlockMode::depthStencil)
{
// Set up the next blend.
if (batch.drawContents & pls::DrawContents::opaquePaint)
if (batch.drawContents & gpu::DrawContents::opaquePaint)
{
m_state->disableBlending();
}
else if (!(batch.drawContents & pls::DrawContents::advancedBlend))
else if (!(batch.drawContents & gpu::DrawContents::advancedBlend))
{
assert(batch.internalDrawList->blendMode() == BlendMode::srcOver);
m_state->setBlendEquation(BlendMode::srcOver);
@@ -1245,7 +1245,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
}
// Set up the next clipRect.
bool needsClipPlanes = (shaderFeatures & pls::ShaderFeatures::ENABLE_CLIP_RECT);
bool needsClipPlanes = (shaderFeatures & gpu::ShaderFeatures::ENABLE_CLIP_RECT);
if (needsClipPlanes != clipPlanesEnabled)
{
auto toggleEnableOrDisable = needsClipPlanes ? glEnable : glDisable;
@@ -1257,7 +1257,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
}
}
switch (pls::DrawType drawType = batch.drawType)
switch (gpu::DrawType drawType = batch.drawType)
{
case DrawType::midpointFanPatches:
case DrawType::outerCurvePatches:
@@ -1270,14 +1270,14 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
batch.elementCount,
batch.baseElement);
if (desc.interlockMode != pls::InterlockMode::depthStencil)
if (desc.interlockMode != gpu::InterlockMode::depthStencil)
{
m_plsImpl->ensureRasterOrderingEnabled(this,
desc,
desc.interlockMode ==
pls::InterlockMode::rasterOrdering);
drawHelper.setIndexRange(pls::PatchIndexCount(drawType),
pls::PatchBaseIndex(drawType));
gpu::InterlockMode::rasterOrdering);
drawHelper.setIndexRange(gpu::PatchIndexCount(drawType),
gpu::PatchBaseIndex(drawType));
m_state->setCullFace(GL_BACK);
drawHelper.draw();
break;
@@ -1285,19 +1285,19 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
// MSAA path draws require different stencil settings, depending on their
// drawContents.
bool hasActiveClip = ((batch.drawContents & pls::DrawContents::activeClip));
bool isClipUpdate = ((batch.drawContents & pls::DrawContents::clipUpdate));
bool hasActiveClip = ((batch.drawContents & gpu::DrawContents::activeClip));
bool isClipUpdate = ((batch.drawContents & gpu::DrawContents::clipUpdate));
bool isNestedClipUpdate =
(batch.drawContents & pls::kNestedClipUpdateMask) == pls::kNestedClipUpdateMask;
bool isEvenOddFill = (batch.drawContents & pls::DrawContents::evenOddFill);
bool isStroke = (batch.drawContents & pls::DrawContents::stroke);
(batch.drawContents & gpu::kNestedClipUpdateMask) == gpu::kNestedClipUpdateMask;
bool isEvenOddFill = (batch.drawContents & gpu::DrawContents::evenOddFill);
bool isStroke = (batch.drawContents & gpu::DrawContents::stroke);
if (isStroke)
{
// MSAA strokes only use the "border" section of the patch.
// (The depth test prevents double hits.)
assert(drawType == pls::DrawType::midpointFanPatches);
drawHelper.setIndexRange(pls::kMidpointFanPatchBorderIndexCount,
pls::kMidpointFanPatchBaseIndex);
assert(drawType == gpu::DrawType::midpointFanPatches);
drawHelper.setIndexRange(gpu::kMidpointFanPatchBorderIndexCount,
gpu::kMidpointFanPatchBaseIndex);
m_state->setWriteMasks(true, true, 0xff);
m_state->setCullFace(GL_BACK);
drawHelper.drawWithStencilSettings(hasActiveClip ? GL_EQUAL : GL_ALWAYS,
@@ -1310,8 +1310,8 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
}
// MSAA fills only use the "fan" section of the patch (they don't need AA borders).

drawHelper.setIndexRange(pls::PatchFanIndexCount(drawType),
pls::PatchFanBaseIndex(drawType));
drawHelper.setIndexRange(gpu::PatchFanIndexCount(drawType),
gpu::PatchFanBaseIndex(drawType));
// "nonZero" fill rules (that aren't nested clip updates) can be optimized to render
// directly instead of using a "stencil then cover" approach.
@@ -1378,12 +1378,12 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
}
break;
}
case pls::DrawType::stencilClipReset:
case gpu::DrawType::stencilClipReset:
{
assert(desc.interlockMode == pls::InterlockMode::depthStencil);
assert(desc.interlockMode == gpu::InterlockMode::depthStencil);
m_state->bindVAO(m_trianglesVAO);
bool isNestedClipUpdate =
(batch.drawContents & pls::kNestedClipUpdateMask) == pls::kNestedClipUpdateMask;
(batch.drawContents & gpu::kNestedClipUpdateMask) == gpu::kNestedClipUpdateMask;
if (isNestedClipUpdate)
{
// The nested clip just got stencilled and left in the stencil buffer. Intersect
@@ -1403,18 +1403,18 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
glDrawArrays(GL_TRIANGLES, batch.baseElement, batch.elementCount);
break;
}
case pls::DrawType::interiorTriangulation:
case gpu::DrawType::interiorTriangulation:
{
assert(desc.interlockMode != pls::InterlockMode::depthStencil); // TODO!
assert(desc.interlockMode != gpu::InterlockMode::depthStencil); // TODO!
m_plsImpl->ensureRasterOrderingEnabled(this, desc, false);
m_state->bindVAO(m_trianglesVAO);
m_state->setCullFace(GL_BACK);
glDrawArrays(GL_TRIANGLES, batch.baseElement, batch.elementCount);
break;
}
case pls::DrawType::imageRect:
case gpu::DrawType::imageRect:
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
assert(!m_capabilities.ARB_bindless_texture);
assert(m_imageRectVAO != 0); // Should have gotten lazily allocated by now.
m_plsImpl->ensureRasterOrderingEnabled(this, desc, false);
@@ -1423,15 +1423,15 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
IMAGE_DRAW_UNIFORM_BUFFER_IDX,
gl_buffer_id(imageDrawUniformBufferRing()),
batch.imageDrawDataOffset,
sizeof(pls::ImageDrawUniforms));
sizeof(gpu::ImageDrawUniforms));
m_state->setCullFace(GL_NONE);
glDrawElements(GL_TRIANGLES,
std::size(pls::kImageRectIndices),
std::size(gpu::kImageRectIndices),
GL_UNSIGNED_SHORT,
nullptr);
break;
}
case pls::DrawType::imageMesh:
case gpu::DrawType::imageMesh:
{
LITE_RTTI_CAST_OR_BREAK(vertexBuffer,
const PLSRenderBufferGLImpl*,
@@ -1450,8 +1450,8 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
IMAGE_DRAW_UNIFORM_BUFFER_IDX,
gl_buffer_id(imageDrawUniformBufferRing()),
batch.imageDrawDataOffset,
sizeof(pls::ImageDrawUniforms));
if (desc.interlockMode != pls::InterlockMode::depthStencil)
sizeof(gpu::ImageDrawUniforms));
if (desc.interlockMode != gpu::InterlockMode::depthStencil)
{
// Try to enable raster ordering for image meshes in both rasterOrdering and atomic
// mode; we have no control over whether the internal geometry has self
@@ -1463,7 +1463,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
}
else
{
bool hasActiveClip = ((batch.drawContents & pls::DrawContents::activeClip));
bool hasActiveClip = ((batch.drawContents & gpu::DrawContents::activeClip));
glStencilFunc(hasActiveClip ? GL_EQUAL : GL_ALWAYS, 0x80, 0xff);
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
m_state->setWriteMasks(true, true, 0xff);
@@ -1475,29 +1475,29 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
reinterpret_cast<const void*>(batch.baseElement * sizeof(uint16_t)));
break;
}
case pls::DrawType::plsAtomicResolve:
case gpu::DrawType::gpuAtomicResolve:
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
m_plsImpl->ensureRasterOrderingEnabled(this, desc, false);
m_state->bindVAO(m_emptyVAO);
m_plsImpl->setupAtomicResolve(this, desc);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
break;
}
case pls::DrawType::plsAtomicInitialize:
case gpu::DrawType::gpuAtomicInitialize:
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
RIVE_UNREACHABLE();
}
}
if (desc.interlockMode != pls::InterlockMode::depthStencil && batch.needsBarrier &&
batch.drawType != pls::DrawType::imageMesh /*EW!*/)
if (desc.interlockMode != gpu::InterlockMode::depthStencil && batch.needsBarrier &&
batch.drawType != gpu::DrawType::imageMesh /*EW!*/)
{
m_plsImpl->barrier(desc);
}
}
if (desc.interlockMode != pls::InterlockMode::depthStencil)
if (desc.interlockMode != gpu::InterlockMode::depthStencil)
{
m_plsImpl->deactivatePixelLocalStorage(this, desc);
}
@@ -1505,7 +1505,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc)
{
// Depth/stencil don't need to be written out.
glInvalidateFramebuffer(GL_DRAW_FRAMEBUFFER, 2, msaaDepthStencilColor.data());
if ((desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) &&
if ((desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) &&
m_capabilities.KHR_blend_equation_advanced_coherent)
{
glDisable(GL_BLEND_ADVANCED_COHERENT_KHR);
@@ -1792,7 +1792,7 @@ std::unique_ptr<PLSRenderContext> PLSRenderContextGLImpl::MakeContext(
{
int maxVertexShaderStorageBlocks;
glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &maxVertexShaderStorageBlocks);
if (maxVertexShaderStorageBlocks < pls::kMaxStorageBuffers)
if (maxVertexShaderStorageBlocks < gpu::kMaxStorageBuffers)
{
capabilities.ARB_shader_storage_buffer_object = false;
}
@@ -1880,4 +1880,4 @@ std::unique_ptr<PLSRenderContext> PLSRenderContextGLImpl::MakeContext(
new PLSRenderContextGLImpl(rendererString, capabilities, std::move(plsImpl)));
return std::make_unique<PLSRenderContext>(std::move(plsContextImpl));
}
} // namespace rive::pls
} // namespace rive::gpu
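
The DrawShader/DrawProgram handling above keys shaders by draw type, features, interlock mode, and misc flags, then uses try_emplace so each unique combination compiles only once. A self-contained sketch of that caching pattern, with an illustrative (not the real gpu::ShaderUniqueKey) key packing:

```
#include <cstdint>
#include <map>

struct ShaderSketch
{
    explicit ShaderSketch(uint32_t key) : compiledFromKey(key) {} // stand-in for a compile
    uint32_t compiledFromKey;
};

// Illustrative key packing; the real layout lives in gpu::ShaderUniqueKey().
static uint32_t make_key_sketch(uint32_t drawType, uint32_t features, uint32_t interlockMode)
{
    return (drawType << 24) | (interlockMode << 20) | (features & 0xfffff);
}

static const ShaderSketch& find_or_compile(std::map<uint32_t, ShaderSketch>& cache,
                                           uint32_t drawType,
                                           uint32_t features,
                                           uint32_t interlockMode)
{
    uint32_t key = make_key_sketch(drawType, features, interlockMode);
    // try_emplace only constructs (i.e., "compiles") when the key is absent.
    return cache.try_emplace(key, key).first->second;
}
```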

View File

@@ -2,13 +2,13 @@
* Copyright 2023 Rive
*/
#include "rive/pls/gl/pls_render_target_gl.hpp"
#include "rive/renderer/gl/render_target_gl.hpp"
#include "rive/pls/pls.hpp"
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/gl/render_context_gl_impl.hpp"
#include "shaders/constants.glsl"
namespace rive::pls
namespace rive::gpu
{
TextureRenderTargetGL::~TextureRenderTargetGL() {}
@@ -21,7 +21,7 @@ static glutils::Texture make_backing_texture(GLenum internalformat, uint32_t wid
return texture;
}
void TextureRenderTargetGL::allocateInternalPLSTextures(pls::InterlockMode interlockMode)
void TextureRenderTargetGL::allocateInternalPLSTextures(gpu::InterlockMode interlockMode)
{
if (m_coverageTexture == 0)
{
@@ -300,7 +300,7 @@ void FramebufferRenderTargetGL::allocateOffscreenTargetTexture()
}
}
void FramebufferRenderTargetGL::allocateInternalPLSTextures(pls::InterlockMode interlockMode)
void FramebufferRenderTargetGL::allocateInternalPLSTextures(gpu::InterlockMode interlockMode)
{
m_textureRenderTarget.allocateInternalPLSTextures(interlockMode);
}
@@ -378,4 +378,4 @@ void FramebufferRenderTargetGL::bindInternalDstTexture(GLenum activeTexture)
allocateOffscreenTargetTexture();
m_textureRenderTarget.bindInternalDstTexture(activeTexture);
}
} // namespace rive::pls
} // namespace rive::gpu
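
allocateInternalPLSTextures() above only creates the coverage/clip planes the first time a render pass needs them. A hedged sketch of that lazy-allocation pattern; GL_R32UI is an illustrative format, not necessarily the one the implementation picks:

```
#include <GLES3/gl3.h>
#include <cstdint>

// Returns the existing texture if it was already allocated, otherwise creates
// an immutable-storage texture of the requested size.
static GLuint lazily_allocate_plane(GLuint existing, uint32_t width, uint32_t height)
{
    if (existing != 0)
        return existing; // already allocated on a previous flush
    GLuint tex = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexStorage2D(GL_TEXTURE_2D, 1, GL_R32UI, width, height);
    return tex;
}
```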

View File

@@ -45,7 +45,7 @@ public:
size_t maxVertexCount() const { return m_maxVertexCount; }
size_t polysToTriangles(pls::WriteOnlyMappedMemory<pls::TriangleVertex>* bufferRing,
size_t polysToTriangles(gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* bufferRing,
uint16_t pathID) const
{

View File

@@ -114,7 +114,7 @@ bool GrTriangulator::Comparator::sweep_lt(const Vec2D& a, const Vec2D& b) const
static inline void emit_vertex(Vertex* v,
int winding,
uint16_t pathID,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory)
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory)
{
// GrTriangulator and pls unfortunately have opposite winding senses.
int16_t plsWeight = -winding;
@@ -126,7 +126,7 @@ static void emit_triangle(Vertex* v0,
Vertex* v2,
int winding,
uint16_t pathID,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory)
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory)
{
TESS_LOG("emit_triangle %g (%g, %g) %d\n", v0->fID, v0->fPoint.x, v0->fPoint.y, v0->fAlpha);
TESS_LOG(" %g (%g, %g) %d\n", v1->fID, v1->fPoint.x, v1->fPoint.y, v1->fAlpha);
@@ -416,7 +416,7 @@ void GrTriangulator::emitMonotonePoly(
const MonotonePoly* monotonePoly,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory) const
{
assert(monotonePoly->fWinding != 0);
Edge* e = monotonePoly->fFirstEdge;
@@ -494,7 +494,7 @@ void GrTriangulator::emitTriangle(
int winding,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory) const
{
if (reverseTriangles)
{
@@ -583,7 +583,7 @@ Poly* GrTriangulator::Poly::addEdge(Edge* e, Side side, GrTriangulator* tri)
void GrTriangulator::emitPoly(const Poly* poly,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory) const
{
if (poly->fCount < 3)
{
@@ -2277,7 +2277,7 @@ void GrTriangulator::polysToTriangles(
FillRule overrideFillType,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory) const
{
for (Poly* poly = polys; poly; poly = poly->fNext)
{
@@ -2372,14 +2372,14 @@ size_t GrTriangulator::polysToTriangles(
uint64_t maxVertexCount,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>* mappedMemory) const
{
if (0 == maxVertexCount || maxVertexCount > std::numeric_limits<int32_t>::max())
{
return 0;
}
size_t vertexStride = sizeof(pls::TriangleVertex);
size_t vertexStride = sizeof(gpu::TriangleVertex);
#if 0
if (fEmitCoverage)
{

View File

@@ -18,8 +18,8 @@
#include "rive/math/raw_path.hpp"
#include "rive/math/vec2d.hpp"
#include "rive/math/aabb.hpp"
#include "rive/pls/pls.hpp"
#include "rive/pls/trivial_block_allocator.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/trivial_block_allocator.hpp"
namespace rive
{
@@ -119,7 +119,7 @@ protected:
FillRule overrideFillRule,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>*) const;
// The vertex sorting in step (3) is a merge sort, since it plays well with the linked list
// of vertices (and the necessity of inserting new vertices on intersection).
@@ -168,18 +168,18 @@ protected:
void emitMonotonePoly(const MonotonePoly*,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>*) const;
void emitTriangle(Vertex* prev,
Vertex* curr,
Vertex* next,
int winding,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>*) const;
void emitPoly(const Poly*,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>*) const;
Poly* makePoly(Poly** head, Vertex* v, int winding) const;
void appendPointToContour(const Vec2D& p, VertexList* contour) const;
@@ -265,7 +265,7 @@ protected:
uint64_t maxVertexCount,
uint16_t pathID,
bool reverseTriangles,
pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
gpu::WriteOnlyMappedMemory<gpu::TriangleVertex>*) const;
Comparator::Direction fDirection;
FillRule fFillRule;

View File

@@ -2,13 +2,13 @@
* Copyright 2022 Rive
*/
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/image.hpp"
namespace rive::pls
namespace rive::gpu
{
PLSTexture::PLSTexture(uint32_t width, uint32_t height) : m_width(width), m_height(height)
{
static std::atomic_uint32_t textureResourceHashCounter = 0;
m_textureResourceHash = ++textureResourceHashCounter;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -13,7 +13,7 @@
#else
#endif
namespace rive::pls
namespace rive::gpu
{
void IntersectionTile::reset(int left, int top, int16_t baselineGroupIndex)
{
@@ -252,4 +252,4 @@ int16_t IntersectionBoard::addRectangle(int4 ltrb)
return nextGroupIndex;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -7,7 +7,7 @@
#include "rive/math/simd.hpp"
#include <vector>
namespace rive::pls
namespace rive::gpu
{
// 255 x 255 tile that manages a set of rectangles and their groupIndex.
// From a given rectangle, finds the max groupIndex in the set of internal rectangles it intersects.
@@ -70,4 +70,4 @@ private:
int32_t m_rows;
std::vector<IntersectionTile> m_tiles;
};
} // namespace rive::pls
} // namespace rive::gpu
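
The IntersectionBoard above finds, for an incoming rectangle, the max groupIndex among the rectangles it intersects, using 255 x 255 tiles to keep that query fast; a natural use is to place the new rectangle in the next group. A naive, self-contained O(n) sketch of just that grouping rule, without the tiling or SIMD:

```
#include <algorithm>
#include <cstdint>
#include <vector>

struct RectSketch { int l, t, r, b; int16_t group; };

static int16_t add_rectangle_sketch(std::vector<RectSketch>& rects, int l, int t, int r, int b)
{
    int16_t maxGroup = 0;
    for (const RectSketch& rc : rects)
    {
        // Standard axis-aligned overlap test.
        bool overlaps = l < rc.r && rc.l < r && t < rc.b && rc.t < b;
        if (overlaps)
            maxGroup = std::max(maxGroup, rc.group);
    }
    int16_t group = static_cast<int16_t>(maxGroup + 1); // next group after everything we overlap
    rects.push_back({l, t, r, b, group});
    return group;
}
```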

View File

@@ -4,24 +4,24 @@
#pragma once
#include "rive/pls/pls.hpp"
#include "rive/pls/metal/pls_render_context_metal_impl.h"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer/metal/render_context_metal_impl.h"
#include <queue>
#include <thread>
#import <Metal/Metal.h>
namespace rive::pls
namespace rive::gpu
{
// Defines a job to compile a "draw" shader -- either draw_path.glsl or draw_image_mesh.glsl, with a
// specific set of features enabled.
struct BackgroundCompileJob
{
pls::DrawType drawType;
pls::ShaderFeatures shaderFeatures;
pls::InterlockMode interlockMode;
pls::ShaderMiscFlags shaderMiscFlags;
gpu::DrawType drawType;
gpu::ShaderFeatures shaderFeatures;
gpu::InterlockMode interlockMode;
gpu::ShaderMiscFlags shaderMiscFlags;
id<MTLLibrary> compiledLibrary;
};
@@ -55,4 +55,4 @@ private:
bool m_shouldQuit;
std::thread m_compilerThread;
};
} // namespace rive::pls
} // namespace rive::gpu
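
BackgroundShaderCompiler above is a producer/consumer: draw code pushes compile jobs, a worker thread compiles them, and the renderer polls (or waits) for finished jobs. A self-contained, Metal-free sketch of that threading pattern with a placeholder compile step; callers should only pass wait=true when a job is known to be in flight:

```
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <queue>
#include <thread>

struct CompileJobSketch { uint32_t key = 0; bool compiled = false; };

class BackgroundCompilerSketch
{
public:
    BackgroundCompilerSketch() : m_thread([this] { threadMain(); }) {}
    ~BackgroundCompilerSketch()
    {
        { std::lock_guard<std::mutex> lock(m_mutex); m_shouldQuit = true; }
        m_pendingCV.notify_all();
        m_thread.join();
    }

    void pushJob(CompileJobSketch job)
    {
        { std::lock_guard<std::mutex> lock(m_mutex); m_pending.push(job); }
        m_pendingCV.notify_all();
    }

    bool popFinishedJob(CompileJobSketch* out, bool wait)
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (wait)
            m_finishedCV.wait(lock, [this] { return !m_finished.empty(); });
        if (m_finished.empty())
            return false;
        *out = m_finished.front();
        m_finished.pop();
        return true;
    }

private:
    void threadMain()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        for (;;)
        {
            m_pendingCV.wait(lock, [this] { return !m_pending.empty() || m_shouldQuit; });
            if (m_shouldQuit)
                return;
            CompileJobSketch job = m_pending.front();
            m_pending.pop();
            lock.unlock();
            job.compiled = true; // stand-in for the actual shader compile
            lock.lock();
            m_finished.push(job);
            m_finishedCV.notify_all();
        }
    }

    std::mutex m_mutex;
    std::condition_variable m_pendingCV, m_finishedCV;
    std::queue<CompileJobSketch> m_pending, m_finished;
    bool m_shouldQuit = false;
    std::thread m_thread; // declared last so the other members exist before the thread starts
};
```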

View File

@@ -19,7 +19,7 @@
#include <sstream>
namespace rive::pls
namespace rive::gpu
{
BackgroundShaderCompiler::~BackgroundShaderCompiler()
{
@@ -81,24 +81,24 @@ void BackgroundShaderCompiler::threadMain()
lock.unlock();
pls::DrawType drawType = job.drawType;
pls::ShaderFeatures shaderFeatures = job.shaderFeatures;
pls::InterlockMode interlockMode = job.interlockMode;
pls::ShaderMiscFlags shaderMiscFlags = job.shaderMiscFlags;
gpu::DrawType drawType = job.drawType;
gpu::ShaderFeatures shaderFeatures = job.shaderFeatures;
gpu::InterlockMode interlockMode = job.interlockMode;
gpu::ShaderMiscFlags shaderMiscFlags = job.shaderMiscFlags;
auto defines = [[NSMutableDictionary alloc] init];
defines[@GLSL_VERTEX] = @"";
defines[@GLSL_FRAGMENT] = @"";
for (size_t i = 0; i < pls::kShaderFeatureCount; ++i)
for (size_t i = 0; i < gpu::kShaderFeatureCount; ++i)
{
ShaderFeatures feature = static_cast<ShaderFeatures>(1 << i);
if (shaderFeatures & feature)
{
const char* macro = pls::GetShaderFeatureGLSLName(feature);
const char* macro = gpu::GetShaderFeatureGLSLName(feature);
defines[[NSString stringWithUTF8String:macro]] = @"1";
}
}
if (interlockMode == pls::InterlockMode::atomics)
if (interlockMode == gpu::InterlockMode::atomics)
{
// Atomic mode uses device buffers instead of framebuffer fetches.
defines[@GLSL_PLS_IMPL_DEVICE_BUFFER] = @"";
@@ -106,18 +106,18 @@ void BackgroundShaderCompiler::threadMain()
{
defines[@GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED] = @"";
}
if (!(shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND))
if (!(shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND))
{
defines[@GLSL_FIXED_FUNCTION_COLOR_BLEND] = @"";
}
}
auto source = [[NSMutableString alloc] initWithCString:pls::glsl::metal
auto source = [[NSMutableString alloc] initWithCString:gpu::glsl::metal
encoding:NSUTF8StringEncoding];
[source appendFormat:@"%s\n%s\n", pls::glsl::constants, pls::glsl::common];
[source appendFormat:@"%s\n%s\n", gpu::glsl::constants, gpu::glsl::common];
if (shaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND)
{
[source appendFormat:@"%s\n", pls::glsl::advanced_blend];
[source appendFormat:@"%s\n", gpu::glsl::advanced_blend];
}
switch (drawType)
@@ -127,26 +127,26 @@ void BackgroundShaderCompiler::threadMain()
// Add baseInstance to the instanceID for path draws.
defines[@GLSL_ENABLE_INSTANCE_INDEX] = @"";
defines[@GLSL_DRAW_PATH] = @"";
[source appendFormat:@"%s\n", pls::glsl::draw_path_common];
[source appendFormat:@"%s\n", gpu::glsl::draw_path_common];
#ifdef RIVE_IOS
[source appendFormat:@"%s\n", pls::glsl::draw_path];
[source appendFormat:@"%s\n", gpu::glsl::draw_path];
#else
[source appendFormat:@"%s\n",
interlockMode == pls::InterlockMode::rasterOrdering
? pls::glsl::draw_path
: pls::glsl::atomic_draw];
interlockMode == gpu::InterlockMode::rasterOrdering
? gpu::glsl::draw_path
: gpu::glsl::atomic_draw];
#endif
break;
case DrawType::interiorTriangulation:
defines[@GLSL_DRAW_INTERIOR_TRIANGLES] = @"";
[source appendFormat:@"%s\n", pls::glsl::draw_path_common];
[source appendFormat:@"%s\n", gpu::glsl::draw_path_common];
#ifdef RIVE_IOS
[source appendFormat:@"%s\n", pls::glsl::draw_path];
[source appendFormat:@"%s\n", gpu::glsl::draw_path];
#else
[source appendFormat:@"%s\n",
interlockMode == pls::InterlockMode::rasterOrdering
? pls::glsl::draw_path
: pls::glsl::atomic_draw];
interlockMode == gpu::InterlockMode::rasterOrdering
? gpu::glsl::draw_path
: gpu::glsl::atomic_draw];
#endif
break;
case DrawType::imageRect:
@@ -156,58 +156,58 @@ void BackgroundShaderCompiler::threadMain()
assert(interlockMode == InterlockMode::atomics);
defines[@GLSL_DRAW_IMAGE] = @"";
defines[@GLSL_DRAW_IMAGE_RECT] = @"";
[source appendFormat:@"%s\n", pls::glsl::atomic_draw];
[source appendFormat:@"%s\n", gpu::glsl::atomic_draw];
#endif
break;
case DrawType::imageMesh:
defines[@GLSL_DRAW_IMAGE] = @"";
defines[@GLSL_DRAW_IMAGE_MESH] = @"";
#ifdef RIVE_IOS
[source appendFormat:@"%s\n", pls::glsl::draw_image_mesh];
[source appendFormat:@"%s\n", gpu::glsl::draw_image_mesh];
#else
[source appendFormat:@"%s\n",
interlockMode == pls::InterlockMode::rasterOrdering
? pls::glsl::draw_image_mesh
: pls::glsl::atomic_draw];
interlockMode == gpu::InterlockMode::rasterOrdering
? gpu::glsl::draw_image_mesh
: gpu::glsl::atomic_draw];
#endif
break;
case DrawType::plsAtomicInitialize:
case DrawType::gpuAtomicInitialize:
#ifdef RIVE_IOS
RIVE_UNREACHABLE();
#else
assert(interlockMode == InterlockMode::atomics);
defines[@GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS] = @"";
defines[@GLSL_INITIALIZE_PLS] = @"";
if (shaderMiscFlags & pls::ShaderMiscFlags::storeColorClear)
if (shaderMiscFlags & gpu::ShaderMiscFlags::storeColorClear)
{
defines[@GLSL_STORE_COLOR_CLEAR] = @"";
}
if (shaderMiscFlags & pls::ShaderMiscFlags::swizzleColorBGRAToRGBA)
if (shaderMiscFlags & gpu::ShaderMiscFlags::swizzleColorBGRAToRGBA)
{
defines[@GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA] = @"";
}
[source appendFormat:@"%s\n", pls::glsl::atomic_draw];
[source appendFormat:@"%s\n", gpu::glsl::atomic_draw];
#endif
break;
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicResolve:
#ifdef RIVE_IOS
RIVE_UNREACHABLE();
#else
assert(interlockMode == InterlockMode::atomics);
defines[@GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS] = @"";
defines[@GLSL_RESOLVE_PLS] = @"";
if (shaderMiscFlags & pls::ShaderMiscFlags::coalescedResolveAndTransfer)
if (shaderMiscFlags & gpu::ShaderMiscFlags::coalescedResolveAndTransfer)
{
defines[@GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER] = @"";
}
[source appendFormat:@"%s\n", pls::glsl::atomic_draw];
[source appendFormat:@"%s\n", gpu::glsl::atomic_draw];
#endif
break;
case DrawType::stencilClipReset:
RIVE_UNREACHABLE();
}
NSError* err = [NSError errorWithDomain:@"pls_compile" code:200 userInfo:nil];
NSError* err = [NSError errorWithDomain:@"compile" code:200 userInfo:nil];
MTLCompileOptions* compileOptions = [MTLCompileOptions new];
#if defined(RIVE_IOS) || defined(RIVE_IOS_SIMULATOR)
compileOptions.languageVersion = MTLLanguageVersion2_2; // On ios, we need version 2.2+
@@ -241,4 +241,4 @@ void BackgroundShaderCompiler::threadMain()
m_workFinishedCondition.notify_all();
}
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,11 +2,11 @@
* Copyright 2023 Rive
*/
#include "rive/pls/metal/pls_render_context_metal_impl.h"
#include "rive/renderer/metal/render_context_metal_impl.h"
#include "background_shader_compiler.h"
#include "rive/pls/buffer_ring.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/buffer_ring.hpp"
#include "rive/renderer/image.hpp"
#include "shaders/constants.glsl"
#include <sstream>
@@ -17,7 +17,7 @@
#import <mach-o/arch.h>
#endif
namespace rive::pls
namespace rive::gpu
{
#ifdef RIVE_IOS
#include "generated/shaders/rive_pls_ios.metallib.c"
@@ -30,7 +30,7 @@ namespace rive::pls
static id<MTLRenderPipelineState> make_pipeline_state(id<MTLDevice> gpu,
MTLRenderPipelineDescriptor* desc)
{
NSError* err = [NSError errorWithDomain:@"pls_pipeline_create" code:201 userInfo:nil];
NSError* err = [NSError errorWithDomain:@"pipeline_create" code:201 userInfo:nil];
id<MTLRenderPipelineState> state = [gpu newRenderPipelineStateWithDescriptor:desc error:&err];
if (!state)
{
@@ -96,7 +96,7 @@ public:
{
namespaceID[0] = '1';
}
for (size_t i = 0; i < pls::kShaderFeatureCount; ++i)
for (size_t i = 0; i < gpu::kShaderFeatureCount; ++i)
{
ShaderFeatures feature = static_cast<ShaderFeatures>(1 << i);
if (shaderFeatures & feature)
@@ -124,8 +124,8 @@ public:
case DrawType::imageMesh:
namespacePrefix = 'm';
break;
case DrawType::plsAtomicInitialize:
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicInitialize:
case DrawType::gpuAtomicResolve:
case DrawType::stencilClipReset:
RIVE_UNREACHABLE();
}
@@ -138,10 +138,10 @@ public:
id<MTLLibrary> library,
NSString* vertexFunctionName,
NSString* fragmentFunctionName,
pls::DrawType drawType,
pls::InterlockMode interlockMode,
pls::ShaderFeatures shaderFeatures,
pls::ShaderMiscFlags shaderMiscFlags)
gpu::DrawType drawType,
gpu::InterlockMode interlockMode,
gpu::ShaderFeatures shaderFeatures,
gpu::ShaderMiscFlags shaderMiscFlags)
{
auto makePipelineState = [=](id<MTLFunction> vertexMain,
id<MTLFunction> fragmentMain,
@@ -155,16 +155,16 @@ public:
switch (interlockMode)
{
case pls::InterlockMode::rasterOrdering:
case gpu::InterlockMode::rasterOrdering:
// In rasterOrdering mode, the PLS planes are accessed as color attachments.
desc.colorAttachments[CLIP_PLANE_IDX].pixelFormat = MTLPixelFormatR32Uint;
desc.colorAttachments[SCRATCH_COLOR_PLANE_IDX].pixelFormat = pixelFormat;
desc.colorAttachments[COVERAGE_PLANE_IDX].pixelFormat = MTLPixelFormatR32Uint;
break;
case pls::InterlockMode::atomics:
case gpu::InterlockMode::atomics:
// In atomic mode, the PLS planes are accessed as device buffers. We only use
// the "framebuffer" attachment configured above.
if (shaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend)
if (shaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend)
{
// The shader expects a "src-over" blend function in order to implement
// antialiasing and opacity.
@@ -177,7 +177,7 @@ public:
framebuffer.alphaBlendOperation = MTLBlendOperationAdd;
framebuffer.writeMask = MTLColorWriteMaskAll;
}
else if (drawType == pls::DrawType::plsAtomicResolve)
else if (drawType == gpu::DrawType::gpuAtomicResolve)
{
// We're resolving from the offscreen color buffer to the framebuffer
// attachment. Write out the final color directly without any blend modes.
@@ -192,7 +192,7 @@ public:
framebuffer.writeMask = MTLColorWriteMaskNone;
}
break;
case pls::InterlockMode::depthStencil:
case gpu::InterlockMode::depthStencil:
RIVE_UNREACHABLE();
}
return make_pipeline_state(gpu, desc);
@@ -358,7 +358,7 @@ PLSRenderContextMetalImpl::PLSRenderContextMetalImpl(id<MTLDevice> gpu,
#endif
nil,
nil);
NSError* err = [NSError errorWithDomain:@"pls_metallib_load" code:200 userInfo:nil];
NSError* err = [NSError errorWithDomain:@"metallib_load" code:200 userInfo:nil];
m_plsPrecompiledLibrary = [m_gpu newLibraryWithData:metallibData error:&err];
if (m_plsPrecompiledLibrary == nil)
{
@@ -369,8 +369,8 @@ PLSRenderContextMetalImpl::PLSRenderContextMetalImpl(id<MTLDevice> gpu,
m_colorRampPipeline = std::make_unique<ColorRampPipeline>(m_gpu, m_plsPrecompiledLibrary);
m_tessPipeline = std::make_unique<TessellatePipeline>(m_gpu, m_plsPrecompiledLibrary);
m_tessSpanIndexBuffer = [m_gpu newBufferWithBytes:pls::kTessSpanIndices
length:sizeof(pls::kTessSpanIndices)
m_tessSpanIndexBuffer = [m_gpu newBufferWithBytes:gpu::kTessSpanIndices
length:sizeof(gpu::kTessSpanIndices)
options:MTLResourceStorageModeShared];
// The precompiled static library has a fully-featured shader for each drawType in
@@ -381,26 +381,26 @@ PLSRenderContextMetalImpl::PLSRenderContextMetalImpl(id<MTLDevice> gpu,
for (auto drawType :
{DrawType::midpointFanPatches, DrawType::interiorTriangulation, DrawType::imageMesh})
{
pls::ShaderFeatures allShaderFeatures =
pls::ShaderFeaturesMaskFor(drawType, pls::InterlockMode::rasterOrdering);
gpu::ShaderFeatures allShaderFeatures =
gpu::ShaderFeaturesMaskFor(drawType, gpu::InterlockMode::rasterOrdering);
uint32_t pipelineKey = ShaderUniqueKey(drawType,
allShaderFeatures,
pls::InterlockMode::rasterOrdering,
pls::ShaderMiscFlags::none);
gpu::InterlockMode::rasterOrdering,
gpu::ShaderMiscFlags::none);
m_drawPipelines[pipelineKey] = std::make_unique<DrawPipeline>(
m_gpu,
m_plsPrecompiledLibrary,
DrawPipeline::GetPrecompiledFunctionName(drawType,
allShaderFeatures &
pls::kVertexShaderFeaturesMask,
gpu::kVertexShaderFeaturesMask,
m_plsPrecompiledLibrary,
GLSL_drawVertexMain),
DrawPipeline::GetPrecompiledFunctionName(
drawType, allShaderFeatures, m_plsPrecompiledLibrary, GLSL_drawFragmentMain),
drawType,
pls::InterlockMode::rasterOrdering,
gpu::InterlockMode::rasterOrdering,
allShaderFeatures,
pls::ShaderMiscFlags::none);
gpu::ShaderMiscFlags::none);
}
}
@@ -413,12 +413,12 @@ PLSRenderContextMetalImpl::PLSRenderContextMetalImpl(id<MTLDevice> gpu,
GeneratePatchBufferData(reinterpret_cast<PatchVertex*>(m_pathPatchVertexBuffer.contents),
reinterpret_cast<uint16_t*>(m_pathPatchIndexBuffer.contents));
// Set up the imageRect rendering buffers. (pls::InterlockMode::atomics only.)
m_imageRectVertexBuffer = [m_gpu newBufferWithBytes:pls::kImageRectVertices
length:sizeof(pls::kImageRectVertices)
// Set up the imageRect rendering buffers. (gpu::InterlockMode::atomics only.)
m_imageRectVertexBuffer = [m_gpu newBufferWithBytes:gpu::kImageRectVertices
length:sizeof(gpu::kImageRectVertices)
options:MTLResourceStorageModeShared];
m_imageRectIndexBuffer = [m_gpu newBufferWithBytes:pls::kImageRectIndices
length:sizeof(pls::kImageRectIndices)
m_imageRectIndexBuffer = [m_gpu newBufferWithBytes:gpu::kImageRectIndices
length:sizeof(gpu::kImageRectIndices)
options:MTLResourceStorageModeShared];
}
@@ -485,7 +485,7 @@ public:
lite_rtti_override(renderBufferType, renderBufferFlags, sizeInBytes), m_gpu(gpu)
{
int bufferCount =
flags() & RenderBufferFlags::mappedOnceAtInitialization ? 1 : pls::kBufferRingSize;
flags() & RenderBufferFlags::mappedOnceAtInitialization ? 1 : gpu::kBufferRingSize;
for (int i = 0; i < bufferCount; ++i)
{
m_buffers[i] = [gpu newBufferWithLength:sizeInBytes
@@ -498,7 +498,7 @@ public:
protected:
void* onMap() override
{
m_submittedBufferIdx = (m_submittedBufferIdx + 1) % pls::kBufferRingSize;
m_submittedBufferIdx = (m_submittedBufferIdx + 1) % gpu::kBufferRingSize;
assert(m_buffers[m_submittedBufferIdx] != nil);
return m_buffers[m_submittedBufferIdx].contents;
}
@@ -507,7 +507,7 @@ protected:
private:
id<MTLDevice> m_gpu;
id<MTLBuffer> m_buffers[pls::kBufferRingSize];
id<MTLBuffer> m_buffers[gpu::kBufferRingSize];
int m_submittedBufferIdx = -1;
};
@@ -580,7 +580,7 @@ std::unique_ptr<BufferRing> PLSRenderContextMetalImpl::makeUniformBufferRing(siz
}
std::unique_ptr<BufferRing> PLSRenderContextMetalImpl::makeStorageBufferRing(
size_t capacityInBytes, pls::StorageBufferStructure)
size_t capacityInBytes, gpu::StorageBufferStructure)
{
return BufferRingMetalImpl::Make(m_gpu, capacityInBytes);
}
@@ -633,13 +633,13 @@ void PLSRenderContextMetalImpl::resizeTessellationTexture(uint32_t width, uint32
}
const PLSRenderContextMetalImpl::DrawPipeline* PLSRenderContextMetalImpl::
findCompatibleDrawPipeline(pls::DrawType drawType,
pls::ShaderFeatures shaderFeatures,
pls::InterlockMode interlockMode,
pls::ShaderMiscFlags shaderMiscFlags)
findCompatibleDrawPipeline(gpu::DrawType drawType,
gpu::ShaderFeatures shaderFeatures,
gpu::InterlockMode interlockMode,
gpu::ShaderMiscFlags shaderMiscFlags)
{
uint32_t pipelineKey =
pls::ShaderUniqueKey(drawType, shaderFeatures, interlockMode, shaderMiscFlags);
gpu::ShaderUniqueKey(drawType, shaderFeatures, interlockMode, shaderMiscFlags);
auto pipelineIter = m_drawPipelines.find(pipelineKey);
if (pipelineIter == m_drawPipelines.end())
{
@@ -663,8 +663,8 @@ const PLSRenderContextMetalImpl::DrawPipeline* PLSRenderContextMetalImpl::
// The shader for this pipeline hasn't finished compiling yet. Start by finding a fully-featured
// superset of features whose pipeline we can fall back on while waiting for it to compile.
ShaderFeatures fullyFeaturedPipelineFeatures =
pls::ShaderFeaturesMaskFor(drawType, interlockMode);
if (interlockMode == pls::InterlockMode::atomics)
gpu::ShaderFeaturesMaskFor(drawType, interlockMode);
if (interlockMode == gpu::InterlockMode::atomics)
{
// Never add ENABLE_ADVANCED_BLEND to an atomic pipeline that doesn't use advanced blend,
// since in atomic mode, the shaders behave differently depending on whether advanced blend
@@ -680,7 +680,7 @@ const PLSRenderContextMetalImpl::DrawPipeline* PLSRenderContextMetalImpl::
// Fully-featured "rasterOrdering" pipelines should have already been pre-loaded from the static
// library.
assert(shaderFeatures != fullyFeaturedPipelineFeatures ||
interlockMode != pls::InterlockMode::rasterOrdering);
interlockMode != gpu::InterlockMode::rasterOrdering);
// Poll to see if the shader is actually done compiling, but only wait if it's a fully-featured
// pipeline. Otherwise, we can fall back on the fully-featured pipeline while we wait for
@@ -690,7 +690,7 @@ const PLSRenderContextMetalImpl::DrawPipeline* PLSRenderContextMetalImpl::
m_contextOptions.synchronousShaderCompilations;
while (m_backgroundShaderCompiler->popFinishedJob(&job, shouldWaitForBackgroundCompilation))
{
uint32_t jobKey = pls::ShaderUniqueKey(
uint32_t jobKey = gpu::ShaderUniqueKey(
job.drawType, job.shaderFeatures, job.interlockMode, job.shaderMiscFlags);
m_drawPipelines[jobKey] = std::make_unique<DrawPipeline>(m_gpu,
job.compiledLibrary,
@@ -742,10 +742,10 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_
}
id<MTLRenderCommandEncoder> PLSRenderContextMetalImpl::makeRenderPassForDraws(
const pls::FlushDescriptor& flushDesc,
const gpu::FlushDescriptor& flushDesc,
MTLRenderPassDescriptor* passDesc,
id<MTLCommandBuffer> commandBuffer,
pls::ShaderMiscFlags baselineShaderMiscFlags)
gpu::ShaderMiscFlags baselineShaderMiscFlags)
{
auto* renderTarget = static_cast<PLSRenderTargetMetal*>(flushDesc.renderTarget);
@@ -764,45 +764,45 @@ id<MTLRenderCommandEncoder> PLSRenderContextMetalImpl::makeRenderPassForDraws(
if (flushDesc.pathCount > 0)
{
[encoder setVertexBuffer:mtl_buffer(pathBufferRing())
offset:flushDesc.firstPath * sizeof(pls::PathData)
offset:flushDesc.firstPath * sizeof(gpu::PathData)
atIndex:PATH_BUFFER_IDX];
if (flushDesc.interlockMode == pls::InterlockMode::atomics)
if (flushDesc.interlockMode == gpu::InterlockMode::atomics)
{
[encoder setFragmentBuffer:mtl_buffer(paintBufferRing())
offset:flushDesc.firstPaint * sizeof(pls::PaintData)
offset:flushDesc.firstPaint * sizeof(gpu::PaintData)
atIndex:PAINT_BUFFER_IDX];
[encoder setFragmentBuffer:mtl_buffer(paintAuxBufferRing())
offset:flushDesc.firstPaintAux * sizeof(pls::PaintAuxData)
offset:flushDesc.firstPaintAux * sizeof(gpu::PaintAuxData)
atIndex:PAINT_AUX_BUFFER_IDX];
}
else
{
[encoder setVertexBuffer:mtl_buffer(paintBufferRing())
offset:flushDesc.firstPaint * sizeof(pls::PaintData)
offset:flushDesc.firstPaint * sizeof(gpu::PaintData)
atIndex:PAINT_BUFFER_IDX];
[encoder setVertexBuffer:mtl_buffer(paintAuxBufferRing())
offset:flushDesc.firstPaintAux * sizeof(pls::PaintAuxData)
offset:flushDesc.firstPaintAux * sizeof(gpu::PaintAuxData)
atIndex:PAINT_AUX_BUFFER_IDX];
}
}
if (flushDesc.contourCount > 0)
{
[encoder setVertexBuffer:mtl_buffer(contourBufferRing())
offset:flushDesc.firstContour * sizeof(pls::ContourData)
offset:flushDesc.firstContour * sizeof(gpu::ContourData)
atIndex:CONTOUR_BUFFER_IDX];
}
if (flushDesc.interlockMode == pls::InterlockMode::atomics)
if (flushDesc.interlockMode == gpu::InterlockMode::atomics)
{
// In atomic mode, the PLS planes are buffers that we need to bind separately.
// Since the PLS plane indices collide with other buffer bindings, offset the binding
// indices of these buffers by DEFAULT_BINDINGS_SET_SIZE.
if (!(baselineShaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend))
if (!(baselineShaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend))
{
[encoder setFragmentBuffer:renderTarget->colorAtomicBuffer()
offset:0
atIndex:COLOR_PLANE_IDX + DEFAULT_BINDINGS_SET_SIZE];
}
if (flushDesc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING)
if (flushDesc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
{
[encoder setFragmentBuffer:renderTarget->clipAtomicBuffer()
offset:0
@@ -821,7 +821,7 @@ id<MTLRenderCommandEncoder> PLSRenderContextMetalImpl::makeRenderPassForDraws(
void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
{
assert(desc.interlockMode != pls::InterlockMode::depthStencil); // TODO: msaa.
assert(desc.interlockMode != gpu::InterlockMode::depthStencil); // TODO: msaa.
auto* renderTarget = static_cast<PLSRenderTargetMetal*>(desc.renderTarget);
id<MTLCommandBuffer> commandBuffer = (__bridge id<MTLCommandBuffer>)desc.externalCommandBuffer;
@@ -847,7 +847,7 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
offset:desc.flushUniformDataOffsetInBytes
atIndex:FLUSH_UNIFORM_BUFFER_IDX];
[gradEncoder setVertexBuffer:mtl_buffer(gradSpanBufferRing())
offset:desc.firstComplexGradSpan * sizeof(pls::GradientSpan)
offset:desc.firstComplexGradSpan * sizeof(gpu::GradientSpan)
atIndex:0];
[gradEncoder setCullMode:MTLCullModeBack];
[gradEncoder drawPrimitives:MTLPrimitiveTypeTriangleStrip
@@ -893,19 +893,19 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
offset:desc.flushUniformDataOffsetInBytes
atIndex:FLUSH_UNIFORM_BUFFER_IDX];
[tessEncoder setVertexBuffer:mtl_buffer(tessSpanBufferRing())
offset:desc.firstTessVertexSpan * sizeof(pls::TessVertexSpan)
offset:desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan)
atIndex:0];
assert(desc.pathCount > 0);
[tessEncoder setVertexBuffer:mtl_buffer(pathBufferRing())
offset:desc.firstPath * sizeof(pls::PathData)
offset:desc.firstPath * sizeof(gpu::PathData)
atIndex:PATH_BUFFER_IDX];
assert(desc.contourCount > 0);
[tessEncoder setVertexBuffer:mtl_buffer(contourBufferRing())
offset:desc.firstContour * sizeof(pls::ContourData)
offset:desc.firstContour * sizeof(gpu::ContourData)
atIndex:CONTOUR_BUFFER_IDX];
[tessEncoder setCullMode:MTLCullModeBack];
[tessEncoder drawIndexedPrimitives:MTLPrimitiveTypeTriangle
indexCount:std::size(pls::kTessSpanIndices)
indexCount:std::size(gpu::kTessSpanIndices)
indexType:MTLIndexTypeUInt16
indexBuffer:m_tessSpanIndexBuffer
indexBufferOffset:0
@@ -929,7 +929,7 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
pass.colorAttachments[COLOR_PLANE_IDX].texture = renderTarget->targetTexture();
switch (desc.colorLoadAction)
{
case pls::LoadAction::clear:
case gpu::LoadAction::clear:
{
float cc[4];
UnpackColorToRGBA32F(desc.clearColor, cc);
@@ -938,25 +938,25 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
MTLClearColorMake(cc[0], cc[1], cc[2], cc[3]);
break;
}
case pls::LoadAction::preserveRenderTarget:
case gpu::LoadAction::preserveRenderTarget:
pass.colorAttachments[COLOR_PLANE_IDX].loadAction = MTLLoadActionLoad;
break;
case pls::LoadAction::dontCare:
case gpu::LoadAction::dontCare:
pass.colorAttachments[COLOR_PLANE_IDX].loadAction = MTLLoadActionDontCare;
break;
}
pass.colorAttachments[COLOR_PLANE_IDX].storeAction = MTLStoreActionStore;
auto baselineShaderMiscFlags = pls::ShaderMiscFlags::none;
auto baselineShaderMiscFlags = gpu::ShaderMiscFlags::none;
if (desc.interlockMode == pls::InterlockMode::rasterOrdering)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering)
{
// In rasterOrdering mode, the PLS planes are accessed as color attachments.
pass.colorAttachments[CLIP_PLANE_IDX].texture = renderTarget->m_clipMemorylessTexture;
pass.colorAttachments[CLIP_PLANE_IDX].loadAction = MTLLoadActionClear;
pass.colorAttachments[CLIP_PLANE_IDX].clearColor = MTLClearColorMake(0, 0, 0, 0);
pass.colorAttachments[CLIP_PLANE_IDX].storeAction =
desc.interlockMode == pls::InterlockMode::atomics ? MTLStoreActionStore
desc.interlockMode == gpu::InterlockMode::atomics ? MTLStoreActionStore
: MTLStoreActionDontCare;
pass.colorAttachments[SCRATCH_COLOR_PLANE_IDX].texture =
@@ -970,19 +970,19 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
pass.colorAttachments[COVERAGE_PLANE_IDX].clearColor =
MTLClearColorMake(desc.coverageClearValue, 0, 0, 0);
pass.colorAttachments[COVERAGE_PLANE_IDX].storeAction =
desc.interlockMode == pls::InterlockMode::atomics ? MTLStoreActionStore
desc.interlockMode == gpu::InterlockMode::atomics ? MTLStoreActionStore
: MTLStoreActionDontCare;
}
else if (!(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND))
else if (!(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND))
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
baselineShaderMiscFlags |= pls::ShaderMiscFlags::fixedFunctionColorBlend;
assert(desc.interlockMode == gpu::InterlockMode::atomics);
baselineShaderMiscFlags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend;
}
else if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget)
else if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget)
{
// Since we need to preserve the renderTarget during load, and since we're rendering to an
// offscreen color buffer, we have to literally copy the renderTarget into the color buffer.
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
id<MTLBlitCommandEncoder> copyEncoder = [commandBuffer blitCommandEncoder];
auto updateOrigin =
MTLOriginMake(desc.renderTargetUpdateBounds.left, desc.renderTargetUpdateBounds.top, 0);
@@ -1013,30 +1013,30 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
}
// Set up the pipeline for this specific drawType and shaderFeatures.
pls::ShaderFeatures shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics
gpu::ShaderFeatures shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics
? desc.combinedShaderFeatures
: batch.shaderFeatures;
pls::ShaderMiscFlags batchMiscFlags = baselineShaderMiscFlags;
if (!(batchMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend))
gpu::ShaderMiscFlags batchMiscFlags = baselineShaderMiscFlags;
if (!(batchMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend))
{
if (batch.drawType == pls::DrawType::plsAtomicResolve)
if (batch.drawType == gpu::DrawType::gpuAtomicResolve)
{
// Atomic mode can always do a coalesced resolve when rendering to an offscreen
// color buffer.
batchMiscFlags |= pls::ShaderMiscFlags::coalescedResolveAndTransfer;
batchMiscFlags |= gpu::ShaderMiscFlags::coalescedResolveAndTransfer;
}
else if (batch.drawType == pls::DrawType::plsAtomicInitialize)
else if (batch.drawType == gpu::DrawType::gpuAtomicInitialize)
{
if (desc.colorLoadAction == pls::LoadAction::clear)
if (desc.colorLoadAction == gpu::LoadAction::clear)
{
batchMiscFlags |= pls::ShaderMiscFlags::storeColorClear;
batchMiscFlags |= gpu::ShaderMiscFlags::storeColorClear;
}
else if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget &&
else if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget &&
renderTarget->pixelFormat() == MTLPixelFormatBGRA8Unorm)
{
// We already copied the renderTarget to our color buffer, but since the target
// is BGRA, we also need to swizzle it to RGBA before it's ready for PLS.
batchMiscFlags |= pls::ShaderMiscFlags::swizzleColorBGRAToRGBA;
batchMiscFlags |= gpu::ShaderMiscFlags::swizzleColorBGRAToRGBA;
}
}
}
@@ -1097,10 +1097,10 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
[encoder setCullMode:MTLCullModeNone];
if (drawType == DrawType::imageRect)
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
[encoder setVertexBuffer:m_imageRectVertexBuffer offset:0 atIndex:0];
[encoder drawIndexedPrimitives:MTLPrimitiveTypeTriangle
indexCount:std::size(pls::kImageRectIndices)
indexCount:std::size(gpu::kImageRectIndices)
indexType:MTLIndexTypeUInt16
indexBuffer:m_imageRectIndexBuffer
indexBufferOffset:0];
@@ -1122,10 +1122,10 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
}
break;
}
case DrawType::plsAtomicInitialize:
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicInitialize:
case DrawType::gpuAtomicResolve:
{
assert(desc.interlockMode == pls::InterlockMode::atomics);
assert(desc.interlockMode == gpu::InterlockMode::atomics);
[encoder setRenderPipelineState:drawPipelineState];
[encoder drawPrimitives:MTLPrimitiveTypeTriangleStrip vertexStart:0 vertexCount:4];
break;
@@ -1135,7 +1135,7 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
RIVE_UNREACHABLE();
}
}
if (desc.interlockMode == pls::InterlockMode::atomics && batch.needsBarrier)
if (desc.interlockMode == gpu::InterlockMode::atomics && batch.needsBarrier)
{
switch (m_metalFeatures.atomicBarrierType)
{
@@ -1181,4 +1181,4 @@ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc)
}];
}
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,16 +2,16 @@
* Copyright 2022 Rive
*/
#include "rive/pls/pls.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/pls/pls_render_target.hpp"
#include "rive/renderer/render_target.hpp"
#include "shaders/constants.glsl"
#include "rive/pls/pls_image.hpp"
#include "pls_paint.hpp"
#include "rive/renderer/image.hpp"
#include "rive_render_paint.hpp"
#include "generated/shaders/draw_path.exports.h"
namespace rive::pls
namespace rive::gpu
{
static_assert(kGradTextureWidth == GRAD_TEXTURE_WIDTH);
static_assert(kTessTextureWidth == TESS_TEXTURE_WIDTH);
@@ -24,13 +24,13 @@ uint32_t ShaderUniqueKey(DrawType drawType,
{
if (miscFlags & ShaderMiscFlags::coalescedResolveAndTransfer)
{
assert(drawType == DrawType::plsAtomicResolve);
assert(drawType == DrawType::gpuAtomicResolve);
assert(shaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND);
assert(interlockMode == InterlockMode::atomics);
}
if (miscFlags & (ShaderMiscFlags::storeColorClear | ShaderMiscFlags::swizzleColorBGRAToRGBA))
{
assert(drawType == DrawType::plsAtomicInitialize);
assert(drawType == DrawType::gpuAtomicInitialize);
}
uint32_t drawTypeKey;
switch (drawType)
@@ -48,16 +48,16 @@ uint32_t ShaderUniqueKey(DrawType drawType,
case DrawType::imageMesh:
drawTypeKey = 3;
break;
case DrawType::plsAtomicInitialize:
assert(interlockMode == pls::InterlockMode::atomics);
case DrawType::gpuAtomicInitialize:
assert(interlockMode == gpu::InterlockMode::atomics);
drawTypeKey = 4;
break;
case DrawType::plsAtomicResolve:
assert(interlockMode == pls::InterlockMode::atomics);
case DrawType::gpuAtomicResolve:
assert(interlockMode == gpu::InterlockMode::atomics);
drawTypeKey = 5;
break;
case DrawType::stencilClipReset:
assert(interlockMode == pls::InterlockMode::depthStencil);
assert(interlockMode == gpu::InterlockMode::depthStencil);
drawTypeKey = 6;
break;
}
@@ -456,7 +456,7 @@ void PaintAuxData::set(const Mat2D& viewMatrix,
const PLSTexture* imageTexture,
const ClipRectInverseMatrix* clipRectInverseMatrix,
const PLSRenderTarget* renderTarget,
const pls::PlatformFeatures& platformFeatures)
const gpu::PlatformFeatures& platformFeatures)
{
switch (paintType)
{
@@ -561,9 +561,9 @@ ImageDrawUniforms::ImageDrawUniforms(const Mat2D& matrix,
std::tuple<uint32_t, uint32_t> StorageTextureSize(size_t bufferSizeInBytes,
StorageBufferStructure bufferStructure)
{
assert(bufferSizeInBytes % pls::StorageBufferElementSizeInBytes(bufferStructure) == 0);
assert(bufferSizeInBytes % gpu::StorageBufferElementSizeInBytes(bufferStructure) == 0);
uint32_t elementCount = math::lossless_numeric_cast<uint32_t>(bufferSizeInBytes) /
pls::StorageBufferElementSizeInBytes(bufferStructure);
gpu::StorageBufferElementSizeInBytes(bufferStructure);
uint32_t height = (elementCount + STORAGE_TEXTURE_WIDTH - 1) / STORAGE_TEXTURE_WIDTH;
// PLSRenderContext is responsible for breaking up a flush before any storage buffer grows
// larger than can be supported by a GL texture of width "STORAGE_TEXTURE_WIDTH".
@@ -579,7 +579,7 @@ size_t StorageTextureBufferSize(size_t bufferSizeInBytes, StorageBufferStructure
// The polyfill texture needs to be updated in entire rows at a time. Extend the buffer's length
// to be able to service a worst-case scenario.
return bufferSizeInBytes +
(STORAGE_TEXTURE_WIDTH - 1) * pls::StorageBufferElementSizeInBytes(bufferStructure);
(STORAGE_TEXTURE_WIDTH - 1) * gpu::StorageBufferElementSizeInBytes(bufferStructure);
}
float FindTransformedArea(const AABB& bounds, const Mat2D& matrix)
@@ -595,4 +595,4 @@ float FindTransformedArea(const AABB& bounds, const Mat2D& matrix)
screenSpacePts[3] - screenSpacePts[0]};
return (fabsf(Vec2D::cross(v[0], v[1])) + fabsf(Vec2D::cross(v[1], v[2]))) * .5f;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,19 +2,19 @@
* Copyright 2022 Rive
*/
#include "rive/pls/pls_render_context.hpp"
#include "rive/renderer/render_context.hpp"
#include "gr_inner_fan_triangulator.hpp"
#include "intersection_board.hpp"
#include "pls_paint.hpp"
#include "rive/pls/pls_draw.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/pls/pls_render_context_impl.hpp"
#include "rive_render_paint.hpp"
#include "rive/renderer/draw.hpp"
#include "rive/renderer/image.hpp"
#include "rive/renderer/render_context_impl.hpp"
#include "shaders/constants.glsl"
#include <string_view>
namespace rive::pls
namespace rive::gpu
{
constexpr size_t kDefaultSimpleGradientCapacity = 512;
constexpr size_t kDefaultComplexGradientCapacity = 1024;
@@ -23,14 +23,14 @@ constexpr size_t kDefaultDrawCapacity = 2048;
constexpr uint32_t kMaxTextureHeight = 2048; // TODO: Move this variable to PlatformFeatures.
constexpr size_t kMaxTessellationVertexCount = kMaxTextureHeight * kTessTextureWidth;
constexpr size_t kMaxTessellationPaddingVertexCount =
pls::kMidpointFanPatchSegmentSpan + // Padding at the beginning of the tess texture
(pls::kOuterCurvePatchSegmentSpan - 1) + // Max padding between patch types in the tess texture
gpu::kMidpointFanPatchSegmentSpan + // Padding at the beginning of the tess texture
(gpu::kOuterCurvePatchSegmentSpan - 1) + // Max padding between patch types in the tess texture
1; // Padding at the end of the tessellation texture
constexpr size_t kMaxTessellationVertexCountBeforePadding =
kMaxTessellationVertexCount - kMaxTessellationPaddingVertexCount;
// Metal requires vertex buffers to be 256-byte aligned.
constexpr size_t kMaxTessellationAlignmentVertices = pls::kTessVertexBufferAlignmentInElements - 1;
constexpr size_t kMaxTessellationAlignmentVertices = gpu::kTessVertexBufferAlignmentInElements - 1;
// We can only reorder 32767 draws at a time since the one-based groupIndex returned by
// IntersectionBoard is a signed 16-bit integer.
@@ -44,7 +44,7 @@ template <size_t WidthInItems> constexpr static size_t resource_texture_height(s
constexpr static size_t gradient_data_height(size_t simpleRampCount, size_t complexRampCount)
{
return resource_texture_height<pls::kGradTextureWidthInSimpleRamps>(simpleRampCount) +
return resource_texture_height<gpu::kGradTextureWidthInSimpleRamps>(simpleRampCount) +
complexRampCount;
}
@@ -103,7 +103,7 @@ PLSRenderContext::~PLSRenderContext()
m_logicalFlushes.clear();
}
const pls::PlatformFeatures& PLSRenderContext::platformFeatures() const
const gpu::PlatformFeatures& PLSRenderContext::platformFeatures() const
{
return m_impl->platformFeatures();
}
@@ -176,10 +176,10 @@ void PLSRenderContext::LogicalFlush::rewind()
m_flushDesc = FlushDescriptor();
m_drawList.reset();
m_combinedShaderFeatures = pls::ShaderFeatures::NONE;
m_combinedShaderFeatures = gpu::ShaderFeatures::NONE;
m_currentPathIsStroked = false;
m_currentPathContourDirections = pls::ContourDirections::none;
m_currentPathContourDirections = gpu::ContourDirections::none;
m_currentPathID = 0;
m_currentContourID = 0;
m_currentContourPaddingVertexCount = 0;
@@ -231,17 +231,17 @@ void PLSRenderContext::beginFrame(const FrameDescriptor& frameDescriptor)
}
if (m_frameDescriptor.msaaSampleCount > 0)
{
m_frameInterlockMode = pls::InterlockMode::depthStencil;
m_frameInterlockMode = gpu::InterlockMode::depthStencil;
}
else if (m_frameDescriptor.disableRasterOrdering || !platformFeatures().supportsRasterOrdering)
{
m_frameInterlockMode = pls::InterlockMode::atomics;
m_frameInterlockMode = gpu::InterlockMode::atomics;
}
else
{
m_frameInterlockMode = pls::InterlockMode::rasterOrdering;
m_frameInterlockMode = gpu::InterlockMode::rasterOrdering;
}
m_frameShaderFeaturesMask = pls::ShaderFeaturesMaskFor(m_frameInterlockMode);
m_frameShaderFeaturesMask = gpu::ShaderFeaturesMaskFor(m_frameInterlockMode);
if (m_logicalFlushes.empty())
{
m_logicalFlushes.emplace_back(new LogicalFlush(this));
@@ -261,14 +261,14 @@ bool PLSRenderContext::isOutsideCurrentFrame(const IAABB& pixelBounds)
bool PLSRenderContext::frameSupportsClipRects() const
{
assert(m_didBeginFrame);
return m_frameInterlockMode != pls::InterlockMode::depthStencil ||
return m_frameInterlockMode != gpu::InterlockMode::depthStencil ||
platformFeatures().supportsClipPlanes;
}
bool PLSRenderContext::frameSupportsImagePaintForPaths() const
{
assert(m_didBeginFrame);
return m_frameInterlockMode != pls::InterlockMode::atomics ||
return m_frameInterlockMode != gpu::InterlockMode::atomics ||
platformFeatures().supportsBindlessTextures;
}
@@ -317,7 +317,7 @@ bool PLSRenderContext::LogicalFlush::pushDrawBatch(PLSDrawUniquePtr draws[], siz
{
assert(!m_hasDoneLayout);
if (m_flushDesc.interlockMode == pls::InterlockMode::atomics &&
if (m_flushDesc.interlockMode == gpu::InterlockMode::atomics &&
m_drawList.count() + drawCount > kMaxReorderedDrawCount)
{
// We can only reorder 64k draws at a time since the sort key addresses them with a 16-bit
@@ -367,7 +367,7 @@ bool PLSRenderContext::LogicalFlush::pushDrawBatch(PLSDrawUniquePtr draws[], siz
bool PLSRenderContext::LogicalFlush::allocateGradient(const PLSGradient* gradient,
PLSDraw::ResourceCounters* counters,
pls::ColorRampLocation* colorRampLocation)
gpu::ColorRampLocation* colorRampLocation)
{
assert(!m_hasDoneLayout);
@@ -481,7 +481,7 @@ void PLSRenderContext::flush(const FlushResources& flushResources)
// The gradient texture needs to be updated in entire rows at a time. Extend its
// texture-transfer buffer's length in order to be able to serve a worst-case scenario.
allocs.simpleGradientBufferCount =
layoutCounts.simpleGradCount + pls::kGradTextureWidthInSimpleRamps - 1;
layoutCounts.simpleGradCount + gpu::kGradTextureWidthInSimpleRamps - 1;
allocs.complexGradSpanBufferCount =
totalFrameResourceCounts.complexGradientSpanCount + layoutCounts.gradSpanPaddingCount;
allocs.tessSpanBufferCount = totalFrameResourceCounts.maxTessellatedSegmentCount;
@@ -594,16 +594,16 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
// Storage buffer offsets are required to be aligned on multiples of 256.
m_pathPaddingCount =
pls::PaddingToAlignUp<pls::kPathBufferAlignmentInElements>(m_resourceCounts.pathCount);
gpu::PaddingToAlignUp<gpu::kPathBufferAlignmentInElements>(m_resourceCounts.pathCount);
m_paintPaddingCount =
pls::PaddingToAlignUp<pls::kPaintBufferAlignmentInElements>(m_resourceCounts.pathCount);
gpu::PaddingToAlignUp<gpu::kPaintBufferAlignmentInElements>(m_resourceCounts.pathCount);
m_paintAuxPaddingCount =
pls::PaddingToAlignUp<pls::kPaintAuxBufferAlignmentInElements>(m_resourceCounts.pathCount);
m_contourPaddingCount = pls::PaddingToAlignUp<pls::kContourBufferAlignmentInElements>(
gpu::PaddingToAlignUp<gpu::kPaintAuxBufferAlignmentInElements>(m_resourceCounts.pathCount);
m_contourPaddingCount = gpu::PaddingToAlignUp<gpu::kContourBufferAlignmentInElements>(
m_resourceCounts.contourCount);
// Metal requires vertex buffers to be 256-byte aligned.
m_gradSpanPaddingCount = pls::PaddingToAlignUp<pls::kGradSpanBufferAlignmentInElements>(
m_gradSpanPaddingCount = gpu::PaddingToAlignUp<gpu::kGradSpanBufferAlignmentInElements>(
m_resourceCounts.complexGradientSpanCount);
size_t totalTessVertexCountWithPadding = 0;
@@ -612,7 +612,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
{
// midpointFan tessellation vertices reside at the beginning of the tessellation texture,
// after 1 patch of padding vertices.
constexpr uint32_t kPrePadding = pls::kMidpointFanPatchSegmentSpan;
constexpr uint32_t kPrePadding = gpu::kMidpointFanPatchSegmentSpan;
m_midpointFanTessVertexIdx = kPrePadding;
m_midpointFanTessEndLocation =
m_midpointFanTessVertexIdx +
@@ -621,7 +621,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
// outerCubic tessellation vertices reside after the midpointFan vertices, aligned on a
// multiple of the outerCubic patch size.
uint32_t interiorPadding =
PaddingToAlignUp<pls::kOuterCurvePatchSegmentSpan>(m_midpointFanTessEndLocation);
PaddingToAlignUp<gpu::kOuterCurvePatchSegmentSpan>(m_midpointFanTessEndLocation);
m_outerCubicTessVertexIdx = m_midpointFanTessEndLocation + interiorPadding;
m_outerCubicTessEndLocation =
m_outerCubicTessVertexIdx +
@@ -662,16 +662,16 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
if (logicalFlushIdx != 0)
{
// We always have to preserve the renderTarget between logical flushes.
m_flushDesc.colorLoadAction = pls::LoadAction::preserveRenderTarget;
m_flushDesc.colorLoadAction = gpu::LoadAction::preserveRenderTarget;
}
else if (frameDescriptor.loadAction == pls::LoadAction::clear)
else if (frameDescriptor.loadAction == gpu::LoadAction::clear)
{
// In atomic mode, we can clear during the resolve operation if the clearColor is opaque
// (because we don't want or have a "source only" blend mode).
doClearDuringAtomicResolve = m_ctx->frameInterlockMode() == pls::InterlockMode::atomics &&
doClearDuringAtomicResolve = m_ctx->frameInterlockMode() == gpu::InterlockMode::atomics &&
colorAlpha(frameDescriptor.clearColor) == 255;
m_flushDesc.colorLoadAction =
doClearDuringAtomicResolve ? pls::LoadAction::dontCare : pls::LoadAction::clear;
doClearDuringAtomicResolve ? gpu::LoadAction::dontCare : gpu::LoadAction::clear;
}
else
{
@@ -686,10 +686,10 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
// pathID=0 to be a solid fill matching the clearColor, so if we just initialize the coverage
// buffer to solid coverage with pathID=0, the resolve step will write out the correct clear
// color.
assert(m_flushDesc.interlockMode == pls::InterlockMode::atomics);
assert(m_flushDesc.interlockMode == gpu::InterlockMode::atomics);
m_flushDesc.coverageClearValue = static_cast<uint32_t>(FIXED_COVERAGE_ONE);
}
else if (m_flushDesc.interlockMode == pls::InterlockMode::atomics)
else if (m_flushDesc.interlockMode == gpu::InterlockMode::atomics)
{
// When we don't skip the initial clear in atomic mode, clear the coverage buffer to
// pathID=0 and a transparent coverage value.
@@ -704,7 +704,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
m_flushDesc.coverageClearValue = 0;
}
if (doClearDuringAtomicResolve || m_flushDesc.colorLoadAction == pls::LoadAction::clear)
if (doClearDuringAtomicResolve || m_flushDesc.colorLoadAction == gpu::LoadAction::clear)
{
// If we're clearing then we always update the entire render target.
m_flushDesc.renderTargetUpdateBounds = m_flushDesc.renderTarget->bounds();
@@ -721,7 +721,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
m_flushDesc.renderTargetUpdateBounds = {0, 0, 0, 0};
}
m_flushDesc.flushUniformDataOffsetInBytes = logicalFlushIdx * sizeof(pls::FlushUniforms);
m_flushDesc.flushUniformDataOffsetInBytes = logicalFlushIdx * sizeof(gpu::FlushUniforms);
m_flushDesc.pathCount = math::lossless_numeric_cast<uint32_t>(m_resourceCounts.pathCount);
m_flushDesc.firstPath =
runningFrameResourceCounts->pathCount + runningFrameLayoutCounts->pathPaddingCount;
@@ -738,12 +738,12 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
runningFrameLayoutCounts->gradSpanPaddingCount;
m_flushDesc.simpleGradTexelsWidth =
std::min<uint32_t>(math::lossless_numeric_cast<uint32_t>(m_simpleGradients.size()),
pls::kGradTextureWidthInSimpleRamps) *
gpu::kGradTextureWidthInSimpleRamps) *
2;
m_flushDesc.simpleGradTexelsHeight = static_cast<uint32_t>(
resource_texture_height<pls::kGradTextureWidthInSimpleRamps>(m_simpleGradients.size()));
resource_texture_height<gpu::kGradTextureWidthInSimpleRamps>(m_simpleGradients.size()));
m_flushDesc.simpleGradDataOffsetInBytes =
runningFrameLayoutCounts->simpleGradCount * sizeof(pls::TwoTexelRamp);
runningFrameLayoutCounts->simpleGradCount * sizeof(gpu::TwoTexelRamp);
m_flushDesc.complexGradRowsTop = m_flushDesc.simpleGradTexelsHeight;
m_flushDesc.complexGradRowsHeight =
math::lossless_numeric_cast<uint32_t>(m_complexGradients.size());
@@ -771,17 +771,17 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush
runningFrameLayoutCounts->maxTessTextureHeight =
std::max(m_flushDesc.tessDataHeight, runningFrameLayoutCounts->maxTessTextureHeight);
assert(m_flushDesc.firstPath % pls::kPathBufferAlignmentInElements == 0);
assert(m_flushDesc.firstPaint % pls::kPaintBufferAlignmentInElements == 0);
assert(m_flushDesc.firstPaintAux % pls::kPaintAuxBufferAlignmentInElements == 0);
assert(m_flushDesc.firstContour % pls::kContourBufferAlignmentInElements == 0);
assert(m_flushDesc.firstComplexGradSpan % pls::kGradSpanBufferAlignmentInElements == 0);
assert(m_flushDesc.firstPath % gpu::kPathBufferAlignmentInElements == 0);
assert(m_flushDesc.firstPaint % gpu::kPaintBufferAlignmentInElements == 0);
assert(m_flushDesc.firstPaintAux % gpu::kPaintAuxBufferAlignmentInElements == 0);
assert(m_flushDesc.firstContour % gpu::kContourBufferAlignmentInElements == 0);
assert(m_flushDesc.firstComplexGradSpan % gpu::kGradSpanBufferAlignmentInElements == 0);
RIVE_DEBUG_CODE(m_hasDoneLayout = true;)
}
void PLSRenderContext::LogicalFlush::writeResources()
{
const pls::PlatformFeatures& platformFeatures = m_ctx->platformFeatures();
const gpu::PlatformFeatures& platformFeatures = m_ctx->platformFeatures();
assert(m_hasDoneLayout);
assert(m_flushDesc.firstPath == m_ctx->m_pathData.elementsWritten());
assert(m_flushDesc.firstPaint == m_ctx->m_paintData.elementsWritten());
@@ -798,7 +798,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
// Metal requires vertex buffers to be 256-byte aligned.
size_t tessAlignmentPadding =
pls::PaddingToAlignUp<pls::kTessVertexBufferAlignmentInElements>(firstTessVertexSpan);
gpu::PaddingToAlignUp<gpu::kTessVertexBufferAlignmentInElements>(firstTessVertexSpan);
assert(tessAlignmentPadding <= kMaxTessellationAlignmentVertices);
m_ctx->m_tessSpanData.push_back_n(nullptr, tessAlignmentPadding);
m_flushDesc.firstTessVertexSpan = firstTessVertexSpan + tessAlignmentPadding;
@@ -850,7 +850,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
// Write a path record for the clearColor paint (used by atomic mode).
// This also allows us to index the storage buffers directly by pathID.
pls::SimplePaintValue clearColorValue;
gpu::SimplePaintValue clearColorValue;
clearColorValue.color = m_ctx->frameDescriptor().clearColor;
m_ctx->m_pathData.skip_back();
m_ctx->m_paintData.set_back(FillRule::nonZero,
@@ -866,7 +866,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
if (m_flushDesc.tessDataHeight > 0)
{
// Padding at the beginning of the tessellation texture.
pushPaddingVertices(0, pls::kMidpointFanPatchSegmentSpan);
pushPaddingVertices(0, gpu::kMidpointFanPatchSegmentSpan);
// Padding between patch types in the tessellation texture.
if (m_outerCubicTessVertexIdx > m_midpointFanTessEndLocation)
{
@@ -881,7 +881,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
}
// Write out all the data for our high level draws, and build up a low-level draw list.
if (m_ctx->frameInterlockMode() == pls::InterlockMode::rasterOrdering)
if (m_ctx->frameInterlockMode() == gpu::InterlockMode::rasterOrdering)
{
for (const PLSDrawUniquePtr& draw : m_plsDraws)
{
@@ -934,7 +934,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
// to maximize batching while preserving correctness.
int64_t drawGroupIdx = intersectionBoard->addRectangle(drawBounds);
assert(drawGroupIdx > 0);
if (m_flushDesc.interlockMode == pls::InterlockMode::depthStencil && draw->isOpaque())
if (m_flushDesc.interlockMode == gpu::InterlockMode::depthStencil && draw->isOpaque())
{
// In depthStencil mode we can reverse-sort opaque paths front to back, draw them
// first, and take advantage of early Z culling.
@@ -943,7 +943,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
// (Otherwise if a clip affects both opaque and transparent content, we would have
// to apply it twice.)
bool usesClipping = draw->drawContents() &
(pls::DrawContents::activeClip | pls::DrawContents::clipUpdate);
(gpu::DrawContents::activeClip | gpu::DrawContents::clipUpdate);
if (!usesClipping)
{
drawGroupIdx = -drawGroupIdx;
@@ -967,7 +967,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
// different blend modes.
// If not using KHR_blend_equation_advanced, sorting by blend mode may still give us
// better branching on the GPU.
int64_t blendMode = pls::ConvertBlendModeToPLSBlendMode(draw->blendMode());
int64_t blendMode = gpu::ConvertBlendModeToPLSBlendMode(draw->blendMode());
assert(blendMode <= kBlendModeMask >> kBlendModeShift);
key |= blendMode << kBlendModeShift;
@@ -995,11 +995,11 @@ void PLSRenderContext::LogicalFlush::writeResources()
// Atomic mode sometimes needs to initialize PLS with a draw when the backend can't do it
// with typical clear/load APIs.
if (m_ctx->frameInterlockMode() == pls::InterlockMode::atomics &&
if (m_ctx->frameInterlockMode() == gpu::InterlockMode::atomics &&
platformFeatures.atomicPLSMustBeInitializedAsDraw)
{
m_drawList.emplace_back(m_ctx->perFrameAllocator(),
DrawType::plsAtomicInitialize,
DrawType::gpuAtomicInitialize,
nullptr,
1,
0);
@@ -1009,7 +1009,7 @@ void PLSRenderContext::LogicalFlush::writeResources()
// Draws with the same drawGroupIdx don't overlap, but once we cross into a new draw group,
// we need to insert a barrier between the overlaps.
int64_t needsBarrierMask = kDrawGroupMask;
if (m_flushDesc.interlockMode == pls::InterlockMode::depthStencil)
if (m_flushDesc.interlockMode == gpu::InterlockMode::depthStencil)
{
// depthStencil mode also draws clips, strokes, fills, and even/odd with different
// stencil settings, so these also need a barrier.
@@ -1040,11 +1040,11 @@ void PLSRenderContext::LogicalFlush::writeResources()
}
// Atomic mode needs one more draw to resolve all the pixels.
if (m_ctx->frameInterlockMode() == pls::InterlockMode::atomics)
if (m_ctx->frameInterlockMode() == gpu::InterlockMode::atomics)
{
pushBarrier();
m_drawList.emplace_back(m_ctx->perFrameAllocator(),
DrawType::plsAtomicResolve,
DrawType::gpuAtomicResolve,
nullptr,
1,
0);
@@ -1128,7 +1128,7 @@ void PLSRenderContext::setResourceSizes(ResourceAllocationCounts allocs, bool fo
logger.logSize(#NAME, \
m_currentResourceAllocations.NAME, \
allocs.NAME, \
allocs.NAME* ITEM_SIZE_IN_BYTES* pls::kBufferRingSize)
allocs.NAME* ITEM_SIZE_IN_BYTES* gpu::kBufferRingSize)
#define LOG_TEXTURE_HEIGHT(NAME, BYTES_PER_ROW) \
logger.logSize(#NAME, \
m_currentResourceAllocations.NAME, \
@@ -1139,102 +1139,102 @@ void PLSRenderContext::setResourceSizes(ResourceAllocationCounts allocs, bool fo
#define LOG_TEXTURE_HEIGHT(NAME, BYTES_PER_ROW)
#endif
LOG_BUFFER_RING_SIZE(flushUniformBufferCount, sizeof(pls::FlushUniforms));
LOG_BUFFER_RING_SIZE(flushUniformBufferCount, sizeof(gpu::FlushUniforms));
if (allocs.flushUniformBufferCount != m_currentResourceAllocations.flushUniformBufferCount ||
forceRealloc)
{
m_impl->resizeFlushUniformBuffer(allocs.flushUniformBufferCount *
sizeof(pls::FlushUniforms));
sizeof(gpu::FlushUniforms));
}
LOG_BUFFER_RING_SIZE(imageDrawUniformBufferCount, sizeof(pls::ImageDrawUniforms));
LOG_BUFFER_RING_SIZE(imageDrawUniformBufferCount, sizeof(gpu::ImageDrawUniforms));
if (allocs.imageDrawUniformBufferCount !=
m_currentResourceAllocations.imageDrawUniformBufferCount ||
forceRealloc)
{
m_impl->resizeImageDrawUniformBuffer(allocs.imageDrawUniformBufferCount *
sizeof(pls::ImageDrawUniforms));
sizeof(gpu::ImageDrawUniforms));
}
LOG_BUFFER_RING_SIZE(pathBufferCount, sizeof(pls::PathData));
LOG_BUFFER_RING_SIZE(pathBufferCount, sizeof(gpu::PathData));
if (allocs.pathBufferCount != m_currentResourceAllocations.pathBufferCount || forceRealloc)
{
m_impl->resizePathBuffer(allocs.pathBufferCount * sizeof(pls::PathData),
pls::PathData::kBufferStructure);
m_impl->resizePathBuffer(allocs.pathBufferCount * sizeof(gpu::PathData),
gpu::PathData::kBufferStructure);
}
LOG_BUFFER_RING_SIZE(paintBufferCount, sizeof(pls::PaintData));
LOG_BUFFER_RING_SIZE(paintBufferCount, sizeof(gpu::PaintData));
if (allocs.paintBufferCount != m_currentResourceAllocations.paintBufferCount || forceRealloc)
{
m_impl->resizePaintBuffer(allocs.paintBufferCount * sizeof(pls::PaintData),
pls::PaintData::kBufferStructure);
m_impl->resizePaintBuffer(allocs.paintBufferCount * sizeof(gpu::PaintData),
gpu::PaintData::kBufferStructure);
}
LOG_BUFFER_RING_SIZE(paintAuxBufferCount, sizeof(pls::PaintAuxData));
LOG_BUFFER_RING_SIZE(paintAuxBufferCount, sizeof(gpu::PaintAuxData));
if (allocs.paintAuxBufferCount != m_currentResourceAllocations.paintAuxBufferCount ||
forceRealloc)
{
m_impl->resizePaintAuxBuffer(allocs.paintAuxBufferCount * sizeof(pls::PaintAuxData),
pls::PaintAuxData::kBufferStructure);
m_impl->resizePaintAuxBuffer(allocs.paintAuxBufferCount * sizeof(gpu::PaintAuxData),
gpu::PaintAuxData::kBufferStructure);
}
LOG_BUFFER_RING_SIZE(contourBufferCount, sizeof(pls::ContourData));
LOG_BUFFER_RING_SIZE(contourBufferCount, sizeof(gpu::ContourData));
if (allocs.contourBufferCount != m_currentResourceAllocations.contourBufferCount ||
forceRealloc)
{
m_impl->resizeContourBuffer(allocs.contourBufferCount * sizeof(pls::ContourData),
pls::ContourData::kBufferStructure);
m_impl->resizeContourBuffer(allocs.contourBufferCount * sizeof(gpu::ContourData),
gpu::ContourData::kBufferStructure);
}
LOG_BUFFER_RING_SIZE(simpleGradientBufferCount, sizeof(pls::TwoTexelRamp));
LOG_BUFFER_RING_SIZE(simpleGradientBufferCount, sizeof(gpu::TwoTexelRamp));
if (allocs.simpleGradientBufferCount !=
m_currentResourceAllocations.simpleGradientBufferCount ||
forceRealloc)
{
m_impl->resizeSimpleColorRampsBuffer(allocs.simpleGradientBufferCount *
sizeof(pls::TwoTexelRamp));
sizeof(gpu::TwoTexelRamp));
}
LOG_BUFFER_RING_SIZE(complexGradSpanBufferCount, sizeof(pls::GradientSpan));
LOG_BUFFER_RING_SIZE(complexGradSpanBufferCount, sizeof(gpu::GradientSpan));
if (allocs.complexGradSpanBufferCount !=
m_currentResourceAllocations.complexGradSpanBufferCount ||
forceRealloc)
{
m_impl->resizeGradSpanBuffer(allocs.complexGradSpanBufferCount * sizeof(pls::GradientSpan));
m_impl->resizeGradSpanBuffer(allocs.complexGradSpanBufferCount * sizeof(gpu::GradientSpan));
}
LOG_BUFFER_RING_SIZE(tessSpanBufferCount, sizeof(pls::TessVertexSpan));
LOG_BUFFER_RING_SIZE(tessSpanBufferCount, sizeof(gpu::TessVertexSpan));
if (allocs.tessSpanBufferCount != m_currentResourceAllocations.tessSpanBufferCount ||
forceRealloc)
{
m_impl->resizeTessVertexSpanBuffer(allocs.tessSpanBufferCount *
sizeof(pls::TessVertexSpan));
sizeof(gpu::TessVertexSpan));
}
LOG_BUFFER_RING_SIZE(triangleVertexBufferCount, sizeof(pls::TriangleVertex));
LOG_BUFFER_RING_SIZE(triangleVertexBufferCount, sizeof(gpu::TriangleVertex));
if (allocs.triangleVertexBufferCount !=
m_currentResourceAllocations.triangleVertexBufferCount ||
forceRealloc)
{
m_impl->resizeTriangleVertexBuffer(allocs.triangleVertexBufferCount *
sizeof(pls::TriangleVertex));
sizeof(gpu::TriangleVertex));
}
allocs.gradTextureHeight = std::min<size_t>(allocs.gradTextureHeight, kMaxTextureHeight);
LOG_TEXTURE_HEIGHT(gradTextureHeight, pls::kGradTextureWidth * 4);
LOG_TEXTURE_HEIGHT(gradTextureHeight, gpu::kGradTextureWidth * 4);
if (allocs.gradTextureHeight != m_currentResourceAllocations.gradTextureHeight || forceRealloc)
{
m_impl->resizeGradientTexture(
pls::kGradTextureWidth,
gpu::kGradTextureWidth,
math::lossless_numeric_cast<uint32_t>(allocs.gradTextureHeight));
}
allocs.tessTextureHeight = std::min<size_t>(allocs.tessTextureHeight, kMaxTextureHeight);
LOG_TEXTURE_HEIGHT(tessTextureHeight, pls::kTessTextureWidth * 4 * 4);
LOG_TEXTURE_HEIGHT(tessTextureHeight, gpu::kTessTextureWidth * 4 * 4);
if (allocs.tessTextureHeight != m_currentResourceAllocations.tessTextureHeight || forceRealloc)
{
m_impl->resizeTessellationTexture(
pls::kTessTextureWidth,
gpu::kTessTextureWidth,
math::lossless_numeric_cast<uint32_t>(allocs.tessTextureHeight));
}
@@ -1397,8 +1397,8 @@ void PLSRenderContext::LogicalFlush::pushPaddingVertices(uint32_t tessLocation,
assert(m_pathTessLocation == m_expectedPathTessLocationAtEndOfPath);
}
void PLSRenderContext::LogicalFlush::pushPath(PLSPathDraw* draw,
pls::PatchType patchType,
void PLSRenderContext::LogicalFlush::pushPath(RiveRenderPathDraw* draw,
gpu::PatchType patchType,
uint32_t tessVertexCount)
{
assert(m_hasDoneLayout);
@@ -1432,7 +1432,7 @@ void PLSRenderContext::LogicalFlush::pushPath(PLSPathDraw* draw,
assert(m_flushDesc.firstPaintAux + m_currentPathID + 1 ==
m_ctx->m_paintAuxData.elementsWritten());
pls::DrawType drawType;
gpu::DrawType drawType;
uint32_t tessLocation;
if (patchType == PatchType::midpointFan)
{
@@ -1455,18 +1455,18 @@ void PLSRenderContext::LogicalFlush::pushPath(PLSPathDraw* draw,
uint32_t baseInstance = math::lossless_numeric_cast<uint32_t>(tessLocation / patchSize);
assert(baseInstance * patchSize == tessLocation); // flush() is responsible for alignment.
if (m_currentPathContourDirections == pls::ContourDirections::reverseAndForward)
if (m_currentPathContourDirections == gpu::ContourDirections::reverseAndForward)
{
assert(tessVertexCount % 2 == 0);
m_pathTessLocation = m_pathMirroredTessLocation = tessLocation + tessVertexCount / 2;
}
else if (m_currentPathContourDirections == pls::ContourDirections::forward)
else if (m_currentPathContourDirections == gpu::ContourDirections::forward)
{
m_pathTessLocation = m_pathMirroredTessLocation = tessLocation;
}
else
{
assert(m_currentPathContourDirections == pls::ContourDirections::reverse);
assert(m_currentPathContourDirections == gpu::ContourDirections::reverse);
m_pathTessLocation = m_pathMirroredTessLocation = tessLocation + tessVertexCount;
}
@@ -1489,17 +1489,17 @@ void PLSRenderContext::LogicalFlush::pushContour(Vec2D midpoint,
midpoint.x = closed ? 1 : 0;
}
// If the contour is closed, the shader needs a vertex to wrap back around to at the end of it.
uint32_t vertexIndex0 = m_currentPathContourDirections & pls::ContourDirections::forward
uint32_t vertexIndex0 = m_currentPathContourDirections & gpu::ContourDirections::forward
? m_pathTessLocation
: m_pathMirroredTessLocation - 1;
m_ctx->m_contourData.emplace_back(midpoint, m_currentPathID, vertexIndex0);
++m_currentContourID;
assert(0 < m_currentContourID && m_currentContourID <= pls::kMaxContourID);
assert(0 < m_currentContourID && m_currentContourID <= gpu::kMaxContourID);
assert(m_flushDesc.firstContour + m_currentContourID == m_ctx->m_contourData.elementsWritten());
// The first curve of the contour will be pre-padded with 'paddingVertexCount' tessellation
// vertices, colocated at T=0. The caller must use this argument to align the end of the contour on
// a boundary of the patch size. (See pls::PaddingToAlignUp().)
// a boundary of the patch size. (See gpu::PaddingToAlignUp().)
m_currentContourPaddingVertexCount = paddingVertexCount;
}
@@ -1526,7 +1526,7 @@ void PLSRenderContext::LogicalFlush::pushCubic(const Vec2D pts[4],
// Only the first curve of a contour gets padding vertices.
m_currentContourPaddingVertexCount = 0;
if (m_currentPathContourDirections == pls::ContourDirections::reverseAndForward)
if (m_currentPathContourDirections == gpu::ContourDirections::reverseAndForward)
{
pushMirroredAndForwardTessellationSpans(pts,
joinTangent,
@@ -1536,7 +1536,7 @@ void PLSRenderContext::LogicalFlush::pushCubic(const Vec2D pts[4],
joinSegmentCount,
m_currentContourID | additionalContourFlags);
}
else if (m_currentPathContourDirections == pls::ContourDirections::forward)
else if (m_currentPathContourDirections == gpu::ContourDirections::forward)
{
pushTessellationSpans(pts,
joinTangent,
@@ -1548,7 +1548,7 @@ void PLSRenderContext::LogicalFlush::pushCubic(const Vec2D pts[4],
}
else
{
assert(m_currentPathContourDirections == pls::ContourDirections::reverse);
assert(m_currentPathContourDirections == gpu::ContourDirections::reverse);
pushMirroredTessellationSpans(pts,
joinTangent,
totalVertexCount,
@@ -1786,7 +1786,7 @@ void PLSRenderContext::LogicalFlush::pushStencilClipReset(StencilClipReset* draw
void PLSRenderContext::LogicalFlush::pushBarrier()
{
assert(m_hasDoneLayout);
assert(m_flushDesc.interlockMode != pls::InterlockMode::rasterOrdering);
assert(m_flushDesc.interlockMode != gpu::InterlockMode::rasterOrdering);
if (!m_drawList.empty())
{
@@ -1794,7 +1794,7 @@ void PLSRenderContext::LogicalFlush::pushBarrier()
}
}
pls::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(PLSPathDraw* draw,
gpu::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(RiveRenderPathDraw* draw,
DrawType drawType,
uint32_t vertexCount,
uint32_t baseVertex)
@@ -1802,7 +1802,7 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(PLSPathDraw* draw,
assert(m_hasDoneLayout);
DrawBatch& batch = pushDraw(draw, drawType, draw->paintType(), vertexCount, baseVertex);
auto pathShaderFeatures = pls::ShaderFeatures::NONE;
auto pathShaderFeatures = gpu::ShaderFeatures::NONE;
if (draw->fillRule() == FillRule::evenOdd)
{
pathShaderFeatures |= ShaderFeatures::ENABLE_EVEN_ODD;
@@ -1814,7 +1814,7 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(PLSPathDraw* draw,
batch.shaderFeatures |= pathShaderFeatures & m_ctx->m_frameShaderFeaturesMask;
m_combinedShaderFeatures |= batch.shaderFeatures;
assert((batch.shaderFeatures &
pls::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) ==
gpu::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) ==
batch.shaderFeatures);
return batch;
}
@@ -1832,9 +1832,9 @@ RIVE_ALWAYS_INLINE static bool can_combine_draw_images(const PLSTexture* current
return currentDrawTexture == nextDrawTexture;
}
pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw,
gpu::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw,
DrawType drawType,
pls::PaintType paintType,
gpu::PaintType paintType,
uint32_t elementCount,
uint32_t baseElement)
{
@@ -1845,8 +1845,8 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw,
{
case DrawType::midpointFanPatches:
case DrawType::outerCurvePatches:
case DrawType::plsAtomicInitialize:
case DrawType::plsAtomicResolve:
case DrawType::gpuAtomicInitialize:
case DrawType::gpuAtomicResolve:
case DrawType::stencilClipReset:
needsNewBatch =
m_drawList.empty() || m_drawList.tail().drawType != drawType ||
@@ -1872,11 +1872,11 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw,
assert(batch.drawType == drawType);
assert(can_combine_draw_images(batch.imageTexture, draw->imageTexture()));
assert(!batch.needsBarrier);
if (m_flushDesc.interlockMode == pls::InterlockMode::depthStencil)
if (m_flushDesc.interlockMode == gpu::InterlockMode::depthStencil)
{
// depthStencil can't mix drawContents in a batch.
assert(batch.drawContents == draw->drawContents());
assert((batch.shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) ==
assert((batch.shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) ==
(draw->blendMode() != BlendMode::srcOver));
// If using KHR_blend_equation_advanced, we can't mix blend modes in a batch.
assert(!m_ctx->platformFeatures().supportsKHRBlendEquations ||
@@ -1938,8 +1938,8 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw,
m_combinedShaderFeatures |= batch.shaderFeatures;
batch.drawContents |= draw->drawContents();
assert((batch.shaderFeatures &
pls::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) ==
gpu::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) ==
batch.shaderFeatures);
return batch;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,16 +2,16 @@
* Copyright 2022 Rive
*/
#include "rive/pls/pls_render_context_helper_impl.hpp"
#include "rive/renderer/render_context_helper_impl.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/image.hpp"
#include "shaders/constants.glsl"
#ifdef RIVE_DECODERS
#include "rive/decoders/bitmap_decoder.hpp"
#endif
namespace rive::pls
namespace rive::gpu
{
rcp<PLSTexture> PLSRenderContextHelperImpl::decodeImageTexture(Span<const uint8_t> encodedBytes)
{
@@ -44,25 +44,25 @@ void PLSRenderContextHelperImpl::resizeImageDrawUniformBuffer(size_t sizeInBytes
}
void PLSRenderContextHelperImpl::resizePathBuffer(size_t sizeInBytes,
pls::StorageBufferStructure bufferStructure)
gpu::StorageBufferStructure bufferStructure)
{
m_pathBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure);
}
void PLSRenderContextHelperImpl::resizePaintBuffer(size_t sizeInBytes,
pls::StorageBufferStructure bufferStructure)
gpu::StorageBufferStructure bufferStructure)
{
m_paintBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure);
}
void PLSRenderContextHelperImpl::resizePaintAuxBuffer(size_t sizeInBytes,
pls::StorageBufferStructure bufferStructure)
gpu::StorageBufferStructure bufferStructure)
{
m_paintAuxBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure);
}
void PLSRenderContextHelperImpl::resizeContourBuffer(size_t sizeInBytes,
pls::StorageBufferStructure bufferStructure)
gpu::StorageBufferStructure bufferStructure)
{
m_contourBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure);
}
@@ -171,4 +171,4 @@ void PLSRenderContextHelperImpl::unmapTriangleVertexBuffer()
{
m_triangleBuffer->unmapAndSubmitBuffer();
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -0,0 +1,44 @@
/*
* Copyright 2022 Rive
*/
#include "rive/renderer/rive_render_factory.hpp"
#include "rive_render_paint.hpp"
#include "rive_render_path.hpp"
#include "rive/renderer/rive_renderer.hpp"
namespace rive::gpu
{
rcp<RenderShader> RiveRenderFactory::makeLinearGradient(float sx,
float sy,
float ex,
float ey,
const ColorInt colors[], // [count]
const float stops[], // [count]
size_t count)
{
return PLSGradient::MakeLinear(sx, sy, ex, ey, colors, stops, count);
}
rcp<RenderShader> RiveRenderFactory::makeRadialGradient(float cx,
float cy,
float radius,
const ColorInt colors[], // [count]
const float stops[], // [count]
size_t count)
{
return PLSGradient::MakeRadial(cx, cy, radius, colors, stops, count);
}
rcp<RenderPath> RiveRenderFactory::makeRenderPath(RawPath& rawPath, FillRule fillRule)
{
return make_rcp<RiveRenderPath>(fillRule, rawPath);
}
rcp<RenderPath> RiveRenderFactory::makeEmptyRenderPath() { return make_rcp<RiveRenderPath>(); }
rcp<RenderPaint> RiveRenderFactory::makeRenderPaint() { return make_rcp<RiveRenderPaint>(); }
} // namespace rive::gpu

View File

@@ -2,15 +2,15 @@
* Copyright 2022 Rive
*/
#include "pls_paint.hpp"
#include "rive_render_paint.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/image.hpp"
namespace rive::pls
namespace rive::gpu
{
PLSPaint::PLSPaint() {}
RiveRenderPaint::RiveRenderPaint() {}
PLSPaint::~PLSPaint() {}
RiveRenderPaint::~RiveRenderPaint() {}
// Ensure the given gradient stops are in a format expected by PLS.
static bool validate_gradient_stops(const ColorInt colors[], // [count]
@@ -173,19 +173,19 @@ rcp<PLSGradient> PLSGradient::MakeRadial(float cx,
bool PLSGradient::isOpaque() const
{
if (m_isOpaque == pls::TriState::unknown)
if (m_isOpaque == gpu::TriState::unknown)
{
ColorInt allColors = ~0;
for (int i = 0; i < m_count; ++i)
{
allColors &= m_colors[i];
}
m_isOpaque = colorAlpha(allColors) == 0xff ? pls::TriState::yes : pls::TriState::no;
m_isOpaque = colorAlpha(allColors) == 0xff ? gpu::TriState::yes : gpu::TriState::no;
}
return m_isOpaque == pls::TriState::yes;
return m_isOpaque == gpu::TriState::yes;
}
void PLSPaint::color(ColorInt color)
void RiveRenderPaint::color(ColorInt color)
{
m_paintType = PaintType::solidColor;
m_simpleValue.color = color;
@@ -193,7 +193,7 @@ void PLSPaint::color(ColorInt color)
m_imageTexture.reset();
}
void PLSPaint::shader(rcp<RenderShader> shader)
void RiveRenderPaint::shader(rcp<RenderShader> shader)
{
m_gradient = static_rcp_cast<PLSGradient>(std::move(shader));
m_paintType = m_gradient ? m_gradient->paintType() : PaintType::solidColor;
@@ -203,7 +203,7 @@ void PLSPaint::shader(rcp<RenderShader> shader)
m_imageTexture.reset();
}
void PLSPaint::image(rcp<const PLSTexture> imageTexture, float opacity)
void RiveRenderPaint::image(rcp<const PLSTexture> imageTexture, float opacity)
{
m_paintType = PaintType::image;
m_simpleValue.imageOpacity = opacity;
@@ -211,7 +211,7 @@ void PLSPaint::image(rcp<const PLSTexture> imageTexture, float opacity)
m_imageTexture = std::move(imageTexture);
}
void PLSPaint::clipUpdate(uint32_t outerClipID)
void RiveRenderPaint::clipUpdate(uint32_t outerClipID)
{
m_paintType = PaintType::clipUpdate;
m_simpleValue.outerClipID = outerClipID;
@@ -219,19 +219,19 @@ void PLSPaint::clipUpdate(uint32_t outerClipID)
m_imageTexture.reset();
}
bool PLSPaint::getIsOpaque() const
bool RiveRenderPaint::getIsOpaque() const
{
switch (m_paintType)
{
case pls::PaintType::solidColor:
case gpu::PaintType::solidColor:
return colorAlpha(m_simpleValue.color) == 0xff;
case pls::PaintType::linearGradient:
case pls::PaintType::radialGradient:
case gpu::PaintType::linearGradient:
case gpu::PaintType::radialGradient:
return m_gradient->isOpaque();
case pls::PaintType::image:
case pls::PaintType::clipUpdate:
case gpu::PaintType::image:
case gpu::PaintType::clipUpdate:
return false;
}
RIVE_UNREACHABLE();
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -4,11 +4,11 @@
#pragma once
#include "rive/pls/pls.hpp"
#include "rive/renderer/gpu.hpp"
#include "rive/renderer.hpp"
#include <array>
namespace rive::pls
namespace rive::gpu
{
// Copies an array of colors or stops for a gradient.
// Stores the data locally if there are 4 values or fewer.
@@ -103,15 +103,15 @@ private:
PLSGradDataArray<float> m_stops;
size_t m_count;
std::array<float, 3> m_coeffs;
mutable pls::TriState m_isOpaque = pls::TriState::unknown;
mutable gpu::TriState m_isOpaque = gpu::TriState::unknown;
};
// RenderPaint implementation for Rive's pixel local storage renderer.
class PLSPaint : public lite_rtti_override<RenderPaint, PLSPaint>
class RiveRenderPaint : public lite_rtti_override<RenderPaint, RiveRenderPaint>
{
public:
PLSPaint();
~PLSPaint();
RiveRenderPaint();
~RiveRenderPaint();
void style(RenderPaintStyle style) override { m_stroked = style == RenderPaintStyle::stroke; }
void color(ColorInt color) override;
@@ -135,12 +135,12 @@ public:
StrokeJoin getJoin() const { return m_join; }
StrokeCap getCap() const { return m_cap; }
BlendMode getBlendMode() const { return m_blendMode; }
pls::SimplePaintValue getSimpleValue() const { return m_simpleValue; }
gpu::SimplePaintValue getSimpleValue() const { return m_simpleValue; }
bool getIsOpaque() const;
private:
PaintType m_paintType = PaintType::solidColor;
pls::SimplePaintValue m_simpleValue;
gpu::SimplePaintValue m_simpleValue;
rcp<const PLSGradient> m_gradient;
rcp<const PLSTexture> m_imageTexture;
float m_thickness = 1;
@@ -149,4 +149,4 @@ private:
BlendMode m_blendMode = BlendMode::srcOver;
bool m_stroked = false;
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,35 +2,35 @@
* Copyright 2022 Rive
*/
#include "pls_path.hpp"
#include "rive_render_path.hpp"
#include "eval_cubic.hpp"
#include "rive/math/simd.hpp"
#include "rive/math/wangs_formula.hpp"
namespace rive::pls
namespace rive::gpu
{
PLSPath::PLSPath(FillRule fillRule, RawPath& rawPath)
RiveRenderPath::RiveRenderPath(FillRule fillRule, RawPath& rawPath)
{
m_rawPath.swap(rawPath);
m_rawPath.pruneEmptySegments();
}
void PLSPath::rewind()
void RiveRenderPath::rewind()
{
assert(m_rawPathMutationLockCount == 0);
m_rawPath.rewind();
m_dirt = kAllDirt;
}
void PLSPath::moveTo(float x, float y)
void RiveRenderPath::moveTo(float x, float y)
{
assert(m_rawPathMutationLockCount == 0);
m_rawPath.moveTo(x, y);
m_dirt = kAllDirt;
}
void PLSPath::lineTo(float x, float y)
void RiveRenderPath::lineTo(float x, float y)
{
assert(m_rawPathMutationLockCount == 0);
@@ -46,7 +46,7 @@ void PLSPath::lineTo(float x, float y)
m_dirt = kAllDirt;
}
void PLSPath::cubicTo(float ox, float oy, float ix, float iy, float x, float y)
void RiveRenderPath::cubicTo(float ox, float oy, float ix, float iy, float x, float y)
{
assert(m_rawPathMutationLockCount == 0);
@@ -64,17 +64,17 @@ void PLSPath::cubicTo(float ox, float oy, float ix, float iy, float x, float y)
m_dirt = kAllDirt;
}
void PLSPath::close()
void RiveRenderPath::close()
{
assert(m_rawPathMutationLockCount == 0);
m_rawPath.close();
m_dirt = kAllDirt;
}
void PLSPath::addRenderPath(RenderPath* path, const Mat2D& matrix)
void RiveRenderPath::addRenderPath(RenderPath* path, const Mat2D& matrix)
{
assert(m_rawPathMutationLockCount == 0);
PLSPath* plsPath = static_cast<PLSPath*>(path);
RiveRenderPath* plsPath = static_cast<RiveRenderPath*>(path);
RawPath::Iter transformedPathIter = m_rawPath.addPath(plsPath->m_rawPath, &matrix);
if (matrix != Mat2D())
{
@@ -84,7 +84,7 @@ void PLSPath::addRenderPath(RenderPath* path, const Mat2D& matrix)
m_dirt = kAllDirt;
}
const AABB& PLSPath::getBounds() const
const AABB& RiveRenderPath::getBounds() const
{
if (m_dirt & kPathBoundsDirt)
{
@@ -94,7 +94,7 @@ const AABB& PLSPath::getBounds() const
return m_bounds;
}
float PLSPath::getCoarseArea() const
float RiveRenderPath::getCoarseArea() const
{
if (m_dirt & kPathCoarseAreaDirt)
{
@@ -153,7 +153,7 @@ float PLSPath::getCoarseArea() const
return m_coarseArea;
}
uint64_t PLSPath::getRawPathMutationID() const
uint64_t RiveRenderPath::getRawPathMutationID() const
{
static std::atomic<uint64_t> uniqueIDCounter = 0;
if (m_dirt & kRawPathMutationIDDirt)
@@ -163,4 +163,4 @@ uint64_t PLSPath::getRawPathMutationID() const
}
return m_rawPathMutationID;
}
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -7,14 +7,14 @@
#include "rive/math/raw_path.hpp"
#include "rive/renderer.hpp"
namespace rive::pls
namespace rive::gpu
{
// RenderPath implementation for Rive's pixel local storage renderer.
class PLSPath : public lite_rtti_override<RenderPath, PLSPath>
class RiveRenderPath : public lite_rtti_override<RenderPath, RiveRenderPath>
{
public:
PLSPath() = default;
PLSPath(FillRule fillRule, RawPath& rawPath);
RiveRenderPath() = default;
RiveRenderPath(FillRule fillRule, RawPath& rawPath);
void rewind() override;
void fillRule(FillRule rule) override { m_fillRule = rule; }
@@ -65,4 +65,4 @@ private:
mutable uint32_t m_dirt = kAllDirt;
RIVE_DEBUG_CODE(mutable int m_rawPathMutationLockCount = 0;)
};
} // namespace rive::pls
} // namespace rive::gpu

View File

@@ -2,18 +2,18 @@
* Copyright 2022 Rive
*/
#include "rive/pls/pls_renderer.hpp"
#include "rive/renderer/rive_renderer.hpp"
#include "pls_paint.hpp"
#include "pls_path.hpp"
#include "rive_render_paint.hpp"
#include "rive_render_path.hpp"
#include "rive/math/math_types.hpp"
#include "rive/math/simd.hpp"
#include "rive/pls/pls_image.hpp"
#include "rive/renderer/image.hpp"
#include "shaders/constants.glsl"
namespace rive::pls
namespace rive::gpu
{
bool PLSRenderer::IsAABB(const RawPath& path, AABB* result)
bool RiveRenderer::IsAABB(const RawPath& path, AABB* result)
{
// Any quadrilateral begins with a move plus 3 lines.
constexpr static size_t kAABBVerbCount = 4;
@@ -49,16 +49,18 @@ bool PLSRenderer::IsAABB(const RawPath& path, AABB* result)
return false;
}
PLSRenderer::ClipElement::ClipElement(const Mat2D& matrix_,
const PLSPath* path_,
FillRule fillRule_)
RiveRenderer::ClipElement::ClipElement(const Mat2D& matrix_,
const RiveRenderPath* path_,
FillRule fillRule_)
{
reset(matrix_, path_, fillRule_);
}
PLSRenderer::ClipElement::~ClipElement() {}
RiveRenderer::ClipElement::~ClipElement() {}
void PLSRenderer::ClipElement::reset(const Mat2D& matrix_, const PLSPath* path_, FillRule fillRule_)
void RiveRenderer::ClipElement::reset(const Mat2D& matrix_,
const RiveRenderPath* path_,
FillRule fillRule_)
{
matrix = matrix_;
rawPathMutationID = path_->getRawPathMutationID();
@@ -68,17 +70,18 @@ void PLSRenderer::ClipElement::reset(const Mat2D& matrix_, const PLSPath* path_,
clipID = 0; // This gets initialized lazily.
}
bool PLSRenderer::ClipElement::isEquivalent(const Mat2D& matrix_, const PLSPath* path_) const
bool RiveRenderer::ClipElement::isEquivalent(const Mat2D& matrix_,
const RiveRenderPath* path_) const
{
return matrix_ == matrix && path_->getRawPathMutationID() == rawPathMutationID &&
path_->getFillRule() == fillRule;
}
PLSRenderer::PLSRenderer(PLSRenderContext* context) : m_context(context) {}
RiveRenderer::RiveRenderer(PLSRenderContext* context) : m_context(context) {}
PLSRenderer::~PLSRenderer() {}
RiveRenderer::~RiveRenderer() {}
void PLSRenderer::save()
void RiveRenderer::save()
{
// Copy the back of the stack before pushing, in case the vector grows and invalidates the
// reference.
@@ -86,22 +89,22 @@ void PLSRenderer::save()
m_stack.push_back(copy);
}
void PLSRenderer::restore()
void RiveRenderer::restore()
{
assert(m_stack.size() > 1);
assert(m_stack.back().clipStackHeight >= m_stack[m_stack.size() - 2].clipStackHeight);
m_stack.pop_back();
}
void PLSRenderer::transform(const Mat2D& matrix)
void RiveRenderer::transform(const Mat2D& matrix)
{
m_stack.back().matrix = m_stack.back().matrix * matrix;
}
void PLSRenderer::drawPath(RenderPath* renderPath, RenderPaint* renderPaint)
void RiveRenderer::drawPath(RenderPath* renderPath, RenderPaint* renderPaint)
{
LITE_RTTI_CAST_OR_RETURN(path, PLSPath*, renderPath);
LITE_RTTI_CAST_OR_RETURN(paint, PLSPaint*, renderPaint);
LITE_RTTI_CAST_OR_RETURN(path, RiveRenderPath*, renderPath);
LITE_RTTI_CAST_OR_RETURN(paint, RiveRenderPaint*, renderPaint);
if (path->getRawPath().empty())
{
@@ -122,17 +125,17 @@ void PLSRenderer::drawPath(RenderPath* renderPath, RenderPaint* renderPaint)
return;
}
clipAndPushDraw(PLSPathDraw::Make(m_context,
m_stack.back().matrix,
ref_rcp(path),
path->getFillRule(),
paint,
&m_scratchPath));
clipAndPushDraw(RiveRenderPathDraw::Make(m_context,
m_stack.back().matrix,
ref_rcp(path),
path->getFillRule(),
paint,
&m_scratchPath));
}
void PLSRenderer::clipPath(RenderPath* renderPath)
void RiveRenderer::clipPath(RenderPath* renderPath)
{
LITE_RTTI_CAST_OR_RETURN(path, PLSPath*, renderPath);
LITE_RTTI_CAST_OR_RETURN(path, RiveRenderPath*, renderPath);
if (path->getRawPath().empty())
{
@@ -190,7 +193,7 @@ static bool transform_rect_to_new_space(AABB* rect,
return true;
}
void PLSRenderer::clipRectImpl(AABB rect, const PLSPath* originalPath)
void RiveRenderer::clipRectImpl(AABB rect, const RiveRenderPath* originalPath)
{
bool hasClipRect = m_stack.back().clipRectInverseMatrix != nullptr;
if (rect.isEmptyOrNaN())
@@ -225,11 +228,11 @@ void PLSRenderer::clipRectImpl(AABB rect, const PLSPath* originalPath)
}
m_stack.back().clipRectInverseMatrix =
m_context->make<pls::ClipRectInverseMatrix>(m_stack.back().clipRectMatrix,
m_context->make<gpu::ClipRectInverseMatrix>(m_stack.back().clipRectMatrix,
m_stack.back().clipRect);
}
void PLSRenderer::clipPathImpl(const PLSPath* path)
void RiveRenderer::clipPathImpl(const RiveRenderPath* path)
{
if (path->getBounds().isEmptyOrNaN())
{
@@ -254,7 +257,7 @@ void PLSRenderer::clipPathImpl(const PLSPath* path)
m_stack.back().clipStackHeight = clipStackHeight + 1;
}
void PLSRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode, float opacity)
void RiveRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode, float opacity)
{
LITE_RTTI_CAST_OR_RETURN(image, const PLSImage*, renderImage);
@@ -281,13 +284,13 @@ void PLSRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode,
// Implement drawImage() as drawPath() with a rectangular path and an image paint.
if (m_unitRectPath == nullptr)
{
m_unitRectPath = make_rcp<PLSPath>();
m_unitRectPath = make_rcp<RiveRenderPath>();
m_unitRectPath->line({1, 0});
m_unitRectPath->line({1, 1});
m_unitRectPath->line({0, 1});
}
PLSPaint paint;
RiveRenderPaint paint;
paint.image(image->refTexture(), opacity);
paint.blendMode(blendMode);
drawPath(m_unitRectPath.get(), &paint);
@@ -296,14 +299,14 @@ void PLSRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode,
restore();
}
void PLSRenderer::drawImageMesh(const RenderImage* renderImage,
rcp<RenderBuffer> vertices_f32,
rcp<RenderBuffer> uvCoords_f32,
rcp<RenderBuffer> indices_u16,
uint32_t vertexCount,
uint32_t indexCount,
BlendMode blendMode,
float opacity)
void RiveRenderer::drawImageMesh(const RenderImage* renderImage,
rcp<RenderBuffer> vertices_f32,
rcp<RenderBuffer> uvCoords_f32,
rcp<RenderBuffer> indices_u16,
uint32_t vertexCount,
uint32_t indexCount,
BlendMode blendMode,
float opacity)
{
LITE_RTTI_CAST_OR_RETURN(image, const PLSImage*, renderImage);
const PLSTexture* plsTexture = image->getTexture();
@@ -323,7 +326,7 @@ void PLSRenderer::drawImageMesh(const RenderImage* renderImage,
opacity)));
}
void PLSRenderer::clipAndPushDraw(PLSDrawUniquePtr draw)
void RiveRenderer::clipAndPushDraw(PLSDrawUniquePtr draw)
{
if (m_stack.back().clipIsEmpty)
{
@@ -342,14 +345,14 @@ void PLSRenderer::clipAndPushDraw(PLSDrawUniquePtr draw)
struct AutoResetInternalDrawBatch
{
public:
AutoResetInternalDrawBatch(PLSRenderer* renderer) : m_renderer(renderer)
AutoResetInternalDrawBatch(RiveRenderer* renderer) : m_renderer(renderer)
{
assert(m_renderer->m_internalDrawBatch.empty());
}
~AutoResetInternalDrawBatch() { m_renderer->m_internalDrawBatch.clear(); }
private:
PLSRenderer* m_renderer;
RiveRenderer* m_renderer;
};
AutoResetInternalDrawBatch aridb(this);
@@ -379,10 +382,10 @@ void PLSRenderer::clipAndPushDraw(PLSDrawUniquePtr draw)
// We failed to process the draw. Release its refs.
fprintf(stderr,
"PLSRenderer::clipAndPushDraw failed. The draw and/or clip stack are too complex.\n");
"RiveRenderer::clipAndPushDraw failed. The draw and/or clip stack are too complex.\n");
}
bool PLSRenderer::applyClip(PLSDraw* draw)
bool RiveRenderer::applyClip(PLSDraw* draw)
{
draw->setClipRect(m_stack.back().clipRectInverseMatrix);
@@ -412,7 +415,7 @@ bool PLSRenderer::applyClip(PLSDraw* draw)
uint32_t lastClipID = clipIdxCurrentlyInClipBuffer == -1
? 0 // The next clip to be drawn is not nested.
: m_clipStack[clipIdxCurrentlyInClipBuffer].clipID;
if (m_context->frameInterlockMode() == pls::InterlockMode::depthStencil)
if (m_context->frameInterlockMode() == gpu::InterlockMode::depthStencil)
{
if (lastClipID == 0 && m_context->getClipContentID() != 0)
{
@@ -436,14 +439,14 @@ bool PLSRenderer::applyClip(PLSDraw* draw)
IAABB clipDrawBounds;
{
PLSPaint clipUpdatePaint;
RiveRenderPaint clipUpdatePaint;
clipUpdatePaint.clipUpdate(/*clip THIS clipDraw against:*/ lastClipID);
auto clipDraw = PLSPathDraw::Make(m_context,
clip.matrix,
clip.path,
clip.fillRule,
&clipUpdatePaint,
&m_scratchPath);
auto clipDraw = RiveRenderPathDraw::Make(m_context,
clip.matrix,
clip.path,
clip.fillRule,
&clipUpdatePaint,
&m_scratchPath);
clipDrawBounds = clipDraw->pixelBounds();
// Generate a new clipID every time we (re-)render an element to the clip buffer.
// (Each embodiment of the element needs its own separate readBounds.)
@@ -463,7 +466,7 @@ bool PLSRenderer::applyClip(PLSDraw* draw)
if (lastClipID != 0)
{
m_context->addClipReadBounds(lastClipID, clipDrawBounds);
if (m_context->frameInterlockMode() == pls::InterlockMode::depthStencil)
if (m_context->frameInterlockMode() == gpu::InterlockMode::depthStencil)
{
// When drawing nested stencil clips, we need to intersect them, which involves
// erasing the region of the current clip in the stencil buffer that is outside the
@@ -487,4 +490,4 @@ bool PLSRenderer::applyClip(PLSDraw* draw)
m_context->setClipContentID(lastClipID);
return true;
}
} // namespace rive::pls
} // namespace rive::gpu
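Putting the renamed pieces together, here is a caller-side sketch, under the assumption that the surrounding runtime API is otherwise unchanged at this stage of the rename (the render context, for instance, is still called PLSRenderContext in these hunks). Mat2D::fromScale and BlendMode::srcOver are assumed from the wider runtime; everything else appears in the diff above.

```
// Caller-side sketch only — not part of this commit.
#include "rive/renderer/rive_renderer.hpp"
#include "rive_render_paint.hpp"
#include "rive_render_path.hpp"

using namespace rive;

void drawOnce(gpu::PLSRenderContext* context, gpu::RiveRenderPath* path)
{
    gpu::RiveRenderer renderer(context); // formerly rive::pls::PLSRenderer
    gpu::RiveRenderPaint paint;          // formerly PLSPaint
    paint.blendMode(BlendMode::srcOver); // same setter drawImage() uses above

    renderer.save();
    renderer.transform(Mat2D::fromScale(2.0f, 2.0f)); // assumed Mat2D helper
    renderer.drawPath(path, &paint);
    renderer.restore();
}
```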

Some files were not shown because too many files have changed in this diff.