Add FillRule::clockwise to the runtime

Diffs=
11d4e46498 Add FillRule::clockwise to the runtime (#8786)

Co-authored-by: Chris Dalton <99840794+csmartdalton@users.noreply.github.com>
This commit is contained in:
csmartdalton
2024-12-20 23:45:02 +00:00
parent 2490a1bdd6
commit 4356a5c53d
29 changed files with 414 additions and 153 deletions

View File

@@ -1 +1 @@
49e1109e3f8b3b04d4cf152bf2a826e6bea8aae3
11d4e46498b239fd56c15edcfe2c9d3e752e12cc

View File

@@ -23,8 +23,8 @@ static CGAffineTransform convert(const Mat2D& m)
static CGPathDrawingMode convert(FillRule rule)
{
return (rule == FillRule::nonZero) ? CGPathDrawingMode::kCGPathFill
: CGPathDrawingMode::kCGPathEOFill;
return (rule == FillRule::evenOdd) ? CGPathDrawingMode::kCGPathEOFill
: CGPathDrawingMode::kCGPathFill;
}
static CGLineJoin convert(StrokeJoin j)

View File

@@ -14,6 +14,7 @@ enum class FillRule
{
nonZero,
evenOdd,
clockwise,
};
enum class PathDirection

View File

@@ -224,7 +224,6 @@ public:
gpu::InterlockMode);
const Gradient* gradient() const { return m_gradientRef; }
FillRule fillRule() const { return m_fillRule; }
gpu::PaintType paintType() const { return m_paintType; }
float strokeRadius() const { return m_strokeRadius; }
gpu::ContourDirections contourDirections() const
@@ -310,8 +309,6 @@ protected:
const RiveRenderPath* const m_pathRef;
const Gradient* m_gradientRef;
const FillRule m_fillRule; // Bc RiveRenderPath fillRule can mutate during
// the artboard draw process.
const gpu::PaintType m_paintType;
float m_strokeRadius = 0;
gpu::ContourDirections m_contourDirections;

View File

@@ -773,10 +773,12 @@ enum class DrawContents
none = 0,
opaquePaint = 1 << 0,
stroke = 1 << 1,
evenOddFill = 1 << 2,
activeClip = 1 << 3,
clipUpdate = 1 << 4,
advancedBlend = 1 << 5,
clockwiseFill = 1 << 2,
nonZeroFill = 1 << 3,
evenOddFill = 1 << 4,
activeClip = 1 << 5,
clipUpdate = 1 << 6,
advancedBlend = 1 << 7,
};
RIVE_MAKE_ENUM_BITSET(DrawContents)
@@ -917,7 +919,8 @@ struct FlushDescriptor
uint32_t complexGradRowsTop = 0;
uint32_t complexGradRowsHeight = 0;
uint32_t tessDataHeight = 0;
bool clockwiseFill = false; // Override path fill rules with "clockwise".
// Override path fill rules with "clockwise".
bool clockwiseFillOverride = false;
bool hasTriangleVertices = false;
bool wireframe = false;
bool isFinalFlushOfFrame = false;
@@ -1108,7 +1111,7 @@ public:
constexpr static StorageBufferStructure kBufferStructure =
StorageBufferStructure::uint32x2;
void set(FillRule,
void set(DrawContents singleDrawContents,
PaintType,
SimplePaintValue,
GradTextureLayout,

View File

@@ -100,7 +100,7 @@ public:
bool strokesDisabled = false;
// Override all paths' fill rules (winding or even/odd) to emulate
// clockwiseAtomic mode.
bool clockwiseFill = false;
bool clockwiseFillOverride = false;
};
// Called at the beginning of a frame and establishes where and how it will

View File

@@ -755,7 +755,7 @@ void riveMainLoop()
.wireframe = s_wireframe,
.fillsDisabled = s_disableFill,
.strokesDisabled = s_disableStroke,
.clockwiseFill = s_clockwiseFill,
.clockwiseFillOverride = s_clockwiseFill,
});
int instances = 1;

View File

@@ -1113,7 +1113,7 @@ void RenderContextD3DImpl::setPipelineLayoutAndShaders(
}
if (pixelShaderMiscFlags & gpu::ShaderMiscFlags::clockwiseFill)
{
s << "#define " << GLSL_CLOCKWISE_FILL << '\n';
s << "#define " << GLSL_CLOCKWISE_FILL << " 1\n";
}
switch (drawType)
{
@@ -1704,7 +1704,8 @@ void RenderContextD3DImpl::flush(const FlushDescriptor& desc)
pixelShaderMiscFlags |=
gpu::ShaderMiscFlags::fixedFunctionColorOutput;
}
if (desc.clockwiseFill)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering &&
(batch.drawContents & gpu::DrawContents::clockwiseFill))
{
pixelShaderMiscFlags |= gpu::ShaderMiscFlags::clockwiseFill;
}

View File

@@ -456,7 +456,7 @@ RiveRenderPathDraw::RiveRenderPathDraw(
AABB bounds,
const Mat2D& matrix,
rcp<const RiveRenderPath> path,
FillRule fillRule,
FillRule initialFillRule,
const RiveRenderPaint* paint,
Type type,
const RenderContext::FrameDescriptor& frameDesc,
@@ -468,12 +468,6 @@ RiveRenderPathDraw::RiveRenderPathDraw(
type),
m_pathRef(path.release()),
m_gradientRef(safe_ref(paint->getGradient())),
m_fillRule(paint->getIsStroked() || frameDesc.clockwiseFill
// Fill rule is irrelevant for stroking and clockwiseFill,
// but override it to nonZero so the triangulator doesn't do
// evenOdd optimizations in "clockwiseFill" mode.
? FillRule::nonZero
: fillRule),
m_paintType(paint->getType())
{
assert(m_pathRef != nullptr);
@@ -495,7 +489,16 @@ RiveRenderPathDraw::RiveRenderPathDraw(
// RiveRenderer::drawPath().
assert(m_strokeRadius > 0);
}
else if (m_fillRule == FillRule::evenOdd)
else if (initialFillRule == FillRule::clockwise ||
frameDesc.clockwiseFillOverride)
{
m_drawContents |= gpu::DrawContents::clockwiseFill;
}
else if (initialFillRule == FillRule::nonZero)
{
m_drawContents |= gpu::DrawContents::nonZeroFill;
}
else if (initialFillRule == FillRule::evenOdd)
{
m_drawContents |= gpu::DrawContents::evenOddFill;
}
@@ -513,10 +516,32 @@ RiveRenderPathDraw::RiveRenderPathDraw(
// Stroke triangles are always forward.
m_contourDirections = gpu::ContourDirections::forward;
}
else if (initialFillRule == FillRule::clockwise)
{
// Clockwise paths need to be reversed when the matrix is left-handed,
// so that the intended forward triangles remain clockwise.
float det = matrix.xx() * matrix.yy() - matrix.yx() * matrix.xy();
if (det < 0)
{
m_contourDirections =
interlockMode == gpu::InterlockMode::msaa
? gpu::ContourDirections::reverse
: gpu::ContourDirections::forwardThenReverse;
m_contourFlags |= NEGATE_PATH_FILL_COVERAGE_FLAG; // ignored by msaa
}
else
{
m_contourDirections =
interlockMode == gpu::InterlockMode::msaa
? gpu::ContourDirections::forward
: gpu::ContourDirections::reverseThenForward;
}
}
else if (interlockMode != gpu::InterlockMode::msaa)
{
// atomic and rasterOrdering fills need reverse AND forward triangles.
if (frameDesc.clockwiseFill && !m_pathRef->isClockwiseDominant(matrix))
if (frameDesc.clockwiseFillOverride &&
!m_pathRef->isClockwiseDominant(matrix))
{
// For clockwiseFill, this is also our opportunity to logically
// reverse the winding of the path, if it is predominantly
@@ -529,20 +554,25 @@ RiveRenderPathDraw::RiveRenderPathDraw(
m_contourDirections = gpu::ContourDirections::reverseThenForward;
}
}
else if (m_fillRule != FillRule::evenOdd)
{
// Emit "nonZero" msaa fills in a direction such that the dominant
// triangle winding area is always clockwise. This maximizes pixel
// throughput since we will draw counterclockwise triangles twice and
// clockwise only once.
m_contourDirections = m_pathRef->isClockwiseDominant(matrix)
? gpu::ContourDirections::forward
: gpu::ContourDirections::reverse;
}
else
{
// "evenOdd" msaa fills just get drawn twice, so any direction is fine.
m_contourDirections = gpu::ContourDirections::forward;
if (initialFillRule == FillRule::nonZero ||
frameDesc.clockwiseFillOverride)
{
// Emit "nonZero" msaa fills in a direction such that the dominant
// triangle winding area is always clockwise. This maximizes pixel
// throughput since we will draw counterclockwise triangles twice
// and clockwise only once.
m_contourDirections = m_pathRef->isClockwiseDominant(matrix)
? gpu::ContourDirections::forward
: gpu::ContourDirections::reverse;
}
else
{
// "evenOdd" msaa fills just get drawn twice, so any direction is
// fine.
m_contourDirections = gpu::ContourDirections::forward;
}
}
m_simplePaintValue = paint->getSimpleValue();
@@ -2100,7 +2130,10 @@ void RiveRenderPathDraw::iterateInteriorTriangulation(
triangulatorAxis == TriangulatorAxis::horizontal
? GrTriangulator::Comparator::Direction::kHorizontal
: GrTriangulator::Comparator::Direction::kVertical,
m_fillRule,
// clockwise and nonZero paths both get triangulated as nonZero,
// because clockwise fill still needs the backwards triangles for
// borrowed coverage.
isEvenOddFill() ? FillRule::evenOdd : FillRule::nonZero,
allocator);
float matrixDeterminant =
m_matrix[0] * m_matrix[3] - m_matrix[2] * m_matrix[1];

View File

@@ -1185,7 +1185,8 @@ void RenderContextGLImpl::flush(const FlushDescriptor& desc)
m_plsImpl != nullptr
? m_plsImpl->shaderMiscFlags(desc, batch.drawType)
: gpu::ShaderMiscFlags::none;
if (desc.clockwiseFill)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering &&
(batch.drawContents & gpu::DrawContents::clockwiseFill))
{
fragmentShaderMiscFlags |= gpu::ShaderMiscFlags::clockwiseFill;
}
@@ -1303,7 +1304,8 @@ void RenderContextGLImpl::flush(const FlushDescriptor& desc)
m_plsImpl != nullptr
? m_plsImpl->shaderMiscFlags(desc, batch.drawType)
: gpu::ShaderMiscFlags::none;
if (desc.clockwiseFill)
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering &&
(batch.drawContents & gpu::DrawContents::clockwiseFill))
{
fragmentShaderMiscFlags |= gpu::ShaderMiscFlags::clockwiseFill;
}
@@ -1499,8 +1501,17 @@ void RenderContextGLImpl::flush(const FlushDescriptor& desc)
drawHelper.draw();
// Clean up backward triangles in the stencil buffer, (also
// filling negative winding numbers).
// filling negative winding numbers for nonZero fill).
m_state->setCullFace(GL_FRONT);
if (batch.drawContents & gpu::DrawContents::clockwiseFill)
{
// For clockwise fill, disable the color mask when
// cleaning up backward triangles. This mode only fills
// in forward triangles.
m_state->setWriteMasks(false,
false,
isClipUpdate ? 0xff : 0x7f);
}
drawHelper.draw();
break;
}

View File

@@ -435,7 +435,7 @@ void PathData::set(const Mat2D& m,
m_coverageBufferRange.offsetY = coverageBufferRange.offsetY;
}
void PaintData::set(FillRule fillRule,
void PaintData::set(DrawContents singleDrawContents,
PaintType paintType,
SimplePaintValue simplePaintValue,
GradTextureLayout gradTextureLayout,
@@ -483,9 +483,13 @@ void PaintData::set(FillRule fillRule,
break;
}
}
if (fillRule == FillRule::evenOdd)
if (singleDrawContents & gpu::DrawContents::nonZeroFill)
{
localParams |= PAINT_FLAG_EVEN_ODD;
localParams |= PAINT_FLAG_NON_ZERO_FILL;
}
else if (singleDrawContents & gpu::DrawContents::evenOddFill)
{
localParams |= PAINT_FLAG_EVEN_ODD_FILL;
}
if (hasClipRect)
{

View File

@@ -118,7 +118,7 @@ void BackgroundShaderCompiler::threadMain()
}
if (shaderMiscFlags & gpu::ShaderMiscFlags::clockwiseFill)
{
defines[@GLSL_CLOCKWISE_FILL] = @"";
defines[@GLSL_CLOCKWISE_FILL] = @"1";
}
auto source =

View File

@@ -104,7 +104,8 @@ public:
// qualified name of the desired function, including its namespace.
static NSString* GetPrecompiledFunctionName(
DrawType drawType,
ShaderFeatures shaderFeatures,
gpu::ShaderFeatures shaderFeatures,
gpu::ShaderMiscFlags shaderMiscFlags,
id<MTLLibrary> precompiledLibrary,
const char* functionBaseName)
{
@@ -138,7 +139,10 @@ public:
case DrawType::midpointFanPatches:
case DrawType::outerCurvePatches:
case DrawType::interiorTriangulation:
namespacePrefix = 'p';
namespacePrefix =
(shaderMiscFlags & gpu::ShaderMiscFlags::clockwiseFill)
? 'c'
: 'p';
break;
case DrawType::imageRect:
RIVE_UNREACHABLE();
@@ -461,30 +465,37 @@ RenderContextMetalImpl::RenderContextMetalImpl(
DrawType::interiorTriangulation,
DrawType::imageMesh})
{
gpu::ShaderFeatures allShaderFeatures = gpu::ShaderFeaturesMaskFor(
drawType, gpu::InterlockMode::rasterOrdering);
uint32_t pipelineKey =
ShaderUniqueKey(drawType,
allShaderFeatures,
gpu::InterlockMode::rasterOrdering,
gpu::ShaderMiscFlags::none);
m_drawPipelines[pipelineKey] = std::make_unique<DrawPipeline>(
m_gpu,
m_plsPrecompiledLibrary,
DrawPipeline::GetPrecompiledFunctionName(
drawType,
allShaderFeatures & gpu::kVertexShaderFeaturesMask,
for (auto shaderMiscFlags : {gpu::ShaderMiscFlags::none,
gpu::ShaderMiscFlags::clockwiseFill})
{
gpu::ShaderFeatures allShaderFeatures =
gpu::ShaderFeaturesMaskFor(
drawType, gpu::InterlockMode::rasterOrdering);
uint32_t pipelineKey =
ShaderUniqueKey(drawType,
allShaderFeatures,
gpu::InterlockMode::rasterOrdering,
shaderMiscFlags);
m_drawPipelines[pipelineKey] = std::make_unique<DrawPipeline>(
m_gpu,
m_plsPrecompiledLibrary,
GLSL_drawVertexMain),
DrawPipeline::GetPrecompiledFunctionName(
DrawPipeline::GetPrecompiledFunctionName(
drawType,
allShaderFeatures & gpu::kVertexShaderFeaturesMask,
gpu::ShaderMiscFlags::none,
m_plsPrecompiledLibrary,
GLSL_drawVertexMain),
DrawPipeline::GetPrecompiledFunctionName(
drawType,
allShaderFeatures,
shaderMiscFlags,
m_plsPrecompiledLibrary,
GLSL_drawFragmentMain),
drawType,
gpu::InterlockMode::rasterOrdering,
allShaderFeatures,
m_plsPrecompiledLibrary,
GLSL_drawFragmentMain),
drawType,
gpu::InterlockMode::rasterOrdering,
allShaderFeatures,
gpu::ShaderMiscFlags::none);
shaderMiscFlags);
}
}
}
@@ -780,11 +791,10 @@ const RenderContextMetalImpl::DrawPipeline* RenderContextMetalImpl::
}
shaderFeatures &= fullyFeaturedPipelineFeatures;
// Fully-featured "rasterOrdering" pipelines without miscFlags should have
// already been pre-loaded from the static library.
// Fully-featured "rasterOrdering" pipelines should have already been
// pre-loaded from the static library.
assert(shaderFeatures != fullyFeaturedPipelineFeatures ||
interlockMode != gpu::InterlockMode::rasterOrdering ||
shaderMiscFlags != ShaderMiscFlags::none);
interlockMode != gpu::InterlockMode::rasterOrdering);
// Poll to see if the shader is actually done compiling, but only wait if
// it's a fully-feature pipeline. Otherwise, we can fall back on the
@@ -1101,10 +1111,7 @@ void RenderContextMetalImpl::flush(const FlushDescriptor& desc)
}
pass.colorAttachments[COLOR_PLANE_IDX].storeAction = MTLStoreActionStore;
auto baselineShaderMiscFlags = desc.clockwiseFill
? gpu::ShaderMiscFlags::clockwiseFill
: gpu::ShaderMiscFlags::none;
auto baselineShaderMiscFlags = gpu::ShaderMiscFlags::none;
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering)
{
// In rasterOrdering mode, the PLS planes are accessed as color
@@ -1185,6 +1192,11 @@ void RenderContextMetalImpl::flush(const FlushDescriptor& desc)
? desc.combinedShaderFeatures
: batch.shaderFeatures;
gpu::ShaderMiscFlags batchMiscFlags = baselineShaderMiscFlags;
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering &&
(batch.drawContents & gpu::DrawContents::clockwiseFill))
{
batchMiscFlags |= gpu::ShaderMiscFlags::clockwiseFill;
}
if (!(batchMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorOutput))
{
if (batch.drawType == gpu::DrawType::atomicResolve)

View File

@@ -259,7 +259,7 @@ void RenderContext::beginFrame(const FrameDescriptor& frameDescriptor)
{
m_frameInterlockMode = gpu::InterlockMode::rasterOrdering;
}
else if (frameDescriptor.clockwiseFill &&
else if (frameDescriptor.clockwiseFillOverride &&
platformFeatures().supportsClockwiseAtomicRendering)
{
assert(platformFeatures().supportsFragmentShaderAtomics);
@@ -887,7 +887,7 @@ void RenderContext::LogicalFlush::layoutResources(
m_flushDesc.complexGradRowsHeight =
math::lossless_numeric_cast<uint32_t>(m_complexGradients.size());
m_flushDesc.tessDataHeight = tessDataHeight;
m_flushDesc.clockwiseFill = frameDescriptor.clockwiseFill;
m_flushDesc.clockwiseFillOverride = frameDescriptor.clockwiseFillOverride;
m_flushDesc.wireframe = frameDescriptor.wireframe;
m_flushDesc.isFinalFlushOfFrame = isFinalFlushOfFrame;
@@ -1022,7 +1022,7 @@ void RenderContext::LogicalFlush::writeResources()
gpu::SimplePaintValue clearColorValue;
clearColorValue.color = m_ctx->frameDescriptor().clearColor;
m_ctx->m_pathData.skip_back();
m_ctx->m_paintData.set_back(FillRule::nonZero,
m_ctx->m_paintData.set_back(gpu::DrawContents::none,
PaintType::solidColor,
clearColorValue,
GradTextureLayout(),
@@ -1094,13 +1094,13 @@ void RenderContext::LogicalFlush::writeResources()
constexpr static int kDrawTypeShift = 45;
constexpr static int64_t kDrawTypeMask RIVE_MAYBE_UNUSED =
7llu << kDrawTypeShift;
constexpr static int kTextureHashShift = 27;
constexpr static int64_t kTextureHashMask = 0x3ffffllu
constexpr static int kTextureHashShift = 29;
constexpr static int64_t kTextureHashMask = 0xffffllu
<< kTextureHashShift;
constexpr static int kBlendModeShift = 23;
constexpr static int kBlendModeShift = 25;
constexpr static int kBlendModeMask = 0xf << kBlendModeShift;
constexpr static int kDrawContentsShift = 17;
constexpr static int64_t kDrawContentsMask = 0x3fllu
constexpr static int64_t kDrawContentsMask = 0xffllu
<< kDrawContentsShift;
constexpr static int kDrawIndexShift = 1;
constexpr static int64_t kDrawIndexMask = 0x7fff << kDrawIndexShift;
@@ -1777,7 +1777,7 @@ uint32_t RenderContext::LogicalFlush::pushPath(const RiveRenderPathDraw* draw)
draw->coverageBufferRange());
}
m_ctx->m_paintData.set_back(draw->fillRule(),
m_ctx->m_paintData.set_back(draw->drawContents(),
draw->paintType(),
draw->simplePaintValue(),
m_gradTextureLayout,
@@ -2293,10 +2293,11 @@ gpu::DrawBatch& RenderContext::LogicalFlush::pushPathDraw(
draw->paintType(),
vertexCount,
baseVertex);
if (!(shaderMiscFlags & gpu::ShaderMiscFlags::borrowedCoveragePrepass))
{
auto pathShaderFeatures = gpu::ShaderFeatures::NONE;
if (draw->fillRule() == FillRule::evenOdd)
if (draw->isEvenOddFill())
{
pathShaderFeatures |= ShaderFeatures::ENABLE_EVEN_ODD;
}
@@ -2318,6 +2319,28 @@ gpu::DrawBatch& RenderContext::LogicalFlush::pushPathDraw(
return batch;
}
RIVE_ALWAYS_INLINE static bool can_combine_draw_contents(
gpu::InterlockMode interlockMode,
gpu::DrawContents batchContents,
const Draw* draw)
{
constexpr static auto ANY_FILL = gpu::DrawContents::clockwiseFill |
gpu::DrawContents::evenOddFill |
gpu::DrawContents::nonZeroFill;
// Raster ordering uses a different shader for clockwise fills, so we
// can't combine both legacy and clockwise fills into the same draw.
if (interlockMode == gpu::InterlockMode::rasterOrdering &&
// Anything can be combined if either the existing batch or the new draw
// don't have fills yet.
(batchContents & ANY_FILL) && (draw->drawContents() & ANY_FILL))
{
assert(!draw->isStroked());
return (batchContents & gpu::DrawContents::clockwiseFill).bits() ==
(draw->drawContents() & gpu::DrawContents::clockwiseFill).bits();
}
return true;
}
RIVE_ALWAYS_INLINE static bool can_combine_draw_images(
const Texture* currentDrawTexture,
const Texture* nextDrawTexture)
@@ -2350,13 +2373,22 @@ gpu::DrawBatch& RenderContext::LogicalFlush::pushDraw(
case DrawType::outerCurvePatches:
case DrawType::interiorTriangulation:
case DrawType::stencilClipReset:
canMergeWithPreviousBatch =
!m_drawList.empty() && m_drawList.tail().drawType == drawType &&
m_drawList.tail().shaderMiscFlags == shaderMiscFlags &&
!m_drawList.tail().needsBarrier &&
can_combine_draw_images(m_drawList.tail().imageTexture,
draw->imageTexture());
break;
if (!m_drawList.empty())
{
const DrawBatch& currentBatch = m_drawList.tail();
canMergeWithPreviousBatch =
currentBatch.drawType == drawType &&
currentBatch.shaderMiscFlags == shaderMiscFlags &&
!currentBatch.needsBarrier &&
can_combine_draw_contents(m_ctx->frameInterlockMode(),
currentBatch.drawContents,
draw) &&
can_combine_draw_images(currentBatch.imageTexture,
draw->imageTexture());
break;
}
[[fallthrough]];
// Image draws can't be combined for now because they each have their
// own unique uniforms.
case DrawType::imageRect:

View File

@@ -387,19 +387,21 @@ INLINE void resolve_paint(uint pathID,
FRAGMENT_CONTEXT_DECL PLS_CONTEXT_DECL)
{
uint2 paintData = STORAGE_BUFFER_LOAD2(@paintBuffer, pathID);
#ifdef @CLOCKWISE_FILL
half coverage = clamp(coverageCount, make_half(.0), make_half(1.));
#else
half coverage = abs(coverageCount);
#ifdef @ENABLE_EVEN_ODD
if (@ENABLE_EVEN_ODD && (paintData.x & PAINT_FLAG_EVEN_ODD) != 0u)
half coverage = coverageCount;
if ((paintData.x & (PAINT_FLAG_NON_ZERO_FILL | PAINT_FLAG_EVEN_ODD_FILL)) !=
0u)
{
coverage = 1. - abs(fract(coverage * .5) * 2. + -1.);
}
// This path has a legacy (non-clockwise) fill.
coverage = abs(coverage);
#ifdef @ENABLE_EVEN_ODD
if (@ENABLE_EVEN_ODD && (paintData.x & PAINT_FLAG_EVEN_ODD_FILL) != 0u)
{
coverage = 1. - abs(fract(coverage * .5) * 2. + -1.);
}
#endif
}
// This also caps stroke coverage, which can be >1.
coverage = min(coverage, make_half(1.));
#endif // !CLOCKWISE_FILL
coverage = clamp(coverage, make_half(.0), make_half(1.));
#ifdef @ENABLE_CLIPPING
if (@ENABLE_CLIPPING)
{

View File

@@ -71,8 +71,9 @@
#define IMAGE_PAINT_TYPE 4u
// Paint flags, found in the x-component value of @paintBuffer.
#define PAINT_FLAG_EVEN_ODD 0x100u
#define PAINT_FLAG_HAS_CLIP_RECT 0x200u
#define PAINT_FLAG_NON_ZERO_FILL 0x100u
#define PAINT_FLAG_EVEN_ODD_FILL 0x200u
#define PAINT_FLAG_HAS_CLIP_RECT 0x400u
// PLS draw resources are either updated per flush or per draw. They go into set
// 0 or set 1, depending on how often they are updated.
@@ -170,5 +171,6 @@
#define EVEN_ODD_SPECIALIZATION_IDX 3
#define NESTED_CLIPPING_SPECIALIZATION_IDX 4
#define HSL_BLEND_MODES_SPECIALIZATION_IDX 5
#define BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX 6
#define SPECIALIZATION_COUNT 7
#define CLOCKWISE_FILL_SPECIALIZATION_IDX 6
#define BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX 7
#define SPECIALIZATION_COUNT 8

View File

@@ -102,7 +102,7 @@ VERTEX_MAIN(@drawVertexMain, Attrs, attrs, _vertexID, _instanceID)
v_pathID = id_bits_to_f16(pathID, uniforms.pathIDGranularity);
// Indicate even-odd fill rule by making pathID negative.
if ((paintData.x & PAINT_FLAG_EVEN_ODD) != 0u)
if ((paintData.x & PAINT_FLAG_EVEN_ODD_FILL) != 0u)
v_pathID = -v_pathID;
#endif // !RENDER_MODE_MSAA
@@ -376,19 +376,25 @@ PLS_MAIN(@drawFragmentMain)
#endif
// Convert coverageCount to coverage.
half coverage;
#ifdef @CLOCKWISE_FILL
half coverage = clamp(coverageCount, make_half(.0), make_half(1.));
#else
half coverage = abs(coverageCount);
#ifdef @ENABLE_EVEN_ODD
if (@ENABLE_EVEN_ODD && v_pathID < .0 /*even-odd*/)
if (@CLOCKWISE_FILL)
{
coverage = 1. - make_half(abs(fract(coverage * .5) * 2. + -1.));
coverage = clamp(coverageCount, make_half(.0), make_half(1.));
}
else
#endif // CLOCKWISE_FILL
{
coverage = abs(coverageCount);
#ifdef @ENABLE_EVEN_ODD
if (@ENABLE_EVEN_ODD && v_pathID < .0 /*even-odd*/)
{
coverage = 1. - make_half(abs(fract(coverage * .5) * 2. + -1.));
}
#endif
// This also caps stroke coverage, which can be >1.
coverage = min(coverage, make_half(1.));
#endif // !CLOCKWISE_FILL
// This also caps stroke coverage, which can be >1.
coverage = min(coverage, make_half(1.));
}
#ifdef @ENABLE_CLIPPING
if (@ENABLE_CLIPPING && v_clipID < .0) // Update the clip buffer.

View File

@@ -56,8 +56,9 @@ def is_image_mesh_feature_set(feature_set):
ShaderType = Enum('ShaderType', ['VERTEX', 'FRAGMENT'])
DrawType = Enum('DrawType', ['PATH', 'IMAGE_MESH'])
FillType = Enum('FillType', ['CLOCKWISE', 'LEGACY'])
def emit_shader(out, shader_type, draw_type, feature_set):
def emit_shader(out, shader_type, draw_type, fill_type, feature_set):
assert(is_valid_feature_set(feature_set))
if shader_type == ShaderType.VERTEX:
assert(is_unique_vertex_feature_set(feature_set))
@@ -71,8 +72,12 @@ def emit_shader(out, shader_type, draw_type, feature_set):
namespace_id[feature.index] = '1'
for feature in feature_set:
out.write('#define %s 1\n' % feature.name)
if fill_type == FillType.CLOCKWISE:
out.write('#define CLOCKWISE_FILL 1\n')
if draw_type == DrawType.PATH:
out.write('namespace p%s\n' % ''.join(namespace_id))
out.write('namespace %s%s\n' %
('c' if fill_type == FillType.CLOCKWISE else 'p',
''.join(namespace_id)))
out.write('{\n')
out.write('#include "draw_path.minified.glsl"\n')
out.write('}\n')
@@ -87,6 +92,8 @@ def emit_shader(out, shader_type, draw_type, feature_set):
out.write('#undef VERTEX\n')
else:
out.write('#undef FRAGMENT\n')
if fill_type == FillType.CLOCKWISE:
out.write('#undef CLOCKWISE_FILL\n')
out.write('\n')
# Organize all combinations of valid features into their own namespace.
@@ -95,15 +102,21 @@ out = open(sys.argv[1], 'w', newline='\n')
# Precompile the bare minimum set of shaders required to draw everything. We can compile more
# specialized shaders in the background at runtime, and use the fully-featured (slower) shaders
# while waiting for the compilations to complete.
emit_shader(out, ShaderType.VERTEX, DrawType.PATH, whole_program_features)
emit_shader(out, ShaderType.FRAGMENT, DrawType.PATH, all_features)
emit_shader(out, ShaderType.VERTEX, DrawType.PATH,
emit_shader(out, ShaderType.VERTEX, DrawType.PATH, FillType.LEGACY,
whole_program_features)
emit_shader(out, ShaderType.FRAGMENT, DrawType.PATH, FillType.LEGACY, all_features)
emit_shader(out, ShaderType.FRAGMENT, DrawType.PATH, FillType.CLOCKWISE, all_features)
emit_shader(out, ShaderType.VERTEX, DrawType.PATH, FillType.LEGACY,
whole_program_features.difference({DRAW_INTERIOR_TRIANGLES}))
emit_shader(out, ShaderType.FRAGMENT, DrawType.PATH,
emit_shader(out, ShaderType.FRAGMENT, DrawType.PATH, FillType.LEGACY,
all_features.difference({DRAW_INTERIOR_TRIANGLES}))
emit_shader(out, ShaderType.VERTEX, DrawType.IMAGE_MESH,
emit_shader(out, ShaderType.FRAGMENT, DrawType.PATH, FillType.CLOCKWISE,
all_features.difference({DRAW_INTERIOR_TRIANGLES}))
emit_shader(out, ShaderType.VERTEX, DrawType.IMAGE_MESH, FillType.LEGACY,
whole_program_features.difference(non_image_mesh_features))
emit_shader(out, ShaderType.FRAGMENT, DrawType.IMAGE_MESH,
emit_shader(out, ShaderType.FRAGMENT, DrawType.IMAGE_MESH, FillType.LEGACY,
all_features.difference(non_image_mesh_features))
# If we wanted to emit all combos...

View File

@@ -10,6 +10,8 @@ layout(constant_id = NESTED_CLIPPING_SPECIALIZATION_IDX) const
bool kEnableNestedClipping = false;
layout(constant_id = HSL_BLEND_MODES_SPECIALIZATION_IDX) const
bool kEnableHSLBlendModes = false;
layout(constant_id = CLOCKWISE_FILL_SPECIALIZATION_IDX) const
bool kClockwiseFill = false;
layout(constant_id = BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX) const
bool kBorrowedCoveragePrepass = false;
@@ -19,4 +21,5 @@ layout(constant_id = BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX) const
#define @ENABLE_EVEN_ODD kEnableEvenOdd
#define @ENABLE_NESTED_CLIPPING kEnableNestedClipping
#define @ENABLE_HSL_BLEND_MODES kEnableHSLBlendModes
#define @CLOCKWISE_FILL kClockwiseFill
#define @BORROWED_COVERAGE_PREPASS kBorrowedCoveragePrepass

View File

@@ -1659,6 +1659,7 @@ public:
shaderFeatures & gpu::ShaderFeatures::ENABLE_EVEN_ODD,
shaderFeatures & gpu::ShaderFeatures::ENABLE_NESTED_CLIPPING,
shaderFeatures & gpu::ShaderFeatures::ENABLE_HSL_BLEND_MODES,
shaderMiscFlags & gpu::ShaderMiscFlags::clockwiseFill,
shaderMiscFlags & gpu::ShaderMiscFlags::borrowedCoveragePrepass,
};
static_assert(CLIPPING_SPECIALIZATION_IDX == 0);
@@ -1667,8 +1668,9 @@ public:
static_assert(EVEN_ODD_SPECIALIZATION_IDX == 3);
static_assert(NESTED_CLIPPING_SPECIALIZATION_IDX == 4);
static_assert(HSL_BLEND_MODES_SPECIALIZATION_IDX == 5);
static_assert(BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX == 6);
static_assert(SPECIALIZATION_COUNT == 7);
static_assert(CLOCKWISE_FILL_SPECIALIZATION_IDX == 6);
static_assert(BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX == 7);
static_assert(SPECIALIZATION_COUNT == 8);
VkSpecializationMapEntry permutationMapEntries[SPECIALIZATION_COUNT];
for (uint32_t i = 0; i < SPECIALIZATION_COUNT; ++i)
@@ -3367,6 +3369,11 @@ void RenderContextVulkanImpl::flush(const FlushDescriptor& desc)
{
shaderMiscFlags |= gpu::ShaderMiscFlags::fixedFunctionColorOutput;
}
if (desc.interlockMode == gpu::InterlockMode::rasterOrdering &&
(batch.drawContents & gpu::DrawContents::clockwiseFill))
{
shaderMiscFlags |= gpu::ShaderMiscFlags::clockwiseFill;
}
uint32_t pipelineKey = gpu::ShaderUniqueKey(drawType,
shaderFeatures,
desc.interlockMode,

View File

@@ -40,7 +40,8 @@ public:
static SkPathFillType convert(FillRule value) {
switch (value) {
case FillRule::evenOdd: return SkPathFillType::kEvenOdd;
case FillRule::nonZero: return SkPathFillType::kWinding;
case FillRule::nonZero:
case FillRule::clockwise: return SkPathFillType::kWinding;
}
assert(false);
return SkPathFillType::kWinding;

View File

@@ -210,7 +210,7 @@ public:
: gpu::LoadAction::preserveRenderTarget,
.clearColor = clearColor,
.wireframe = wireframe,
.clockwiseFill = m_clockwiseFill,
.clockwiseFillOverride = m_clockwiseFill,
});
return std::make_unique<RiveRenderer>(m_renderContext.get());

View File

@@ -291,7 +291,7 @@ public:
.clearColor = clearColor,
.msaaSampleCount = m_msaaSampleCount,
.wireframe = wireframe,
.clockwiseFill = m_clockwiseFill,
.clockwiseFillOverride = m_clockwiseFill,
};
m_fiddleContext->begin(std::move(frameDescriptor));
return m_fiddleContext->makeRenderer(m_width, m_height);

View File

@@ -128,7 +128,7 @@ public:
: rive::gpu::LoadAction::preserveRenderTarget,
.clearColor = clearColor,
.wireframe = wireframe,
.clockwiseFill = m_clockwiseFill,
.clockwiseFillOverride = m_clockwiseFill,
};
m_renderContext->beginFrame(frameDescriptor);
return std::make_unique<RiveRenderer>(m_renderContext.get());

View File

@@ -5,6 +5,7 @@
#include "gmutils.hpp"
#include "rive/math/mat2d.hpp"
#include "rive/math/math_types.hpp"
#include <chrono>
#include <vector>
@@ -257,4 +258,17 @@ rive::rcp<rive::RenderImage> LoadImage(rive::Span<uint8_t> bytes)
return TestingWindow::Get()->factory()->decodeImage(bytes);
}
// Appends a "count"-pointed star polygon to "path". Vertices lie on the unit
// circle centered at the origin; "anglePhase" rotates the whole star and
// "dir" (+1 or -1) selects the winding direction. "count" must be odd so the
// skip-(count>>1) stride visits every vertex exactly once.
void path_add_star(Path& path, int count, float anglePhase, float dir)
{
    assert(count & 1);
    // Angular stride between consecutive plotted vertices. Skipping
    // (count >> 1) points per step is what produces the star shape.
    const float step = 2 * rive::math::PI * (count >> 1) / count;
    float theta = anglePhase;
    rive::Vec2D firstVertex = {cosf(theta), sinf(theta)};
    path->move(firstVertex);
    for (int vtx = 1; vtx < count; ++vtx)
    {
        // Accumulate (rather than multiply) so the float rounding matches the
        // original vertex sequence exactly.
        theta += step * dir;
        rive::Vec2D vertex = {cosf(theta), sinf(theta)};
        path->line(vertex);
    }
}
} // namespace rivegm

View File

@@ -158,6 +158,8 @@ private:
float m_LastY = 0;
};
void path_add_star(Path& path, int count, float anglePhase, float dir);
} // namespace rivegm
#endif

View File

@@ -0,0 +1,104 @@
/*
* Copyright 2024 Rive
*/
#include "gm.hpp"
#include "gmutils.hpp"
#include "rive/math/math_types.hpp"
using namespace rivegm;
namespace rive::gpu
{
// Verifies that fill rules still get drawn properly when the content
// interleaves them.
class InterleavedFillRuleGM : public GM
{
public:
    // 7x7 grid of star shapes at 1700x1700; each row exercises a different
    // mix of fill rules so batching across interleaved fill rules is covered.
    InterleavedFillRuleGM() : GM(1700, 1700, "interleavedfillrule") {}

    // Black background so the per-cell fill colors stand out.
    ColorInt clearColor() const override { return 0xff000000; }

    void onDraw(Renderer* renderer) override
    {
        renderer->scale(2, 2);
        // Build one star path per column. The same 7 paths are reused for all
        // 7 rows; only their fill rule changes per cell below.
        std::vector<Path> stars;
        for (uint32_t x = 0; x < 7; ++x)
        {
            Path& path = stars.emplace_back();
            uint32_t n = 5 + x;
            if (n >= 8)
            {
                // Skip n == 8 so the column sequence is 5,6,7,9,10,11,12.
                ++n;
            }
            if (n % 3 == 0 && !((n / 3) & 1))
            {
                // n is an even multiple of 3: compose the shape from n/3
                // rotated triangles with alternating winding directions.
                int ntri = n / 3;
                float dtheta = 2 * rive::math::PI / ntri;
                for (int i = 0; i < ntri; ++i)
                {
                    path_add_star(path, 3, dtheta * i, (i & 1) ? -1 : 1);
                }
            }
            else if (n & 1)
            {
                // Odd point count: a single star contour works directly.
                path_add_star(path, n, 0, 1);
            }
            else
            {
                // Even point count: two odd-count stars, the second rotated a
                // half turn and wound the opposite way.
                path_add_star(path, (n / 2) | 1, 0, 1);
                path_add_star(path, (n / 2) | 1, rive::math::PI, -1);
            }
        }
        for (uint32_t y = 0; y < 7; ++y)
        {
            renderer->save();
            for (uint32_t x = 0; x < 7; ++x)
            {
                renderer->save();
                // Rows 0-3 pin a specific rule (nonZero, evenOdd, clockwise,
                // clockwise+flipped); remaining rows cycle through all three
                // rules so draws with different fill rules interleave.
                bool flipMatrix = false;
                if (y == 0)
                {
                    stars[x]->fillRule(FillRule::nonZero);
                }
                else if (y == 1)
                {
                    stars[x]->fillRule(FillRule::evenOdd);
                    flipMatrix = true;
                }
                else if (y == 2)
                {
                    stars[x]->fillRule(FillRule::clockwise);
                }
                else if (y == 3)
                {
                    stars[x]->fillRule(FillRule::clockwise);
                    flipMatrix = true;
                }
                else
                {
                    stars[x]->fillRule(static_cast<FillRule>((x + y) / 2 % 3));
                    flipMatrix = (x ^ y) & 1;
                }
                renderer->translate(60, 60);
                renderer->scale(50, 50);
                if (flipMatrix)
                {
                    // Negate one axis to test left-handed matrices, which
                    // reverse the apparent winding of clockwise fills.
                    if (x & 1)
                        renderer->scale(-1, 1);
                    else
                        renderer->scale(1, -1);
                }
                renderer->drawPath(
                    stars[x].get(),
                    // Deterministic pseudo-random color per cell; the
                    // 0xff808080 mask forces full alpha and mid-bright RGB.
                    Paint(((y * x + 123458383u) * 285018463u) | 0xff808080));
                renderer->restore();
                renderer->translate(120, 0);
            }
            renderer->restore();
            renderer->translate(0, 120);
        }
    }
};
GMREGISTER(return new InterleavedFillRuleGM)
} // namespace rive::gpu

View File

@@ -8,40 +8,39 @@
using namespace rivegm;
// Traces a "count"-pointed star into "path" using unit-circle vertices.
// "anglePhase" rotates the star; "dir" (+1/-1) picks the winding direction.
// Requires an odd count so the (count >> 1) vertex stride cycles through
// every point exactly once.
static void make_star(Path& path, int count, float anglePhase, float dir)
{
    assert(count & 1);
    const float sweep = 2 * rive::math::PI * (count >> 1) / count;
    float a = anglePhase;
    for (int i = 0; i < count; ++i)
    {
        rive::Vec2D vertex = {cosf(a), sinf(a)};
        if (i == 0)
        {
            path->move(vertex);
        }
        else
        {
            path->line(vertex);
        }
        // Accumulate the angle so float rounding matches the original
        // repeated-addition sequence exactly.
        a += sweep * dir;
    }
}
static void add_star(Path& path, int count, float dir)
{
if (count & 1)
{
make_star(path, count, 0, dir);
path_add_star(path, count, 0, dir);
}
else
{
count >>= 1;
make_star(path, count, 0, dir);
make_star(path,
count,
rive::math::PI,
1); // always wind one on the 2nd contour
path_add_star(path, count, 0, dir);
path_add_star(path,
count,
rive::math::PI,
1); // always wind one on the 2nd contour
}
}
static std::string fillrule_to_name(rive::FillRule fr)
{
return std::string("poly_") +
(fr == rive::FillRule::nonZero ? "nonZero" : "evenOdd");
std::string polyName("poly_");
switch (fr)
{
case rive::FillRule::nonZero:
polyName += "nonZero";
break;
case rive::FillRule::evenOdd:
polyName += "evenOdd";
break;
case rive::FillRule::clockwise:
polyName += "clockwise";
break;
}
return polyName;
}
class PolyGM : public GM
@@ -80,3 +79,7 @@ GMREGISTER(return new PolyGM(rive::FillRule::nonZero))
// Expect all to have a hole
GMREGISTER(return new PolyGM(rive::FillRule::evenOdd))
// Expect all to be filled but the black-6-pointer, and the black-6-pointer will
// also be missing half the triangle tips.
GMREGISTER(return new PolyGM(rive::FillRule::clockwise))

View File

@@ -38,7 +38,9 @@ public:
RiveRenderPathDraw(kFullscreenPixelBounds,
Mat2D(),
make_nonempty_placeholder_path(),
FillRule::nonZero,
context->frameDescriptor().clockwiseFillOverride
? FillRule::clockwise
: FillRule::nonZero,
paint,
Type::interiorTriangulationPath,
context->frameDescriptor(),
@@ -134,7 +136,15 @@ public:
RETROFITTED_TRIANGLE_CONTOUR_FLAG);
}
flush->pushOuterCubicsDraw(this, tessVertexCount, tessLocation);
auto shaderMiscFlags = gpu::ShaderMiscFlags::none;
if (flush->frameDescriptor().clockwiseFillOverride)
{
m_drawContents |= gpu::DrawContents::clockwiseFill;
}
flush->pushOuterCubicsDraw(this,
tessVertexCount,
tessLocation,
shaderMiscFlags);
}
}
};