Fix bit rotting in WebGPU

Update the Dawn integration and pin the build script to a specific Dawn commit.
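The Dawn update also replaces the removed wgpu::SwapChain API with direct surface
configuration. A minimal sketch of that frame loop, assuming a Dawn checkout recent
enough to expose wgpu::Surface::Configure()/GetCurrentTexture() (the helper name and
parameters here are illustrative, not part of the diff):

    #include <dawn/webgpu_cpp.h>

    // Sketch only: acquire and present one frame via wgpu::Surface instead of the
    // removed wgpu::SwapChain. 'surface', 'device', width and height are assumed
    // to already exist.
    static void presentOneFrame(wgpu::Surface surface,
                                wgpu::Device device,
                                uint32_t width,
                                uint32_t height)
    {
        // Configure the surface (replaces deviceCreateSwapChain); normally done
        // once per resize rather than every frame.
        wgpu::SurfaceConfiguration config = {
            .device = device,
            .format = wgpu::TextureFormat::RGBA8Unorm,
            .usage = wgpu::TextureUsage::RenderAttachment |
                     wgpu::TextureUsage::CopySrc,
            .width = width,
            .height = height,
            .presentMode = wgpu::PresentMode::Immediate, // No vsync.
        };
        surface.Configure(&config);

        // Per frame: grab the surface texture and render into a view of it.
        wgpu::SurfaceTexture surfaceTexture;
        surface.GetCurrentTexture(&surfaceTexture);
        wgpu::TextureView target = surfaceTexture.texture.CreateView();
        // ...record and submit render passes that draw into 'target'...
        surface.Present();
    }

The real fiddle context additionally unconfigures and reconfigures the surface on
resize and releases the acquired texture view before presenting.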

Implement feathering.

Fix image textures.

Diffs=
3eae4e45b0 Fix bit rotting in WebGPU (#9183)

Co-authored-by: Chris Dalton <99840794+csmartdalton@users.noreply.github.com>
Author: csmartdalton
Date: 2025-03-07 04:32:24 +00:00
Parent: ca53078a62
Commit: 1d97f0dd2f
15 changed files with 849 additions and 666 deletions

View File

@@ -1 +1 @@
-7fe2e819696a098f52eae52af718f036b7e08abf
+3eae4e45b04cbcf14afbc989b8546a6d6c3843fd

View File

@@ -155,6 +155,7 @@ private:
     void resizeGradientTexture(uint32_t width, uint32_t height) override;
     void resizeTessellationTexture(uint32_t width, uint32_t height) override;
+    void resizeAtlasTexture(uint32_t width, uint32_t height) override;
     void prepareToMapBuffers() override {}
@@ -164,6 +165,13 @@ private:
     const wgpu::Queue m_queue;
     const ContextOptions m_contextOptions;
+    constexpr static int COLOR_RAMP_BINDINGS_COUNT = 1;
+    constexpr static int TESS_BINDINGS_COUNT = 6;
+    constexpr static int ATLAS_BINDINGS_COUNT = 7;
+    constexpr static int DRAW_BINDINGS_COUNT = 10;
+    std::array<wgpu::BindGroupLayoutEntry, DRAW_BINDINGS_COUNT>
+        m_perFlushBindingLayouts;
     // Draws emulated render-pass load/store actions for
     // EXT_shader_pixel_local_storage.
     class LoadStoreEXTPipeline;
@@ -185,6 +193,12 @@ private:
     wgpu::Texture m_tessVertexTexture;
     wgpu::TextureView m_tessVertexTextureView;
+    // Renders feathers to the atlas.
+    class AtlasPipeline;
+    std::unique_ptr<AtlasPipeline> m_atlasPipeline;
+    wgpu::Texture m_atlasTexture;
+    wgpu::TextureView m_atlasTextureView;
     // Draw paths and image meshes using the gradient and tessellation textures.
     class DrawPipeline;
     std::map<uint32_t, DrawPipeline> m_drawPipelines;
@@ -195,8 +209,13 @@ private:
     wgpu::PipelineLayout m_drawPipelineLayout;
     wgpu::Buffer m_pathPatchVertexBuffer;
     wgpu::Buffer m_pathPatchIndexBuffer;
-    wgpu::Texture
-        m_nullImagePaintTexture; // Bound when there is not an image paint.
+    // Gaussian integral table for feathering.
+    wgpu::Texture m_featherTexture;
+    wgpu::TextureView m_featherTextureView;
+    // Bound when there is not an image paint.
+    wgpu::Texture m_nullImagePaintTexture;
     wgpu::TextureView m_nullImagePaintTextureView;
 };
 } // namespace rive::gpu

View File

@@ -14,8 +14,8 @@ else
 fi
 cd dawn
-git checkout origin/main
+git checkout 50f469b60b89ac3575abc43f1d6bbe7dcd39e647
 cp scripts/standalone.gclient .gclient
-gclient sync
+gclient sync -f -D
-gn gen --args='is_debug=false dawn_complete_static_libs=true use_custom_libcxx=true dawn_use_swiftshader=false angle_enable_swiftshader=false' out/release
+gn gen --args='is_debug=false dawn_complete_static_libs=true use_custom_libcxx=false dawn_use_swiftshader=false angle_enable_swiftshader=false' out/release
 ninja -C out/release -j20 webgpu_dawn_static cpp proc_static

View File

@@ -10,187 +10,94 @@ std::unique_ptr<FiddleContext> FiddleContext::MakeDawnPLS(
#else #else
#include "dawn/native/DawnNative.h"
#include "dawn/dawn_proc.h"
#include "rive/renderer/rive_render_factory.hpp"
#include "rive/renderer/rive_renderer.hpp" #include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp" #include "rive/renderer/webgpu/render_context_webgpu_impl.hpp"
#include <array>
#include <thread>
using namespace rive; using namespace rive;
using namespace rive::gpu; using namespace rive::gpu;
static void print_device_error(WGPUErrorType errorType,
const char* message,
void*)
{
const char* errorTypeName = "";
switch (errorType)
{
case WGPUErrorType_Validation:
errorTypeName = "Validation";
break;
case WGPUErrorType_OutOfMemory:
errorTypeName = "Out of memory";
break;
case WGPUErrorType_Unknown:
errorTypeName = "Unknown";
break;
case WGPUErrorType_DeviceLost:
errorTypeName = "Device lost";
break;
default:
RIVE_UNREACHABLE();
return;
}
printf("%s error: %s\n", errorTypeName, message);
}
static void device_lost_callback(WGPUDeviceLostReason reason,
const char* message,
void*)
{
printf("device lost: %s\n", message);
}
static void device_log_callback(WGPULoggingType type,
const char* message,
void*)
{
printf("Device log %s\n", message);
}
#ifdef __APPLE__
extern float GetDawnWindowBackingScaleFactor(GLFWwindow*, bool retina);
extern std::unique_ptr<wgpu::ChainedStruct>
SetupDawnWindowAndGetSurfaceDescriptor(GLFWwindow*, bool retina);
#else
#define GLFW_EXPOSE_NATIVE_WIN32 #define GLFW_EXPOSE_NATIVE_WIN32
#include <GLFW/glfw3.h> #include <GLFW/glfw3.h>
#include <GLFW/glfw3native.h> #include <GLFW/glfw3native.h>
static float GetDawnWindowBackingScaleFactor(GLFWwindow*, bool retina) #define _CRT_SECURE_NO_DEPRECATE
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// Dawn integration based on:
// https://gist.github.com/mmozeiko/4c68b91faff8b7026e8c5e44ff810b62
static void on_device_error(WGPUDevice const* device,
WGPUErrorType type,
WGPUStringView message,
void* userdata1,
void* userdata2)
{ {
return 1; fprintf(stderr, "WebGPU Error: %s\n", message.data);
abort();
} }
static std::unique_ptr<wgpu::ChainedStruct> static void on_adapter_request_ended(WGPURequestAdapterStatus status,
SetupDawnWindowAndGetSurfaceDescriptor(GLFWwindow* window, bool retina) WGPUAdapter adapter,
struct WGPUStringView message,
void* userdata1,
void* userdata2)
{ {
std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc = if (status != WGPURequestAdapterStatus_Success)
std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>(); {
desc->hwnd = glfwGetWin32Window(window); // cannot find adapter?
desc->hinstance = GetModuleHandle(nullptr); fprintf(stderr, "Failed to find an adapter: %s\n", message.data);
return std::move(desc); abort();
}
else
{
// use first adapter provided
WGPUAdapter* result = (WGPUAdapter*)userdata1;
if (*result == NULL)
{
*result = adapter;
}
}
} }
#endif
const WGPUTextureFormat SWAPCHAIN_FORMAT = WGPUTextureFormat_RGBA8Unorm;
class FiddleContextDawnPLS : public FiddleContext class FiddleContextDawnPLS : public FiddleContext
{ {
public: public:
FiddleContextDawnPLS(FiddleContextOptions options) : m_options(options) FiddleContextDawnPLS(FiddleContextOptions options) : m_options(options)
{ {
WGPUInstanceDescriptor instanceDescriptor{}; // optionally use WGPUInstanceDescriptor::nextInChain for
instanceDescriptor.features.timedWaitAnyEnable = true; // WGPUDawnTogglesDescriptor with various toggles enabled or
// disabled:
// https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn/native/Toggles.cpp
WGPUInstanceDescriptor instanceDescriptor = {
.capabilities.timedWaitAnyEnable = true,
};
m_instance = m_instance =
std::make_unique<dawn::native::Instance>(&instanceDescriptor); wgpu::Instance::Acquire(wgpuCreateInstance(&instanceDescriptor));
assert(m_instance && "Failed to create WebGPU instance");
wgpu::RequestAdapterOptions adapterOptions = {
.powerPreference = wgpu::PowerPreference::HighPerformance,
};
// Get an adapter for the backend to use, and create the device.
auto adapters = m_instance->EnumerateAdapters(&adapterOptions);
wgpu::DawnAdapterPropertiesPowerPreference power_props{};
wgpu::AdapterProperties adapterProperties{};
adapterProperties.nextInChain = &power_props;
// Find the first adapter which satisfies the adapterType requirement.
auto isAdapterType = [&adapterProperties](const auto& adapter) -> bool {
adapter.GetProperties(&adapterProperties);
return adapterProperties.adapterType ==
wgpu::AdapterType::DiscreteGPU;
};
auto preferredAdapter =
std::find_if(adapters.begin(), adapters.end(), isAdapterType);
if (preferredAdapter == adapters.end())
{
fprintf(stderr,
"Failed to find an adapter! Please try another adapter "
"type.\n");
return;
}
std::vector<const char*> enableToggleNames = {
"allow_unsafe_apis",
"turn_off_vsync",
// "skip_validation",
};
std::vector<const char*> disabledToggleNames;
WGPUDawnTogglesDescriptor toggles = {
.chain =
{
.next = nullptr,
.sType = WGPUSType_DawnTogglesDescriptor,
},
.enabledToggleCount = enableToggleNames.size(),
.enabledToggles = enableToggleNames.data(),
.disabledToggleCount = disabledToggleNames.size(),
.disabledToggles = disabledToggleNames.data(),
};
std::vector<WGPUFeatureName> requiredFeatures = {
// WGPUFeatureName_IndirectFirstInstance,
// WGPUFeatureName_ShaderF16,
// WGPUFeatureName_BGRA8UnormStorage,
// WGPUFeatureName_Float32Filterable,
// WGPUFeatureName_DawnInternalUsages,
// WGPUFeatureName_DawnMultiPlanarFormats,
// WGPUFeatureName_DawnNative,
// WGPUFeatureName_ImplicitDeviceSynchronization,
WGPUFeatureName_SurfaceCapabilities,
// WGPUFeatureName_TransientAttachments,
// WGPUFeatureName_DualSourceBlending,
// WGPUFeatureName_Norm16TextureFormats,
// WGPUFeatureName_HostMappedPointer,
// WGPUFeatureName_ChromiumExperimentalReadWriteStorageTexture,
};
WGPUDeviceDescriptor deviceDesc = {
.nextInChain = reinterpret_cast<WGPUChainedStruct*>(&toggles),
.requiredFeatureCount = requiredFeatures.size(),
.requiredFeatures = requiredFeatures.data(),
};
m_backendDevice = preferredAdapter->CreateDevice(&deviceDesc);
DawnProcTable backendProcs = dawn::native::GetProcs();
dawnProcSetProcs(&backendProcs);
backendProcs.deviceSetUncapturedErrorCallback(m_backendDevice,
print_device_error,
nullptr);
backendProcs.deviceSetDeviceLostCallback(m_backendDevice,
device_lost_callback,
nullptr);
backendProcs.deviceSetLoggingCallback(m_backendDevice,
device_log_callback,
nullptr);
m_device = wgpu::Device::Acquire(m_backendDevice);
m_queue = m_device.GetQueue();
m_renderContext = RenderContextWebGPUImpl::MakeContext(
m_device,
m_queue,
RenderContextWebGPUImpl::ContextOptions());
} }
float dpiScale(GLFWwindow* window) const override ~FiddleContextDawnPLS()
{ {
return GetDawnWindowBackingScaleFactor(window, m_options.retinaDisplay); // Destroy in reverse order so objects go before their owners.
if (m_currentSurfaceTextureView != nullptr)
{
wgpuTextureViewRelease(m_currentSurfaceTextureView);
m_currentSurfaceTextureView = nullptr;
}
m_queue = nullptr;
m_device = nullptr;
m_adapter = nullptr;
if (m_surfaceIsConfigured)
{
m_surface.Unconfigure();
}
m_surface = nullptr;
} }
float dpiScale(GLFWwindow* window) const override { return 1; }
Factory* factory() override { return m_renderContext.get(); } Factory* factory() override { return m_renderContext.get(); }
rive::gpu::RenderContext* renderContextOrNull() override rive::gpu::RenderContext* renderContextOrNull() override
@@ -208,40 +115,140 @@ public:
int height, int height,
uint32_t sampleCount) override uint32_t sampleCount) override
{ {
DawnProcTable backendProcs = dawn::native::GetProcs(); if (m_renderContext == nullptr)
// Create the swapchain
auto surfaceChainedDesc =
SetupDawnWindowAndGetSurfaceDescriptor(window,
m_options.retinaDisplay);
WGPUSurfaceDescriptor surfaceDesc = {
.nextInChain =
reinterpret_cast<WGPUChainedStruct*>(surfaceChainedDesc.get()),
};
WGPUSurface surface =
backendProcs.instanceCreateSurface(m_instance->Get(), &surfaceDesc);
WGPUSwapChainDescriptor swapChainDesc = {
.usage = WGPUTextureUsage_RenderAttachment,
.format = WGPUTextureFormat_BGRA8Unorm,
.width = static_cast<uint32_t>(width),
.height = static_cast<uint32_t>(height),
.presentMode = WGPUPresentMode_Immediate, // No vsync.
};
if (m_options.enableReadPixels)
{ {
swapChainDesc.usage |= WGPUTextureUsage_CopySrc; WGPUSurfaceSourceWindowsHWND surfaceDescWin = {
.chain.sType = WGPUSType_SurfaceSourceWindowsHWND,
.hinstance = GetModuleHandle(nullptr),
.hwnd = glfwGetWin32Window(window),
};
WGPUSurfaceDescriptor surfaceDesc = {
.nextInChain = &surfaceDescWin.chain,
};
m_surface = wgpu::Surface::Acquire(
wgpuInstanceCreateSurface(m_instance.Get(), &surfaceDesc));
assert(m_surface && "Failed to create WebGPU surface");
WGPURequestAdapterOptions options = {
.compatibleSurface = m_surface.Get(),
.powerPreference = WGPUPowerPreference_HighPerformance,
};
WGPUAdapter adapter = nullptr;
await(wgpuInstanceRequestAdapter(
m_instance.Get(),
&options,
{
.mode = WGPUCallbackMode_WaitAnyOnly,
.callback = on_adapter_request_ended,
.userdata1 = &adapter,
}));
m_adapter = wgpu::Adapter::Acquire(adapter);
assert(m_adapter && "Failed to get WebGPU adapter");
// can query extra details on what adapter supports:
// wgpuAdapterEnumerateFeatures
// wgpuAdapterGetLimits
// wgpuAdapterGetProperties
// wgpuAdapterHasFeature
WGPUAdapterInfo info = {0};
wgpuAdapterGetInfo(m_adapter.Get(), &info);
printf("WebGPU GPU: %s\n", info.description.data);
#if 0
const char* adapter_types[] = {
[WGPUAdapterType_DiscreteGPU] = "Discrete GPU",
[WGPUAdapterType_IntegratedGPU] = "Integrated GPU",
[WGPUAdapterType_CPU] = "CPU",
[WGPUAdapterType_Unknown] = "unknown",
};
printf("Device = %.*s\n"
"Description = %.*s\n"
"Vendor = %.*s\n"
"Architecture = %.*s\n"
"Adapter Type = %s\n",
(int)info.device.length,
info.device.data,
(int)info.description.length,
info.description.data,
(int)info.vendor.length,
info.vendor.data,
(int)info.architecture.length,
info.architecture.data,
adapter_types[info.adapterType]);
#endif
// if you want to be sure device will support things you'll use,
// you can specify requirements here:
// WGPUSupportedLimits supported = { 0 };
// wgpuAdapterGetLimits(adapter, &supported);
// supported.limits.maxTextureDimension2D = kTextureWidth;
// supported.limits.maxBindGroups = 1;
// supported.limits.maxBindingsPerBindGroup = 3; // uniform
// buffer for vertex shader, and texture + sampler for fragment
// supported.limits.maxSampledTexturesPerShaderStage = 1;
// supported.limits.maxSamplersPerShaderStage = 1;
// supported.limits.maxUniformBuffersPerShaderStage = 1;
// supported.limits.maxUniformBufferBindingSize = 4 * 4 *
// sizeof(float);
// // 4x4 matrix supported.limits.maxVertexBuffers = 1;
// supported.limits.maxBufferSize = sizeof(kVertexData);
// supported.limits.maxVertexAttributes = 3; // pos, texcoord,
// color supported.limits.maxVertexBufferArrayStride =
// kVertexStride; supported.limits.maxColorAttachments = 1;
WGPUDeviceDescriptor deviceDesc = {
// notify on errors
.uncapturedErrorCallbackInfo.callback = &on_device_error,
// extra features:
// https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn/native/Features.cpp
//.requiredFeaturesCount = n
//.requiredFeatures = (WGPUFeatureName[]) { ... }
//.requiredLimits = &(WGPURequiredLimits) { .limits =
// supported.limits },
};
m_device = wgpu::Device::Acquire(
wgpuAdapterCreateDevice(m_adapter.Get(), &deviceDesc));
assert(m_device && "Failed to create WebGPU device");
// default device queue
m_queue = m_device.GetQueue();
m_renderContext = RenderContextWebGPUImpl::MakeContext(
m_device,
m_queue,
RenderContextWebGPUImpl::ContextOptions());
} }
WGPUSwapChain backendSwapChain = if (m_surfaceIsConfigured)
backendProcs.deviceCreateSwapChain(m_backendDevice, {
surface, // release old swap chain
&swapChainDesc); m_surface.Unconfigure();
m_swapchain = wgpu::SwapChain::Acquire(backendSwapChain); m_surfaceIsConfigured = false;
}
WGPUSurfaceConfiguration surfaceConfig = {
.device = m_device.Get(),
.format = SWAPCHAIN_FORMAT,
.usage =
WGPUTextureUsage_CopySrc | WGPUTextureUsage_RenderAttachment,
// .alphaMode = WGPUCompositeAlphaMode_Premultiplied,
.width = static_cast<uint32_t>(width),
.height = static_cast<uint32_t>(height),
.presentMode = WGPUPresentMode_Immediate,
};
wgpuSurfaceConfigure(m_surface.Get(), &surfaceConfig);
m_surfaceIsConfigured = true;
m_renderTarget = m_renderTarget =
m_renderContext->static_impl_cast<RenderContextWebGPUImpl>() m_renderContext->static_impl_cast<RenderContextWebGPUImpl>()
->makeRenderTarget(wgpu::TextureFormat::BGRA8Unorm, ->makeRenderTarget(wgpu::TextureFormat::RGBA8Unorm,
width, width,
height); height);
m_pixelReadBuff = {}; m_pixelReadBuff = {};
@@ -256,12 +263,26 @@ public:
void begin(const RenderContext::FrameDescriptor& frameDescriptor) override void begin(const RenderContext::FrameDescriptor& frameDescriptor) override
{ {
assert(m_swapchain.GetCurrentTexture().GetWidth() == wgpuSurfaceGetCurrentTexture(m_surface.Get(), &m_currentSurfaceTexture);
assert(wgpuTextureGetWidth(m_currentSurfaceTexture.texture) ==
m_renderTarget->width()); m_renderTarget->width());
assert(m_swapchain.GetCurrentTexture().GetHeight() == assert(wgpuTextureGetHeight(m_currentSurfaceTexture.texture) ==
m_renderTarget->height()); m_renderTarget->height());
m_renderTarget->setTargetTextureView( WGPUTextureViewDescriptor textureViewDesc = {
m_swapchain.GetCurrentTextureView()); .format = SWAPCHAIN_FORMAT,
.dimension = WGPUTextureViewDimension_2D,
.baseMipLevel = 0,
.mipLevelCount = 1,
.baseArrayLayer = 0,
.arrayLayerCount = 1,
.aspect = WGPUTextureAspect_All,
.usage =
WGPUTextureUsage_CopySrc | WGPUTextureUsage_RenderAttachment,
};
m_currentSurfaceTextureView =
wgpuTextureCreateView(m_currentSurfaceTexture.texture,
&textureViewDesc);
m_renderTarget->setTargetTextureView(m_currentSurfaceTextureView);
m_renderContext->beginFrame(std::move(frameDescriptor)); m_renderContext->beginFrame(std::move(frameDescriptor));
} }
@@ -295,12 +316,13 @@ public:
assert(m_pixelReadBuff.GetSize() == h * rowBytesInReadBuff); assert(m_pixelReadBuff.GetSize() == h * rowBytesInReadBuff);
// Blit the framebuffer into m_pixelReadBuff. // Blit the framebuffer into m_pixelReadBuff.
wgpu::CommandEncoder readEncoder = m_device.CreateCommandEncoder(); wgpu::CommandEncoder readEncoder =
wgpu::ImageCopyTexture srcTexture = { m_device.CreateCommandEncoder(NULL);
.texture = m_swapchain.GetCurrentTexture(), wgpu::TexelCopyTextureInfo srcTexture = {
.texture = m_currentSurfaceTexture.texture,
.origin = {0, 0, 0}, .origin = {0, 0, 0},
}; };
wgpu::ImageCopyBuffer dstBuffer = { wgpu::TexelCopyBufferInfo dstBuffer = {
.layout = .layout =
{ {
.offset = 0, .offset = 0,
@@ -314,31 +336,24 @@ public:
}; };
readEncoder.CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize); readEncoder.CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
wgpu::CommandBuffer commands = readEncoder.Finish(); wgpu::CommandBuffer commands = readEncoder.Finish(NULL);
m_queue.Submit(1, &commands); m_queue.Submit(1, &commands);
// Request a mapping of m_pixelReadBuff and wait for it to complete. // Request a mapping of m_pixelReadBuff and wait for it to complete.
bool mapped = false; await(m_pixelReadBuff.MapAsync(
m_pixelReadBuff.MapAsync(
wgpu::MapMode::Read, wgpu::MapMode::Read,
0, 0,
h * rowBytesInReadBuff, h * rowBytesInReadBuff,
[](WGPUBufferMapAsyncStatus status, void* mapped) { wgpu::CallbackMode::WaitAnyOnly,
if (status != WGPUBufferMapAsyncStatus_Success) [](wgpu::MapAsyncStatus status, wgpu::StringView message) {
if (status != wgpu::MapAsyncStatus::Success)
{ {
fprintf(stderr, "failed to map m_pixelReadBuff\n"); fprintf(stderr,
"failed to map m_pixelReadBuff: %s\n",
message.data);
abort(); abort();
} }
*reinterpret_cast<bool*>(mapped) = true; }));
},
&mapped);
while (!mapped)
{
// Spin until the GPU is finished with m_pixelReadBuff and we
// can read it.
std::this_thread::sleep_for(std::chrono::milliseconds(1));
tick();
}
// Copy the image data from m_pixelReadBuff to pixelData. // Copy the image data from m_pixelReadBuff to pixelData.
pixelData->resize(h * w * 4); pixelData->resize(h * w * 4);
@@ -350,30 +365,42 @@ public:
const uint8_t* src = const uint8_t* src =
&pixelReadBuffData[(h - y - 1) * rowBytesInReadBuff]; &pixelReadBuffData[(h - y - 1) * rowBytesInReadBuff];
size_t row = y * w * 4; size_t row = y * w * 4;
for (size_t x = 0; x < w * 4; x += 4) memcpy(pixelData->data() + row, src, w * 4);
{
// BGBRA -> RGBA.
(*pixelData)[row + x + 0] = src[x + 2];
(*pixelData)[row + x + 1] = src[x + 1];
(*pixelData)[row + x + 2] = src[x + 0];
(*pixelData)[row + x + 3] = src[x + 3];
}
} }
m_pixelReadBuff.Unmap(); m_pixelReadBuff.Unmap();
} }
m_swapchain.Present(); wgpuTextureViewRelease(m_currentSurfaceTextureView);
m_currentSurfaceTextureView = nullptr;
m_surface.Present();
} }
void tick() override { m_device.Tick(); } void tick() override { wgpuInstanceProcessEvents(m_instance.Get()); }
private: private:
void await(WGPUFuture future)
{
WGPUFutureWaitInfo futureWait = {future};
if (wgpuInstanceWaitAny(m_instance.Get(), 1, &futureWait, -1) !=
WGPUWaitStatus_Success)
{
fprintf(stderr, "wgpuInstanceWaitAny failed.");
abort();
}
}
const FiddleContextOptions m_options; const FiddleContextOptions m_options;
WGPUDevice m_backendDevice = {};
wgpu::Device m_device = {}; wgpu::Instance m_instance = nullptr;
wgpu::Queue m_queue = {}; wgpu::Surface m_surface = nullptr;
wgpu::SwapChain m_swapchain = {}; wgpu::Adapter m_adapter = nullptr;
std::unique_ptr<dawn::native::Instance> m_instance; wgpu::Device m_device = nullptr;
wgpu::Queue m_queue = nullptr;
bool m_surfaceIsConfigured = false;
WGPUSurfaceTexture m_currentSurfaceTexture = {};
WGPUTextureView m_currentSurfaceTextureView = {};
std::unique_ptr<RenderContext> m_renderContext; std::unique_ptr<RenderContext> m_renderContext;
rcp<RenderTargetWebGPU> m_renderTarget; rcp<RenderTargetWebGPU> m_renderTarget;
wgpu::Buffer m_pixelReadBuff; wgpu::Buffer m_pixelReadBuff;

View File

@@ -732,14 +732,6 @@ static void update_window_title(double fps,
 void riveMainLoop()
 {
-    if (rivName && !rivFile)
-    {
-        std::ifstream rivStream(rivName, std::ios::binary);
-        std::vector<uint8_t> rivBytes(std::istreambuf_iterator<char>(rivStream),
-                                      {});
-        rivFile = File::import(rivBytes, fiddleContext->factory());
-    }
 #ifdef __EMSCRIPTEN__
     {
         // Fit the canvas to the browser window size.
@@ -780,6 +772,14 @@
         needsTitleUpdate = false;
     }
+    if (rivName && !rivFile)
+    {
+        std::ifstream rivStream(rivName, std::ios::binary);
+        std::vector<uint8_t> rivBytes(std::istreambuf_iterator<char>(rivStream),
+                                      {});
+        rivFile = File::import(rivBytes, fiddleContext->factory());
+    }
     // Call right before begin()
     if (hotloadShaders)
     {

View File

@@ -123,10 +123,11 @@ if not _OPTIONS['with-webgpu'] then
         'dependencies/dawn/out/release/obj/src/dawn/platform',
     })
     links({
-        'dawn_native_static',
+        'winmm',
         'webgpu_dawn',
-        'dawn_platform_static',
+        'dawn_native_static',
         'dawn_proc_static',
+        'dawn_platform_static',
     })
 end
@@ -214,6 +215,8 @@ if _OPTIONS['with-webgpu'] or _OPTIONS['with-dawn'] then
         'webgpu_player/rivs/stopwatch.riv',
         'webgpu_player/rivs/volume_bars.riv',
         'webgpu_player/rivs/travel_icons.riv',
+        'webgpu_player/rivs/falling.riv',
+        'webgpu_player/rivs/tape.riv',
     })
     links({
@@ -259,6 +262,9 @@ if _OPTIONS['with-webgpu'] or _OPTIONS['with-dawn'] then
         'dependencies/dawn/include',
         'dependencies/dawn/out/release/gen/include',
     })
+    files({
+        'path_fiddle/fiddle_context_dawn.cpp',
+    })
     libdirs({
         'dependencies/dawn/out/release/obj/src/dawn',
         'dependencies/dawn/out/release/obj/src/dawn/native',
@@ -266,10 +272,11 @@ if _OPTIONS['with-webgpu'] or _OPTIONS['with-dawn'] then
         'dependencies/dawn/out/release/obj/src/dawn/platform',
     })
     links({
-        'dawn_native_static',
+        'winmm',
         'webgpu_dawn',
-        'dawn_platform_static',
+        'dawn_native_static',
         'dawn_proc_static',
+        'dawn_platform_static',
     })
 end

View File

@@ -240,7 +240,6 @@ do
         'dependencies/dawn/include',
         'dependencies/dawn/out/release/gen/include',
     })
-    files({ 'dependencies/dawn/out/release/gen/src/dawn/webgpu_cpp.cpp' })
 end
 filter({ 'options:with-webgpu or with-dawn' })

View File

@@ -228,29 +228,29 @@ half3 advanced_blend_coeffs(half3 src, half3 dst, ushort mode)
             {
                 src.rgb = clamp(src.rgb, make_half3(.0), make_half3(1.));
                 coeffs = set_lum_sat(src.rgb, dst.rgb, dst.rgb);
-                break;
             }
+            break;
         case BLEND_MODE_SATURATION:
             if (@ENABLE_HSL_BLEND_MODES)
             {
                 src.rgb = clamp(src.rgb, make_half3(.0), make_half3(1.));
                 coeffs = set_lum_sat(dst.rgb, src.rgb, dst.rgb);
-                break;
             }
+            break;
         case BLEND_MODE_COLOR:
             if (@ENABLE_HSL_BLEND_MODES)
             {
                 src.rgb = clamp(src.rgb, make_half3(.0), make_half3(1.));
                 coeffs = set_lum(src.rgb, dst.rgb);
-                break;
             }
+            break;
         case BLEND_MODE_LUMINOSITY:
             if (@ENABLE_HSL_BLEND_MODES)
             {
                 src.rgb = clamp(src.rgb, make_half3(.0), make_half3(1.));
                 coeffs = set_lum(dst.rgb, src.rgb);
-                break;
             }
+            break;
 #endif
     }
     return coeffs;

View File

@@ -1,21 +1,21 @@
 layout(constant_id = CLIPPING_SPECIALIZATION_IDX) const
-bool kEnableClipping = false;
+bool kEnableClipping = true;
 layout(constant_id = CLIP_RECT_SPECIALIZATION_IDX) const
-bool kEnableClipRect = false;
+bool kEnableClipRect = true;
 layout(constant_id = ADVANCED_BLEND_SPECIALIZATION_IDX) const
-bool kEnableAdvancedBlend = false;
+bool kEnableAdvancedBlend = true;
 layout(constant_id = FEATHER_SPECIALIZATION_IDX) const
-bool kEnableFeather = false;
+bool kEnableFeather = true;
 layout(constant_id = EVEN_ODD_SPECIALIZATION_IDX) const
-bool kEnableEvenOdd = false;
+bool kEnableEvenOdd = true;
 layout(constant_id = NESTED_CLIPPING_SPECIALIZATION_IDX) const
-bool kEnableNestedClipping = false;
+bool kEnableNestedClipping = true;
 layout(constant_id = HSL_BLEND_MODES_SPECIALIZATION_IDX) const
-bool kEnableHSLBlendModes = false;
+bool kEnableHSLBlendModes = true;
 layout(constant_id = CLOCKWISE_FILL_SPECIALIZATION_IDX) const
-bool kClockwiseFill = false;
+bool kClockwiseFill = true;
 layout(constant_id = BORROWED_COVERAGE_PREPASS_SPECIALIZATION_IDX) const
-bool kBorrowedCoveragePrepass = false;
+bool kBorrowedCoveragePrepass = true;
 #define @ENABLE_CLIPPING kEnableClipping
 #define @ENABLE_CLIP_RECT kEnableClipRect

View File

@@ -11,6 +11,9 @@
#include "generated/shaders/spirv/color_ramp.frag.h" #include "generated/shaders/spirv/color_ramp.frag.h"
#include "generated/shaders/spirv/tessellate.vert.h" #include "generated/shaders/spirv/tessellate.vert.h"
#include "generated/shaders/spirv/tessellate.frag.h" #include "generated/shaders/spirv/tessellate.frag.h"
#include "generated/shaders/spirv/render_atlas.vert.h"
#include "generated/shaders/spirv/render_atlas_fill.frag.h"
#include "generated/shaders/spirv/render_atlas_stroke.frag.h"
#include "generated/shaders/spirv/draw_path.vert.h" #include "generated/shaders/spirv/draw_path.vert.h"
#include "generated/shaders/spirv/draw_path.frag.h" #include "generated/shaders/spirv/draw_path.frag.h"
#include "generated/shaders/spirv/draw_interior_triangles.vert.h" #include "generated/shaders/spirv/draw_interior_triangles.vert.h"
@@ -25,6 +28,7 @@
#include "generated/shaders/common.glsl.hpp" #include "generated/shaders/common.glsl.hpp"
#include "generated/shaders/bezier_utils.glsl.hpp" #include "generated/shaders/bezier_utils.glsl.hpp"
#include "generated/shaders/tessellate.glsl.hpp" #include "generated/shaders/tessellate.glsl.hpp"
#include "generated/shaders/render_atlas.glsl.hpp"
#include "generated/shaders/advanced_blend.glsl.hpp" #include "generated/shaders/advanced_blend.glsl.hpp"
#include "generated/shaders/draw_path.glsl.hpp" #include "generated/shaders/draw_path.glsl.hpp"
#include "generated/shaders/draw_path_common.glsl.hpp" #include "generated/shaders/draw_path_common.glsl.hpp"
@@ -36,6 +40,13 @@
 #ifdef RIVE_DAWN
 #include <dawn/webgpu_cpp.h>
+namespace wgpu
+{
+using ImageCopyBuffer = TexelCopyBufferInfo;
+using ImageCopyTexture = TexelCopyTextureInfo;
+using TextureDataLayout = TexelCopyBufferLayout;
+}; // namespace wgpu
 static void enable_shader_pixel_local_storage_ext(wgpu::RenderPassEncoder,
                                                   bool enabled)
 {
@@ -44,6 +55,8 @@ static void enable_shader_pixel_local_storage_ext(wgpu::RenderPassEncoder,
 static void write_texture(wgpu::Queue queue,
                           wgpu::Texture texture,
+                          uint32_t x,
+                          uint32_t y,
                           uint32_t bytesPerRow,
                           uint32_t width,
                           uint32_t height,
@@ -52,7 +65,7 @@ static void write_texture(wgpu::Queue queue,
 {
     wgpu::ImageCopyTexture dest = {
         .texture = texture,
-        .mipLevel = 0,
+        .origin = {x, y},
     };
     wgpu::TextureDataLayout layout = {
         .bytesPerRow = bytesPerRow,
@@ -100,6 +113,8 @@ EM_JS(void,
       write_texture_js,
       (int queue,
        int texture,
+       uint32_t x,
+       uint32_t y,
        uint32_t bytesPerRow,
        uint32_t width,
        uint32_t height,
@@ -111,7 +126,7 @@ EM_JS(void,
           // Copy data off the WASM heap before sending it to WebGPU bindings.
           const data = new Uint8Array(dataSize);
           data.set(Module.HEAPU8.subarray(indexU8, indexU8 + dataSize));
-          queue.writeTexture({texture},
+          queue.writeTexture({texture : texture, origin : [ x, y, 0 ]},
                              data,
                              {bytesPerRow : bytesPerRow},
                              {width : width, height : height});
@@ -119,6 +134,8 @@ EM_JS(void,
 static void write_texture(wgpu::Queue queue,
                           wgpu::Texture texture,
+                          uint32_t x,
+                          uint32_t y,
                           uint32_t bytesPerRow,
                           uint32_t width,
                           uint32_t height,
@@ -127,6 +144,8 @@ static void write_texture(wgpu::Queue queue,
 {
     write_texture_js(emscripten_webgpu_export_queue(queue.Get()),
                      emscripten_webgpu_export_texture(texture.Get()),
+                     x,
+                     y,
                      bytesPerRow,
                      width,
                      height,
@@ -282,25 +301,17 @@ private:
class RenderContextWebGPUImpl::ColorRampPipeline class RenderContextWebGPUImpl::ColorRampPipeline
{ {
public: public:
ColorRampPipeline(wgpu::Device device) ColorRampPipeline(RenderContextWebGPUImpl* impl)
{ {
wgpu::BindGroupLayoutEntry bindingLayouts[] = { const wgpu::Device device = impl->device();
{
.binding = FLUSH_UNIFORM_BUFFER_IDX, wgpu::BindGroupLayoutDescriptor colorRampBindingsDesc = {
.visibility = wgpu::ShaderStage::Vertex, .entryCount = COLOR_RAMP_BINDINGS_COUNT,
.buffer = .entries = impl->m_perFlushBindingLayouts.data(),
{
.type = wgpu::BufferBindingType::Uniform,
},
},
}; };
wgpu::BindGroupLayoutDescriptor bindingsDesc = { m_bindGroupLayout =
.entryCount = std::size(bindingLayouts), device.CreateBindGroupLayout(&colorRampBindingsDesc);
.entries = bindingLayouts,
};
m_bindGroupLayout = device.CreateBindGroupLayout(&bindingsDesc);
wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = { wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {
.bindGroupLayoutCount = 1, .bindGroupLayoutCount = 1,
@@ -386,73 +397,35 @@ private:
class RenderContextWebGPUImpl::TessellatePipeline class RenderContextWebGPUImpl::TessellatePipeline
{ {
public: public:
TessellatePipeline(wgpu::Device device, TessellatePipeline(RenderContextWebGPUImpl* impl)
const ContextOptions& contextOptions)
{ {
wgpu::BindGroupLayoutEntry bindingLayouts[] = { const wgpu::Device device = impl->device();
contextOptions.disableStorageBuffers ?
wgpu::BindGroupLayoutEntry{ wgpu::BindGroupLayoutDescriptor perFlushBindingsDesc = {
.binding = PATH_BUFFER_IDX, .entryCount = TESS_BINDINGS_COUNT,
.visibility = wgpu::ShaderStage::Vertex, .entries = impl->m_perFlushBindingLayouts.data(),
.texture =
{
.sampleType = wgpu::TextureSampleType::Uint,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
} :
wgpu::BindGroupLayoutEntry{
.binding = PATH_BUFFER_IDX,
.visibility = wgpu::ShaderStage::Vertex,
.buffer =
{
.type = wgpu::BufferBindingType::ReadOnlyStorage,
},
},
contextOptions.disableStorageBuffers ?
wgpu::BindGroupLayoutEntry{
.binding = CONTOUR_BUFFER_IDX,
.visibility = wgpu::ShaderStage::Vertex,
.texture =
{
.sampleType = wgpu::TextureSampleType::Uint,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
} :
wgpu::BindGroupLayoutEntry{
.binding = CONTOUR_BUFFER_IDX,
.visibility = wgpu::ShaderStage::Vertex,
.buffer =
{
.type = wgpu::BufferBindingType::ReadOnlyStorage,
},
},
{
.binding = FLUSH_UNIFORM_BUFFER_IDX,
.visibility = wgpu::ShaderStage::Vertex,
.buffer =
{
.type = wgpu::BufferBindingType::Uniform,
},
},
}; };
wgpu::BindGroupLayoutDescriptor bindingsDesc = { m_perFlushBindingsLayout =
.entryCount = std::size(bindingLayouts), device.CreateBindGroupLayout(&perFlushBindingsDesc);
.entries = bindingLayouts,
};
m_bindGroupLayout = device.CreateBindGroupLayout(&bindingsDesc); wgpu::BindGroupLayout layouts[] = {
m_perFlushBindingsLayout,
wgpu::BindGroupLayout(),
impl->m_drawBindGroupLayouts[SAMPLER_BINDINGS_SET],
};
static_assert(SAMPLER_BINDINGS_SET == 2);
wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = { wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {
.bindGroupLayoutCount = 1, .bindGroupLayoutCount = std::size(layouts),
.bindGroupLayouts = &m_bindGroupLayout, .bindGroupLayouts = layouts,
}; };
wgpu::PipelineLayout pipelineLayout = wgpu::PipelineLayout pipelineLayout =
device.CreatePipelineLayout(&pipelineLayoutDesc); device.CreatePipelineLayout(&pipelineLayoutDesc);
wgpu::ShaderModule vertexShader; wgpu::ShaderModule vertexShader;
if (contextOptions.disableStorageBuffers) if (impl->m_contextOptions.disableStorageBuffers)
{ {
// The built-in SPIRV does not #define // The built-in SPIRV does not #define
// DISABLE_SHADER_STORAGE_BUFFERS. Recompile the tessellation shader // DISABLE_SHADER_STORAGE_BUFFERS. Recompile the tessellation shader
@@ -552,19 +525,176 @@ public:
m_renderPipeline = device.CreateRenderPipeline(&desc); m_renderPipeline = device.CreateRenderPipeline(&desc);
} }
const wgpu::BindGroupLayout& bindGroupLayout() const wgpu::BindGroupLayout perFlushBindingsLayout() const
{ {
return m_bindGroupLayout; return m_perFlushBindingsLayout;
} }
wgpu::RenderPipeline renderPipeline() const { return m_renderPipeline; } wgpu::RenderPipeline renderPipeline() const { return m_renderPipeline; }
private: private:
wgpu::BindGroupLayout m_bindGroupLayout; wgpu::BindGroupLayout m_perFlushBindingsLayout;
EmJsHandle m_vertexShaderHandle; EmJsHandle m_vertexShaderHandle;
EmJsHandle m_fragmentShaderHandle; EmJsHandle m_fragmentShaderHandle;
wgpu::RenderPipeline m_renderPipeline; wgpu::RenderPipeline m_renderPipeline;
}; };
// Renders feathered fills and strokes to the atlas.
class RenderContextWebGPUImpl::AtlasPipeline
{
public:
AtlasPipeline(RenderContextWebGPUImpl* impl)
{
const wgpu::Device device = impl->device();
wgpu::BindGroupLayoutDescriptor perFlushBindingsDesc = {
.entryCount = ATLAS_BINDINGS_COUNT,
.entries = impl->m_perFlushBindingLayouts.data(),
};
m_perFlushBindingsLayout =
device.CreateBindGroupLayout(&perFlushBindingsDesc);
wgpu::BindGroupLayout layouts[] = {
m_perFlushBindingsLayout,
wgpu::BindGroupLayout(),
impl->m_drawBindGroupLayouts[SAMPLER_BINDINGS_SET],
};
static_assert(SAMPLER_BINDINGS_SET == 2);
wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {
.bindGroupLayoutCount = std::size(layouts),
.bindGroupLayouts = layouts,
};
wgpu::PipelineLayout pipelineLayout =
device.CreatePipelineLayout(&pipelineLayoutDesc);
wgpu::ShaderModule vertexShader;
if (impl->m_contextOptions.disableStorageBuffers)
{
// The built-in SPIRV does not #define
// DISABLE_SHADER_STORAGE_BUFFERS. Recompile the atlas shader
// with storage buffers disabled.
std::ostringstream vertexGLSL;
vertexGLSL << "#version 460\n";
vertexGLSL
<< "#extension GL_EXT_samplerless_texture_functions : enable\n";
vertexGLSL << "#pragma shader_stage(vertex)\n";
vertexGLSL << "#define " GLSL_VERTEX " true\n";
vertexGLSL << "#define " GLSL_DISABLE_SHADER_STORAGE_BUFFERS
" true\n";
vertexGLSL << "#define " GLSL_TARGET_VULKAN " true\n";
vertexGLSL << "#define " << GLSL_DRAW_PATH << '\n';
vertexGLSL << "#define " << GLSL_ENABLE_FEATHER << "true\n";
vertexGLSL << glsl::glsl << '\n';
vertexGLSL << glsl::constants << '\n';
vertexGLSL << glsl::common << '\n';
vertexGLSL << glsl::draw_path_common << '\n';
vertexGLSL << glsl::render_atlas << '\n';
vertexShader = m_vertexShaderHandle.compileShaderModule(
device,
vertexGLSL.str().c_str(),
"glsl");
}
else
{
vertexShader = m_vertexShaderHandle.compileSPIRVShaderModule(
device,
render_atlas_vert,
std::size(render_atlas_vert));
}
wgpu::VertexAttribute attrs[] = {
{
.format = wgpu::VertexFormat::Float32x4,
.offset = 0,
.shaderLocation = 0,
},
{
.format = wgpu::VertexFormat::Float32x4,
.offset = 4 * sizeof(float),
.shaderLocation = 1,
},
};
wgpu::VertexBufferLayout vertexBufferLayout = {
.arrayStride = sizeof(gpu::PatchVertex),
.stepMode = wgpu::VertexStepMode::Vertex,
.attributeCount = std::size(attrs),
.attributes = attrs,
};
wgpu::ShaderModule fillFragmentShader =
m_fragmentShaderHandle.compileSPIRVShaderModule(
device,
render_atlas_fill_frag,
std::size(render_atlas_fill_frag));
wgpu::ShaderModule strokeFragmentShader =
m_fragmentShaderHandle.compileSPIRVShaderModule(
device,
render_atlas_stroke_frag,
std::size(render_atlas_stroke_frag));
wgpu::BlendState blendState = {
.color = {
.operation = wgpu::BlendOperation::Add,
.srcFactor = wgpu::BlendFactor::One,
.dstFactor = wgpu::BlendFactor::One,
}};
wgpu::ColorTargetState colorTargetState = {
.format = wgpu::TextureFormat::R16Float,
.blend = &blendState,
};
wgpu::FragmentState fragmentState = {
.module = fillFragmentShader,
.entryPoint = "main",
.targetCount = 1,
.targets = &colorTargetState,
};
wgpu::RenderPipelineDescriptor desc = {
.layout = pipelineLayout,
.vertex =
{
.module = vertexShader,
.entryPoint = "main",
.bufferCount = 1,
.buffers = &vertexBufferLayout,
},
.primitive =
{
.topology = wgpu::PrimitiveTopology::TriangleList,
.frontFace = kFrontFaceForOffscreenDraws,
.cullMode = wgpu::CullMode::Back,
},
.fragment = &fragmentState,
};
m_fillPipeline = device.CreateRenderPipeline(&desc);
blendState.color.operation = wgpu::BlendOperation::Max;
fragmentState.module = strokeFragmentShader;
m_strokePipeline = device.CreateRenderPipeline(&desc);
}
wgpu::BindGroupLayout perFlushBindingsLayout() const
{
return m_perFlushBindingsLayout;
}
wgpu::RenderPipeline fillPipeline() const { return m_fillPipeline; }
wgpu::RenderPipeline strokePipeline() const { return m_strokePipeline; }
private:
wgpu::BindGroupLayout m_perFlushBindingsLayout;
EmJsHandle m_vertexShaderHandle;
EmJsHandle m_fragmentShaderHandle;
wgpu::RenderPipeline m_fillPipeline;
wgpu::RenderPipeline m_strokePipeline;
};
// Draw paths and image meshes using the gradient and tessellation textures. // Draw paths and image meshes using the gradient and tessellation textures.
class RenderContextWebGPUImpl::DrawPipeline class RenderContextWebGPUImpl::DrawPipeline
{ {
@@ -840,12 +970,7 @@ RenderContextWebGPUImpl::RenderContextWebGPUImpl(
     wgpu::Device device,
     wgpu::Queue queue,
     const ContextOptions& contextOptions) :
-    m_device(device),
-    m_queue(queue),
-    m_contextOptions(contextOptions),
-    m_colorRampPipeline(std::make_unique<ColorRampPipeline>(m_device)),
-    m_tessellatePipeline(
-        std::make_unique<TessellatePipeline>(m_device, m_contextOptions))
+    m_device(device), m_queue(queue), m_contextOptions(contextOptions)
 {
     // All backends currently use raster ordered shaders.
     // TODO: update this flag once we have msaa and atomic modes.
@@ -856,23 +981,14 @@ RenderContextWebGPUImpl::RenderContextWebGPUImpl(
void RenderContextWebGPUImpl::initGPUObjects() void RenderContextWebGPUImpl::initGPUObjects()
{ {
wgpu::BindGroupLayoutEntry perFlushBindingLayouts[] = { m_perFlushBindingLayouts = {{
{ {
.binding = TESS_VERTEX_TEXTURE_IDX, .binding = FLUSH_UNIFORM_BUFFER_IDX,
.visibility = wgpu::ShaderStage::Vertex, .visibility =
.texture = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
.buffer =
{ {
.sampleType = wgpu::TextureSampleType::Uint, .type = wgpu::BufferBindingType::Uniform,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
},
{
.binding = GRAD_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Fragment,
.texture =
{
.sampleType = wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
}, },
}, },
m_contextOptions.disableStorageBuffers ? m_contextOptions.disableStorageBuffers ?
@@ -948,16 +1064,46 @@ void RenderContextWebGPUImpl::initGPUObjects()
}, },
}, },
{ {
.binding = FLUSH_UNIFORM_BUFFER_IDX, .binding = FEATHER_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Vertex, .visibility =
.buffer = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
.texture =
{ {
.type = wgpu::BufferBindingType::Uniform, .sampleType = wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
},
{
.binding = TESS_VERTEX_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Vertex,
.texture =
{
.sampleType = wgpu::TextureSampleType::Uint,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
},
{
.binding = ATLAS_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Fragment,
.texture =
{
.sampleType = wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
},
{
.binding = GRAD_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Fragment,
.texture =
{
.sampleType = wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
}, },
}, },
{ {
.binding = IMAGE_DRAW_UNIFORM_BUFFER_IDX, .binding = IMAGE_DRAW_UNIFORM_BUFFER_IDX,
.visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment, .visibility =
wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
.buffer = .buffer =
{ {
.type = wgpu::BufferBindingType::Uniform, .type = wgpu::BufferBindingType::Uniform,
@@ -965,11 +1111,14 @@ void RenderContextWebGPUImpl::initGPUObjects()
.minBindingSize = sizeof(gpu::ImageDrawUniforms), .minBindingSize = sizeof(gpu::ImageDrawUniforms),
}, },
}, },
}; }};
static_assert(DRAW_BINDINGS_COUNT == 10);
static_assert(sizeof(m_perFlushBindingLayouts) ==
DRAW_BINDINGS_COUNT * sizeof(wgpu::BindGroupLayoutEntry));
wgpu::BindGroupLayoutDescriptor perFlushBindingsDesc = { wgpu::BindGroupLayoutDescriptor perFlushBindingsDesc = {
.entryCount = std::size(perFlushBindingLayouts), .entryCount = DRAW_BINDINGS_COUNT,
.entries = perFlushBindingLayouts, .entries = m_perFlushBindingLayouts.data(),
}; };
m_drawBindGroupLayouts[PER_FLUSH_BINDINGS_SET] = m_drawBindGroupLayouts[PER_FLUSH_BINDINGS_SET] =
@@ -1004,6 +1153,23 @@ void RenderContextWebGPUImpl::initGPUObjects()
.type = wgpu::SamplerBindingType::Filtering, .type = wgpu::SamplerBindingType::Filtering,
}, },
}, },
{
.binding = FEATHER_TEXTURE_IDX,
.visibility =
wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
.sampler =
{
.type = wgpu::SamplerBindingType::Filtering,
},
},
{
.binding = ATLAS_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Fragment,
.sampler =
{
.type = wgpu::SamplerBindingType::Filtering,
},
},
{ {
.binding = IMAGE_TEXTURE_IDX, .binding = IMAGE_TEXTURE_IDX,
.visibility = wgpu::ShaderStage::Fragment, .visibility = wgpu::ShaderStage::Fragment,
@@ -1047,6 +1213,14 @@ void RenderContextWebGPUImpl::initGPUObjects()
.binding = GRAD_TEXTURE_IDX, .binding = GRAD_TEXTURE_IDX,
.sampler = m_linearSampler, .sampler = m_linearSampler,
}, },
{
.binding = FEATHER_TEXTURE_IDX,
.sampler = m_linearSampler,
},
{
.binding = ATLAS_TEXTURE_IDX,
.sampler = m_linearSampler,
},
{ {
.binding = IMAGE_TEXTURE_IDX, .binding = IMAGE_TEXTURE_IDX,
.sampler = m_mipmapSampler, .sampler = m_mipmapSampler,
@@ -1139,6 +1313,35 @@ void RenderContextWebGPUImpl::initGPUObjects()
m_pathPatchVertexBuffer.Unmap(); m_pathPatchVertexBuffer.Unmap();
m_pathPatchIndexBuffer.Unmap(); m_pathPatchIndexBuffer.Unmap();
wgpu::TextureDescriptor featherTextureDesc = {
.usage =
wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = wgpu::TextureDimension::e2D,
.size = {gpu::GAUSSIAN_TABLE_SIZE, FEATHER_TEXTURE_1D_ARRAY_LENGTH},
.format = wgpu::TextureFormat::R16Float,
};
m_featherTexture = m_device.CreateTexture(&featherTextureDesc);
write_texture(m_queue,
m_featherTexture,
0,
0,
sizeof(gpu::g_gaussianIntegralTableF16),
gpu::GAUSSIAN_TABLE_SIZE,
1,
gpu::g_gaussianIntegralTableF16,
sizeof(gpu::g_gaussianIntegralTableF16));
write_texture(m_queue,
m_featherTexture,
0,
1,
sizeof(gpu::g_inverseGaussianIntegralTableF16),
gpu::GAUSSIAN_TABLE_SIZE,
1,
gpu::g_inverseGaussianIntegralTableF16,
sizeof(gpu::g_inverseGaussianIntegralTableF16));
m_featherTextureView = m_featherTexture.CreateView();
wgpu::TextureDescriptor nullImagePaintTextureDesc = { wgpu::TextureDescriptor nullImagePaintTextureDesc = {
.usage = wgpu::TextureUsage::TextureBinding, .usage = wgpu::TextureUsage::TextureBinding,
.dimension = wgpu::TextureDimension::e2D, .dimension = wgpu::TextureDimension::e2D,
@@ -1149,6 +1352,10 @@ void RenderContextWebGPUImpl::initGPUObjects()
     m_nullImagePaintTexture =
         m_device.CreateTexture(&nullImagePaintTextureDesc);
     m_nullImagePaintTextureView = m_nullImagePaintTexture.CreateView();
+    m_colorRampPipeline = std::make_unique<ColorRampPipeline>(this);
+    m_tessellatePipeline = std::make_unique<TessellatePipeline>(this);
+    m_atlasPipeline = std::make_unique<AtlasPipeline>(this);
 }
 RenderContextWebGPUImpl::~RenderContextWebGPUImpl() {}
@@ -1317,6 +1524,8 @@ public:
         // TODO: implement mipmap generation.
         write_texture(queue,
                       m_texture,
+                      0,
+                      0,
                       width * 4,
                       width,
                       height,
@@ -1361,13 +1570,16 @@ public:
     BufferWebGPU(wgpu::Device device,
                  wgpu::Queue queue,
-                 size_t capacityInBytes,
+                 size_t capacityInBytesUnRounded,
                  wgpu::BufferUsage usage) :
-        BufferRing(std::max<size_t>(capacityInBytes, 1)), m_queue(queue)
+        // Storage buffers must be multiples of 4 in size.
+        BufferRing(math::round_up_to_multiple_of<4>(
+            std::max<size_t>(capacityInBytesUnRounded, 1))),
+        m_queue(queue)
     {
         wgpu::BufferDescriptor desc = {
             .usage = wgpu::BufferUsage::CopyDst | usage,
-            .size = capacityInBytes,
+            .size = capacityInBytes(),
         };
         for (int i = 0; i < gpu::kBufferRingSize; ++i)
         {
@@ -1555,6 +1767,23 @@ void RenderContextWebGPUImpl::resizeTessellationTexture(uint32_t width,
     m_tessVertexTextureView = m_tessVertexTexture.CreateView();
 }
+void RenderContextWebGPUImpl::resizeAtlasTexture(uint32_t width,
+                                                 uint32_t height)
+{
+    width = std::max(width, 1u);
+    height = std::max(height, 1u);
+    wgpu::TextureDescriptor desc{
+        .usage = wgpu::TextureUsage::RenderAttachment |
+                 wgpu::TextureUsage::TextureBinding,
+        .size = {static_cast<uint32_t>(width), static_cast<uint32_t>(height)},
+        .format = wgpu::TextureFormat::R16Float,
+    };
+    m_atlasTexture = m_device.CreateTexture(&desc);
+    m_atlasTextureView = m_atlasTexture.CreateView();
+}
 wgpu::RenderPipeline RenderContextWebGPUImpl::makeDrawPipeline(
     rive::gpu::DrawType drawType,
     wgpu::TextureFormat framebufferFormat,
@@ -1651,8 +1880,18 @@ wgpu::RenderPipeline RenderContextWebGPUImpl::makeDrawPipeline(
             RIVE_UNREACHABLE();
     }
+    wgpu::BlendState srcOverBlend = {
+        .color = {.dstFactor = wgpu::BlendFactor::OneMinusSrcAlpha},
+        .alpha = {.dstFactor = wgpu::BlendFactor::OneMinusSrcAlpha},
+    };
     wgpu::ColorTargetState colorTargets[] = {
-        {.format = framebufferFormat},
+        {
+            .format = framebufferFormat,
+            .blend = (m_contextOptions.plsType == PixelLocalStorageType::none)
+                         ? &srcOverBlend
+                         : nullptr,
+        },
         {.format = wgpu::TextureFormat::R32Uint},
         {.format = framebufferFormat},
         {.format = wgpu::TextureFormat::R32Uint},
@@ -1812,25 +2051,84 @@ void RenderContextWebGPUImpl::flush(const FlushDescriptor& desc)
} }
} }
wgpu::BindGroupEntry perFlushBindingEntries[DRAW_BINDINGS_COUNT] = {
{
.binding = FLUSH_UNIFORM_BUFFER_IDX,
.buffer = webgpu_buffer(flushUniformBufferRing()),
.offset = desc.flushUniformDataOffsetInBytes,
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = PATH_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(pathBufferRing())
} :
wgpu::BindGroupEntry{
.binding = PATH_BUFFER_IDX,
.buffer = webgpu_buffer(pathBufferRing()),
.offset = desc.firstPath * sizeof(gpu::PathData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = PAINT_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(paintBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = PAINT_BUFFER_IDX,
.buffer = webgpu_buffer(paintBufferRing()),
.offset = desc.firstPaint * sizeof(gpu::PaintData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = PAINT_AUX_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(paintAuxBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = PAINT_AUX_BUFFER_IDX,
.buffer = webgpu_buffer(paintAuxBufferRing()),
.offset = desc.firstPaintAux * sizeof(gpu::PaintAuxData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = CONTOUR_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(contourBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = CONTOUR_BUFFER_IDX,
.buffer = webgpu_buffer(contourBufferRing()),
.offset = desc.firstContour * sizeof(gpu::ContourData),
},
{
.binding = FEATHER_TEXTURE_IDX,
.textureView = m_featherTextureView,
},
{
.binding = TESS_VERTEX_TEXTURE_IDX,
.textureView = m_tessVertexTextureView,
},
{
.binding = ATLAS_TEXTURE_IDX,
.textureView = m_atlasTextureView,
},
{
.binding = GRAD_TEXTURE_IDX,
.textureView = m_gradientTextureView,
},
{
.binding = IMAGE_DRAW_UNIFORM_BUFFER_IDX,
.buffer = webgpu_buffer(imageDrawUniformBufferRing()),
.size = sizeof(gpu::ImageDrawUniforms),
},
};
// Render the complex color ramps to the gradient texture. // Render the complex color ramps to the gradient texture.
if (desc.gradDataHeight > 0) if (desc.gradDataHeight > 0)
{ {
wgpu::BindGroupEntry bindingEntries[] = { wgpu::BindGroupDescriptor colorRampBindGroupDesc = {
{
.binding = FLUSH_UNIFORM_BUFFER_IDX,
.buffer = webgpu_buffer(flushUniformBufferRing()),
.offset = desc.flushUniformDataOffsetInBytes,
},
};
wgpu::BindGroupDescriptor bindGroupDesc = {
.layout = m_colorRampPipeline->bindGroupLayout(), .layout = m_colorRampPipeline->bindGroupLayout(),
.entryCount = std::size(bindingEntries), .entryCount = COLOR_RAMP_BINDINGS_COUNT,
.entries = bindingEntries, .entries = perFlushBindingEntries,
}; };
wgpu::BindGroup bindings = m_device.CreateBindGroup(&bindGroupDesc);
wgpu::RenderPassColorAttachment attachment = { wgpu::RenderPassColorAttachment attachment = {
.view = m_gradientTextureView, .view = m_gradientTextureView,
.loadOp = wgpu::LoadOp::Clear, .loadOp = wgpu::LoadOp::Clear,
@@ -1856,7 +2154,9 @@ void RenderContextWebGPUImpl::flush(const FlushDescriptor& desc)
webgpu_buffer(gradSpanBufferRing()), webgpu_buffer(gradSpanBufferRing()),
desc.firstGradSpan * desc.firstGradSpan *
sizeof(gpu::GradientSpan)); sizeof(gpu::GradientSpan));
gradPass.SetBindGroup(0, bindings); gradPass.SetBindGroup(
0,
m_device.CreateBindGroup(&colorRampBindGroupDesc));
gradPass.Draw(gpu::GRAD_SPAN_TRI_STRIP_VERTEX_COUNT, gradPass.Draw(gpu::GRAD_SPAN_TRI_STRIP_VERTEX_COUNT,
desc.gradSpanCount, desc.gradSpanCount,
0, 0,
@@ -1867,42 +2167,12 @@ void RenderContextWebGPUImpl::flush(const FlushDescriptor& desc)
// Tessellate all curves into vertices in the tessellation texture. // Tessellate all curves into vertices in the tessellation texture.
if (desc.tessVertexSpanCount > 0) if (desc.tessVertexSpanCount > 0)
{ {
wgpu::BindGroupEntry bindingEntries[] = { wgpu::BindGroupDescriptor tessBindGroupDesc = {
m_contextOptions.disableStorageBuffers ? .layout = m_tessellatePipeline->perFlushBindingsLayout(),
wgpu::BindGroupEntry{ .entryCount = TESS_BINDINGS_COUNT,
.binding = PATH_BUFFER_IDX, .entries = perFlushBindingEntries,
.textureView = webgpu_storage_texture_view(pathBufferRing())
} :
wgpu::BindGroupEntry{
.binding = PATH_BUFFER_IDX,
.buffer = webgpu_buffer(pathBufferRing()),
.offset = desc.firstPath * sizeof(gpu::PathData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = CONTOUR_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(contourBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = CONTOUR_BUFFER_IDX,
.buffer = webgpu_buffer(contourBufferRing()),
.offset = desc.firstContour * sizeof(gpu::ContourData),
},
{
.binding = FLUSH_UNIFORM_BUFFER_IDX,
.buffer = webgpu_buffer(flushUniformBufferRing()),
.offset = desc.flushUniformDataOffsetInBytes,
},
}; };
wgpu::BindGroupDescriptor bindGroupDesc = {
.layout = m_tessellatePipeline->bindGroupLayout(),
.entryCount = std::size(bindingEntries),
.entries = bindingEntries,
};
wgpu::BindGroup bindings = m_device.CreateBindGroup(&bindGroupDesc);
wgpu::RenderPassColorAttachment attachment{ wgpu::RenderPassColorAttachment attachment{
.view = m_tessVertexTextureView, .view = m_tessVertexTextureView,
.loadOp = wgpu::LoadOp::Clear, .loadOp = wgpu::LoadOp::Clear,
@@ -1930,7 +2200,9 @@ void RenderContextWebGPUImpl::flush(const FlushDescriptor& desc)
sizeof(gpu::TessVertexSpan)); sizeof(gpu::TessVertexSpan));
tessPass.SetIndexBuffer(m_tessSpanIndexBuffer, tessPass.SetIndexBuffer(m_tessSpanIndexBuffer,
wgpu::IndexFormat::Uint16); wgpu::IndexFormat::Uint16);
tessPass.SetBindGroup(0, bindings); tessPass.SetBindGroup(PER_FLUSH_BINDINGS_SET,
m_device.CreateBindGroup(&tessBindGroupDesc));
tessPass.SetBindGroup(SAMPLER_BINDINGS_SET, m_samplerBindings);
tessPass.DrawIndexed(std::size(gpu::kTessSpanIndices), tessPass.DrawIndexed(std::size(gpu::kTessSpanIndices),
desc.tessVertexSpanCount, desc.tessVertexSpanCount,
0, 0,
@@ -1939,6 +2211,82 @@ void RenderContextWebGPUImpl::flush(const FlushDescriptor& desc)
tessPass.End(); tessPass.End();
} }
// Render the atlas if we have any offscreen feathers.
if ((desc.atlasFillBatchCount | desc.atlasStrokeBatchCount) != 0)
{
wgpu::BindGroupDescriptor atlasBindGroupDesc = {
.layout = m_atlasPipeline->perFlushBindingsLayout(),
.entryCount = ATLAS_BINDINGS_COUNT,
.entries = perFlushBindingEntries,
};
wgpu::RenderPassColorAttachment attachment{
.view = m_atlasTextureView,
.loadOp = wgpu::LoadOp::Clear,
.storeOp = wgpu::StoreOp::Store,
.clearValue = {0, 0, 0, 0},
};
wgpu::RenderPassDescriptor atlasPassDesc = {
.colorAttachmentCount = 1,
.colorAttachments = &attachment,
};
wgpu::RenderPassEncoder atlasPass =
encoder.BeginRenderPass(&atlasPassDesc);
atlasPass.SetViewport(0.f,
0.f,
desc.atlasContentWidth,
desc.atlasContentHeight,
0.0,
1.0);
atlasPass.SetVertexBuffer(0, m_pathPatchVertexBuffer);
atlasPass.SetIndexBuffer(m_pathPatchIndexBuffer,
wgpu::IndexFormat::Uint16);
atlasPass.SetBindGroup(PER_FLUSH_BINDINGS_SET,
m_device.CreateBindGroup(&atlasBindGroupDesc));
atlasPass.SetBindGroup(SAMPLER_BINDINGS_SET, m_samplerBindings);
if (desc.atlasFillBatchCount != 0)
{
atlasPass.SetPipeline(m_atlasPipeline->fillPipeline());
for (size_t i = 0; i < desc.atlasFillBatchCount; ++i)
{
const gpu::AtlasDrawBatch& fillBatch = desc.atlasFillBatches[i];
atlasPass.SetScissorRect(fillBatch.scissor.left,
fillBatch.scissor.top,
fillBatch.scissor.width(),
fillBatch.scissor.height());
atlasPass.DrawIndexed(gpu::kMidpointFanCenterAAPatchIndexCount,
fillBatch.patchCount,
gpu::kMidpointFanCenterAAPatchBaseIndex,
0,
fillBatch.basePatch);
}
}
if (desc.atlasStrokeBatchCount != 0)
{
atlasPass.SetPipeline(m_atlasPipeline->strokePipeline());
for (size_t i = 0; i < desc.atlasStrokeBatchCount; ++i)
{
const gpu::AtlasDrawBatch& strokeBatch =
desc.atlasStrokeBatches[i];
atlasPass.SetScissorRect(strokeBatch.scissor.left,
strokeBatch.scissor.top,
strokeBatch.scissor.width(),
strokeBatch.scissor.height());
atlasPass.DrawIndexed(gpu::kMidpointFanPatchBorderIndexCount,
strokeBatch.patchCount,
gpu::kMidpointFanPatchBaseIndex,
0,
strokeBatch.basePatch);
}
}
atlasPass.End();
}
wgpu::LoadOp loadOp;
wgpu::Color clearColor;
if (desc.colorLoadAction == LoadAction::clear)
@@ -2049,67 +2397,6 @@ void RenderContextWebGPUImpl::flush(const FlushDescriptor& desc)
drawPass.SetBindGroup(SAMPLER_BINDINGS_SET, m_samplerBindings);
wgpu::BindGroupEntry perFlushBindingEntries[] = {
{
.binding = TESS_VERTEX_TEXTURE_IDX,
.textureView = m_tessVertexTextureView,
},
{
.binding = GRAD_TEXTURE_IDX,
.textureView = m_gradientTextureView,
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = PATH_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(pathBufferRing())
} :
wgpu::BindGroupEntry{
.binding = PATH_BUFFER_IDX,
.buffer = webgpu_buffer(pathBufferRing()),
.offset = desc.firstPath * sizeof(gpu::PathData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = PAINT_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(paintBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = PAINT_BUFFER_IDX,
.buffer = webgpu_buffer(paintBufferRing()),
.offset = desc.firstPaint * sizeof(gpu::PaintData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = PAINT_AUX_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(paintAuxBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = PAINT_AUX_BUFFER_IDX,
.buffer = webgpu_buffer(paintAuxBufferRing()),
.offset = desc.firstPaintAux * sizeof(gpu::PaintAuxData),
},
m_contextOptions.disableStorageBuffers ?
wgpu::BindGroupEntry{
.binding = CONTOUR_BUFFER_IDX,
.textureView = webgpu_storage_texture_view(contourBufferRing()),
} :
wgpu::BindGroupEntry{
.binding = CONTOUR_BUFFER_IDX,
.buffer = webgpu_buffer(contourBufferRing()),
.offset = desc.firstContour * sizeof(gpu::ContourData),
},
{
.binding = FLUSH_UNIFORM_BUFFER_IDX,
.buffer = webgpu_buffer(flushUniformBufferRing()),
.offset = desc.flushUniformDataOffsetInBytes,
},
{
.binding = IMAGE_DRAW_UNIFORM_BUFFER_IDX,
.buffer = webgpu_buffer(imageDrawUniformBufferRing()),
.size = sizeof(gpu::ImageDrawUniforms),
},
};
wgpu::BindGroupDescriptor perFlushBindGroupDesc = {
    .layout = m_drawBindGroupLayouts[PER_FLUSH_BINDINGS_SET],
    .entryCount = std::size(perFlushBindingEntries),

View File
@@ -34,6 +34,7 @@ using namespace rive;
using namespace rive::gpu;
using PixelLocalStorageType = RenderContextWebGPUImpl::PixelLocalStorageType;
#ifdef RIVE_WEBGPU
static std::unique_ptr<RenderContext> s_renderContext;
static rcp<RenderTargetWebGPU> s_renderTarget;
static std::unique_ptr<Renderer> s_renderer;
@@ -61,8 +62,6 @@ void riveInitPlayer(int w,
    s_renderer = std::make_unique<RiveRenderer>(s_renderContext.get());
}
#ifdef RIVE_WEBGPU
EmJsHandle s_deviceHandle;
EmJsHandle s_queueHandle;
EmJsHandle s_textureViewHandle;
@@ -346,87 +345,21 @@ extern "C"
#endif
#ifdef RIVE_DAWN
#include "../path_fiddle/fiddle_context.hpp"
#include <GLFW/glfw3.h>
#ifndef __APPLE__
#define GLFW_EXPOSE_NATIVE_WIN32
#include <GLFW/glfw3native.h>
#endif
#include <fstream>
static GLFWwindow* window = nullptr;
static std::unique_ptr<FiddleContext> fiddleContextDawn;
static void glfw_error_callback(int code, const char* message)
{
    printf("GLFW error: %i - %s\n", code, message);
}
static void print_device_error(WGPUErrorType errorType,
const char* message,
void*)
{
const char* errorTypeName = "";
switch (errorType)
{
case WGPUErrorType_Validation:
errorTypeName = "Validation";
break;
case WGPUErrorType_OutOfMemory:
errorTypeName = "Out of memory";
break;
case WGPUErrorType_Unknown:
errorTypeName = "Unknown";
break;
case WGPUErrorType_DeviceLost:
errorTypeName = "Device lost";
break;
default:
RIVE_UNREACHABLE();
return;
}
printf("%s error: %s\n", errorTypeName, message);
}
static void device_lost_callback(WGPUDeviceLostReason reason,
const char* message,
void*)
{
printf("device lost: %s\n", message);
}
static void device_log_callback(WGPULoggingType type,
const char* message,
void*)
{
printf("Device log %s\n", message);
}
static GLFWwindow* s_window = nullptr;
static WGPUDevice s_backendDevice = {};
static wgpu::SwapChain s_swapchain = {};
static std::unique_ptr<dawn::native::Instance> s_instance;
#ifdef __APPLE__
extern float GetDawnWindowBackingScaleFactor(GLFWwindow*, bool retina);
extern std::unique_ptr<wgpu::ChainedStruct>
SetupDawnWindowAndGetSurfaceDescriptor(GLFWwindow*, bool retina);
#else
#define GLFW_EXPOSE_NATIVE_WIN32
#include <GLFW/glfw3.h>
#include <GLFW/glfw3native.h>
static std::unique_ptr<wgpu::ChainedStruct>
SetupDawnWindowAndGetSurfaceDescriptor(GLFWwindow* window, bool retina)
{
std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
desc->hwnd = glfwGetWin32Window(window);
desc->hinstance = GetModuleHandle(nullptr);
return std::move(desc);
}
#endif
#endif
#ifdef RIVE_DAWN
int main(int argc, const char** argv)
{
    // Cause stdout and stderr to print immediately without buffering.
@@ -447,151 +380,55 @@ int main(int argc, const char** argv)
glfwWindowHint(GLFW_COCOA_RETINA_FRAMEBUFFER, GLFW_TRUE);
glfwWindowHint(GLFW_FOCUS_ON_SHOW, GLFW_TRUE);
glfwWindowHint(GLFW_FLOATING, GLFW_TRUE);
- s_window = glfwCreateWindow(1600, 1600, "Rive Renderer", nullptr, nullptr);
- if (!s_window)
+ window = glfwCreateWindow(1600, 1600, "Rive Renderer", nullptr, nullptr);
+ if (!window)
{
    glfwTerminate();
    fprintf(stderr, "Failed to create window.\n");
    return -1;
}
- glfwSetWindowTitle(s_window, "Rive Renderer");
+ glfwSetWindowTitle(window, "Rive Renderer");
+ std::unique_ptr<FiddleContext> fiddleContextDawn =
+     FiddleContext::MakeDawnPLS({});
- WGPUInstanceDescriptor instanceDescriptor{};
- instanceDescriptor.features.timedWaitAnyEnable = true;
s_instance = std::make_unique<dawn::native::Instance>(&instanceDescriptor);
wgpu::RequestAdapterOptions adapterOptions = {};
// Get an adapter for the backend to use, and create the device.
auto adapters = s_instance->EnumerateAdapters(&adapterOptions);
wgpu::DawnAdapterPropertiesPowerPreference power_props{};
wgpu::AdapterProperties adapterProperties{};
adapterProperties.nextInChain = &power_props;
// Find the first adapter which satisfies the adapterType requirement.
auto isAdapterType = [&adapterProperties](const auto& adapter) -> bool {
adapter.GetProperties(&adapterProperties);
return adapterProperties.adapterType == wgpu::AdapterType::DiscreteGPU;
};
auto preferredAdapter =
std::find_if(adapters.begin(), adapters.end(), isAdapterType);
if (preferredAdapter == adapters.end())
{
fprintf(
stderr,
"Failed to find an adapter! Please try another adapter type.\n");
abort();
}
std::vector<const char*> enableToggleNames = {
"allow_unsafe_apis",
"turn_off_vsync",
// "skip_validation",
};
std::vector<const char*> disabledToggleNames;
WGPUDawnTogglesDescriptor toggles = {
.chain =
{
.next = nullptr,
.sType = WGPUSType_DawnTogglesDescriptor,
},
.enabledToggleCount = enableToggleNames.size(),
.enabledToggles = enableToggleNames.data(),
.disabledToggleCount = disabledToggleNames.size(),
.disabledToggles = disabledToggleNames.data(),
};
std::vector<WGPUFeatureName> requiredFeatures = {};
WGPUDeviceDescriptor deviceDesc = {
.nextInChain = reinterpret_cast<WGPUChainedStruct*>(&toggles),
.requiredFeatureCount = requiredFeatures.size(),
.requiredFeatures = requiredFeatures.data(),
};
s_backendDevice = preferredAdapter->CreateDevice(&deviceDesc);
DawnProcTable backendProcs = dawn::native::GetProcs();
dawnProcSetProcs(&backendProcs);
backendProcs.deviceSetUncapturedErrorCallback(s_backendDevice,
print_device_error,
nullptr);
backendProcs.deviceSetDeviceLostCallback(s_backendDevice,
device_lost_callback,
nullptr);
backendProcs.deviceSetLoggingCallback(s_backendDevice,
device_log_callback,
nullptr);
wgpu::Device device = wgpu::Device::Acquire(s_backendDevice);
int w, h;
- glfwGetFramebufferSize(s_window, &w, &h);
+ glfwGetFramebufferSize(window, &w, &h);
+ fiddleContextDawn->onSizeChanged(window, w, h, 1);
+ std::unique_ptr<Renderer> renderer = fiddleContextDawn->makeRenderer(w, h);
- // Create the swapchain
- auto surfaceChainedDesc =
SetupDawnWindowAndGetSurfaceDescriptor(s_window, true);
WGPUSurfaceDescriptor surfaceDesc = {
.nextInChain =
reinterpret_cast<WGPUChainedStruct*>(surfaceChainedDesc.get()),
};
WGPUSurface surface =
backendProcs.instanceCreateSurface(s_instance->Get(), &surfaceDesc);
+ const char* filename = argc > 1 ? argv[1] : "webgpu_player/rivs/tape.riv";
+ std::ifstream rivStream(filename, std::ios::binary);
- WGPUSwapChainDescriptor swapChainDesc = {
-     .usage = WGPUTextureUsage_RenderAttachment,
.format = WGPUTextureFormat_BGRA8Unorm,
.width = static_cast<uint32_t>(w),
.height = static_cast<uint32_t>(h),
.presentMode = WGPUPresentMode_Immediate, // No vsync.
};
WGPUSwapChain backendSwapChain =
backendProcs.deviceCreateSwapChain(s_backendDevice,
surface,
&swapChainDesc);
s_swapchain = wgpu::SwapChain::Acquire(backendSwapChain);
riveInitPlayer(w,
h,
/*invertRenderTargetFrontFace=*/false,
device.Get(),
device.GetQueue(),
PixelLocalStorageType::none,
8);
std::ifstream rivStream("../../../zzzgold/rivs/Santa_Claus.riv",
std::ios::binary);
std::vector<uint8_t> rivBytes(std::istreambuf_iterator<char>(rivStream),
                              {});
std::unique_ptr<File> rivFile =
-     File::import(rivBytes, s_renderContext.get());
+     File::import(rivBytes, fiddleContextDawn->factory());
std::unique_ptr<ArtboardInstance> artboard = rivFile->artboardDefault();
std::unique_ptr<Scene> scene = artboard->defaultScene();
scene->advanceAndApply(0);
double lastTimestamp = 0;
- while (!glfwWindowShouldClose(s_window))
+ while (!glfwWindowShouldClose(window))
{
    double timestamp = glfwGetTime();
-     s_renderTarget->setTargetTextureView(
-         s_swapchain.GetCurrentTextureView());
-     s_renderContext->beginFrame({
-         .renderTargetWidth = s_renderTarget->width(),
-         .renderTargetHeight = s_renderTarget->height(),
+     fiddleContextDawn->begin({
+         .renderTargetWidth = static_cast<uint32_t>(w),
+         .renderTargetHeight = static_cast<uint32_t>(h),
        .clearColor = 0xff8030ff,
    });
-     s_renderer->save();
-     s_renderer->transform(computeAlignment(rive::Fit::contain,
+     renderer->save();
+     renderer->transform(computeAlignment(rive::Fit::contain,
                                            rive::Alignment::center,
                                            rive::AABB(0, 0, w, h),
                                            artboard->bounds()));
-     scene->draw(s_renderer.get());
-     s_renderer->restore();
-     s_renderContext->flush({.renderTarget = s_renderTarget.get()});
-     s_swapchain.Present();
-     device.Tick();
+     scene->draw(renderer.get());
+     renderer->restore();
+     fiddleContextDawn->end(window);
+     fiddleContextDawn->tick();
    glfwPollEvents();
    if (lastTimestamp != 0)

View File

@@ -235,11 +235,18 @@ function rive_tools_project(name, project_kind)
filter({ 'kind:ConsoleApp or SharedLib or WindowedApp', 'options:with-dawn' })
do
libdirs({
RIVE_PLS_DIR .. '/dependencies/dawn/out/release/obj/src/dawn',
RIVE_PLS_DIR .. '/dependencies/dawn/out/release/obj/src/dawn/native',
RIVE_PLS_DIR .. '/dependencies/dawn/out/release/obj/src/dawn/platform',
RIVE_PLS_DIR .. '/dependencies/dawn/out/release/obj/src/dawn/platform',
})
    links({
-         'dawn_native_static',
-         'webgpu_dawn',
-         'dawn_platform_static',
-         'dawn_proc_static',
+         'winmm',
+         'webgpu_dawn',
+         'dawn_native_static',
+         'dawn_proc_static',
+         'dawn_platform_static',
    })
end
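For context, the 'options:with-dawn' filter above keys on a premake option that is expected to be registered elsewhere in the build scripts. A minimal usage sketch follows; the option registration and the target name are assumptions for illustration, not part of this commit.

    -- Hypothetical premake5 sketch (names assumed): register the option the
    -- filter above keys on, then generate a tool project that picks up the
    -- Dawn libdirs/links configured inside rive_tools_project().
    newoption({
        trigger = 'with-dawn',
        description = 'link tools against the Dawn build in dependencies/dawn/out/release',
    })

    rive_tools_project('webgpu_player', 'ConsoleApp')
    do
        files({ 'webgpu_player/webgpu_player.cpp' })
    end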