feat(wgpu): Add a wgpu backend to the test suite (#10586) 48bb6538bd

Co-authored-by: Chris Dalton <99840794+csmartdalton@users.noreply.github.com>
csmartdalton
2025-09-16 18:32:10 +00:00
parent c8bda51ecd
commit edae0548c3
13 changed files with 703 additions and 52 deletions

View File

@@ -1 +1 @@
7220599bb1530c71b4ec6276281ad6231553e6d9
48bb6538bd3851219927e9f8a09d1d6d44040e2c

View File

@@ -14,7 +14,6 @@
namespace rive::gpu
{
class RenderContextWebGPUVulkan;
class RenderTargetWebGPU;
class RenderContextWebGPUImpl : public RenderContextHelperImpl
@@ -67,6 +66,11 @@ public:
virtual ~RenderContextWebGPUImpl();
wgpu::Device device() const { return m_device; }
wgpu::Queue queue() const { return m_queue; }
const ContextOptions& contextOptions() const { return m_contextOptions; }
const Capabilities& capabilities() const { return m_capabilities; }
virtual rcp<RenderTargetWebGPU> makeRenderTarget(wgpu::TextureFormat,
uint32_t width,
uint32_t height);
@@ -98,8 +102,6 @@ private:
wgpu::LoadOp,
const wgpu::Color& clearColor);
wgpu::Device device() const { return m_device; }
const ContextOptions& contextOptions() const { return m_contextOptions; }
wgpu::FrontFace frontFaceForRenderTargetDraws() const
{
return m_contextOptions.invertRenderTargetFrontFace
@@ -211,16 +213,16 @@ public:
void setTargetTextureView(wgpu::TextureView);
private:
friend class RenderContextWebGPUImpl;
friend class RenderContextWebGPUVulkan;
protected:
RenderTargetWebGPU(wgpu::Device device,
const RenderContextWebGPUImpl::Capabilities&,
wgpu::TextureFormat framebufferFormat,
uint32_t width,
uint32_t height);
private:
friend class RenderContextWebGPUImpl;
const wgpu::TextureFormat m_framebufferFormat;
wgpu::Texture m_coverageTexture;
@@ -232,4 +234,20 @@ private:
wgpu::TextureView m_clipTextureView;
wgpu::TextureView m_scratchColorTextureView;
};
class TextureWebGPUImpl : public Texture
{
public:
TextureWebGPUImpl(uint32_t width, uint32_t height, wgpu::Texture texture) :
Texture(width, height),
m_texture(std::move(texture)),
m_textureView(m_texture.CreateView())
{}
wgpu::TextureView textureView() const { return m_textureView; }
private:
wgpu::Texture m_texture;
wgpu::TextureView m_textureView;
};
} // namespace rive::gpu
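Because TextureWebGPUImpl now lives in this public header (it was previously a private class in the .cpp, removed in the next file), test code can wrap any wgpu::Texture as a Rive image. A minimal sketch of the pattern, assuming `device`, `width`, and `height` are in scope and the texture is created with TextureBinding usage; it mirrors how the new testing window builds its offscreen targets:

// Sketch: wrap a wgpu::Texture as a rive::RenderImage.
wgpu::TextureDescriptor desc = {
    .usage = wgpu::TextureUsage::TextureBinding |
             wgpu::TextureUsage::RenderAttachment,
    .dimension = wgpu::TextureDimension::e2D,
    .size = {width, height},
    .format = wgpu::TextureFormat::RGBA8Unorm,
};
auto texture = rive::make_rcp<rive::gpu::TextureWebGPUImpl>(
    width,
    height,
    device.CreateTexture(&desc));
auto image = rive::make_rcp<rive::RiveRenderImage>(std::move(texture));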

View File

@@ -1655,22 +1655,6 @@ rcp<RenderBuffer> RenderContextWebGPUImpl::makeRenderBuffer(
sizeInBytes);
}
class TextureWebGPUImpl : public Texture
{
public:
TextureWebGPUImpl(uint32_t width, uint32_t height, wgpu::Texture texture) :
Texture(width, height),
m_texture(std::move(texture)),
m_textureView(m_texture.CreateView())
{}
wgpu::TextureView textureView() const { return m_textureView; }
private:
wgpu::Texture m_texture;
wgpu::TextureView m_textureView;
};
#ifndef RIVE_WAGYU
// Blits texture-to-texture using a draw command.
class RenderContextWebGPUImpl::BlitTextureAsDrawPipeline

View File

@@ -168,18 +168,78 @@ std::unique_ptr<TCPClient> TCPClient::clone() const
return clone;
}
#ifdef __EMSCRIPTEN__
// Derived from:
// https://stackoverflow.com/questions/180947/base64-decode-snippet-in-c
static std::string base64_encode(const char* buf, unsigned int bufLen)
{
static const std::string base64_chars =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
std::string ret;
ret.reserve(((4u * bufLen / 3u) + 3u) & ~3u);
int i = 0;
int j = 0;
char char_array_3[3];
char char_array_4[4];
while (bufLen--)
{
char_array_3[i++] = *(buf++);
if (i == 3)
{
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) +
((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) +
((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for (i = 0; (i < 4); i++)
ret += base64_chars[char_array_4[i]];
i = 0;
}
}
if (i)
{
for (j = i; j < 3; j++)
char_array_3[j] = '\0';
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] =
((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] =
((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for (j = 0; (j < i + 1); j++)
ret += base64_chars[char_array_4[j]];
while ((i++ < 3))
ret += '=';
}
return ret;
}
uint32_t TCPClient::send(const char* data, uint32_t size)
{
#ifdef __EMSCRIPTEN__
if (emscripten_websocket_send_binary(m_sockfd,
const_cast<char*>(data),
size) < 0)
// Base64-encode our data and send it as text, in order to avoid landmines
// in binary data transmission.
std::string base64 = base64_encode(data, size);
if (emscripten_websocket_send_utf8_text(m_sockfd, base64.c_str()) < 0)
{
fprintf(stderr, "Failed to send %u bytes to websocket.\n", size);
fprintf(stderr,
"Failed to send %zu base64 chars to websocket.\n",
base64.size());
abort();
}
return size;
}
#else
uint32_t TCPClient::send(const char* data, uint32_t size)
{
size_t sent = ::send(m_sockfd, data, size, 0);
if (sent == -1)
{
@@ -187,8 +247,8 @@ uint32_t TCPClient::send(const char* data, uint32_t size)
abort();
}
return rive::math::lossless_numeric_cast<uint32_t>(sent);
#endif
}
#endif
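This encoder pairs with the Python websocket bridge later in the commit, which calls base64.b64decode on every text frame before forwarding the raw bytes to the TCP server. A quick sanity sketch (not part of the commit) checking the encoder against the RFC 4648 test vectors:

// Sketch: base64_encode should reproduce the RFC 4648 test vectors.
assert(base64_encode("", 0) == "");
assert(base64_encode("f", 1) == "Zg==");
assert(base64_encode("fo", 2) == "Zm8=");
assert(base64_encode("foo", 3) == "Zm9v");
assert(base64_encode("foob", 4) == "Zm9vYg==");
assert(base64_encode("fooba", 5) == "Zm9vYmE=");
assert(base64_encode("foobar", 6) == "Zm9vYmFy");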
uint32_t TCPClient::recv(char* buff, uint32_t size)
{

View File

@@ -37,6 +37,8 @@ const char* TestingWindow::BackendName(Backend backend)
return "angle";
case TestingWindow::Backend::dawn:
return "dawn";
case TestingWindow::Backend::wgpu:
return "wgpu";
case Backend::rhi:
return "rhi";
case TestingWindow::Backend::coregraphics:
@@ -184,6 +186,10 @@ TestingWindow::Backend TestingWindow::ParseBackend(const char* name,
{
return Backend::dawn;
}
if (nameStr == "wgpu")
{
return Backend::wgpu;
}
if (nameStr == "rhi")
{
return Backend::rhi;
@@ -325,6 +331,9 @@ TestingWindow* TestingWindow::Init(Backend backend,
visibility,
platformWindow);
break;
case Backend::wgpu:
s_TestingWindow = TestingWindow::MakeWGPU(backendParams);
break;
case Backend::rhi:
break;
case Backend::coregraphics:

View File

@@ -57,6 +57,7 @@ public:
angle,
dawn,
wgpu,
rhi,
coregraphics,
skia,
@@ -219,6 +220,7 @@ public:
static TestingWindow* MakeVulkanTexture(const BackendParams&);
static TestingWindow* MakeAndroidVulkan(const BackendParams&,
void* platformWindow);
static TestingWindow* MakeWGPU(const BackendParams&);
static TestingWindow* MakeSkia();
static TestingWindow* MakeNULL();

View File

@@ -266,6 +266,7 @@ public:
switch (backend)
{
case Backend::wgpu:
case Backend::rhi:
case Backend::coregraphics:
case Backend::skia:

View File

@@ -0,0 +1,551 @@
/*
* Copyright 2025 Rive
*/
#include "testing_window.hpp"
#if !defined(RIVE_WEBGPU) || (RIVE_WEBGPU < 2)
TestingWindow* TestingWindow::MakeWGPU(const BackendParams&) { return nullptr; }
#else
#include "common/offscreen_render_target.hpp"
#include "rive/renderer/rive_renderer.hpp"
#include "rive/renderer/rive_render_image.hpp"
#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp"
#ifdef RIVE_WAGYU
#include <webgpu/webgpu_wagyu.h>
#endif
#ifdef __EMSCRIPTEN__
#include <emscripten/emscripten.h>
#include <emscripten/html5.h>
#endif
namespace rive::gpu
{
static const char* wgpu_backend_name(wgpu::BackendType backendType)
{
switch (backendType)
{
case wgpu::BackendType::Undefined:
return "<unknown backend>";
case wgpu::BackendType::Null:
return "<null backend>";
case wgpu::BackendType::WebGPU:
return "WebGPU";
case wgpu::BackendType::D3D11:
return "D3D11";
case wgpu::BackendType::D3D12:
return "D3D12";
case wgpu::BackendType::Metal:
return "Metal";
case wgpu::BackendType::Vulkan:
return "Vulkan";
case wgpu::BackendType::OpenGL:
return "OpenGL";
case wgpu::BackendType::OpenGLES:
return "OpenGLES";
}
RIVE_UNREACHABLE();
}
static const char* pls_impl_name(
const RenderContextWebGPUImpl::Capabilities& capabilities)
{
#ifdef RIVE_WAGYU
switch (capabilities.plsType)
{
case RenderContextWebGPUImpl::PixelLocalStorageType::
GL_EXT_shader_pixel_local_storage:
return "GL_EXT_shader_pixel_local_storage";
case RenderContextWebGPUImpl::PixelLocalStorageType::
VK_EXT_rasterization_order_attachment_access:
return "VK_EXT_rasterization_order_attachment_access";
case RenderContextWebGPUImpl::PixelLocalStorageType::none:
break;
}
#endif
return "<no pixel local storage>";
}
class OffscreenRenderTargetWGPU : public rive_tests::OffscreenRenderTarget
{
public:
OffscreenRenderTargetWGPU(
rive::gpu::RenderContextWebGPUImpl* renderContextImpl,
wgpu::TextureFormat format,
uint32_t width,
uint32_t height) :
m_textureTarget(rive::make_rcp<TextureTarget>(renderContextImpl,
format,
width,
height))
{}
rive::RenderImage* asRenderImage() override
{
return m_textureTarget->renderImage();
}
rive::gpu::RenderTarget* asRenderTarget() override
{
return m_textureTarget.get();
}
private:
class TextureTarget : public RenderTargetWebGPU
{
public:
TextureTarget(rive::gpu::RenderContextWebGPUImpl* renderContextImpl,
wgpu::TextureFormat format,
uint32_t width,
uint32_t height) :
RenderTargetWebGPU(renderContextImpl->device(),
renderContextImpl->capabilities(),
format,
width,
height)
{
wgpu::TextureDescriptor textureDesc = {
.usage = wgpu::TextureUsage::TextureBinding |
wgpu::TextureUsage::RenderAttachment,
.dimension = wgpu::TextureDimension::e2D,
.size = {width, height},
.format = format,
};
#ifdef RIVE_WAGYU
if (renderContextImpl->capabilities().plsType ==
RenderContextWebGPUImpl::PixelLocalStorageType::
VK_EXT_rasterization_order_attachment_access)
{
textureDesc.usage |= static_cast<wgpu::TextureUsage>(
WGPUTextureUsage_WagyuInputAttachment);
}
#endif
auto texture = make_rcp<TextureWebGPUImpl>(
width,
height,
renderContextImpl->device().CreateTexture(&textureDesc));
setTargetTextureView(texture->textureView());
m_renderImage =
rive::make_rcp<rive::RiveRenderImage>(std::move(texture));
}
rive::RiveRenderImage* renderImage() const
{
return m_renderImage.get();
}
private:
rive::rcp<rive::RiveRenderImage> m_renderImage;
};
rive::rcp<TextureTarget> m_textureTarget;
};
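The constructor below turns WebGPU's callback-based RequestAdapter/RequestDevice into blocking calls: it spins on emscripten_sleep(), which unwinds the stack under -sASYNCIFY (enabled in the premake changes at the end of this commit) and yields to the browser so the callbacks can fire. A condensed sketch of the pattern, using the same wgpu C++ API the constructor uses:

// Sketch: block on an async WebGPU request under Emscripten ASYNCIFY.
wgpu::Adapter adapter;  // written by the callback
instance.RequestAdapter(
    {},
    wgpu::CallbackMode::AllowSpontaneous,
    [](wgpu::RequestAdapterStatus status,
       wgpu::Adapter result,
       wgpu::StringView /*message*/,
       wgpu::Adapter* out) {
        assert(status == wgpu::RequestAdapterStatus::Success);
        *out = result;
    },
    &adapter);
while (!adapter)
{
    emscripten_sleep(1);  // yield to the event loop until the callback runs
}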
class TestingWindowWGPU : public TestingWindow
{
public:
TestingWindowWGPU(const BackendParams& backendParams) :
m_backendParams(backendParams)
{
m_instance = wgpu::CreateInstance(nullptr);
assert(m_instance);
m_instance.RequestAdapter(
{},
wgpu::CallbackMode::AllowSpontaneous,
[](wgpu::RequestAdapterStatus status,
wgpu::Adapter adapter,
wgpu::StringView message,
TestingWindowWGPU* this_) {
assert(status == wgpu::RequestAdapterStatus::Success);
this_->m_adapter = adapter;
},
this);
while (!m_adapter)
{
emscripten_sleep(1);
}
m_adapter.RequestDevice(
{},
wgpu::CallbackMode::AllowSpontaneous,
[](wgpu::RequestDeviceStatus status,
wgpu::Device device,
wgpu::StringView message,
TestingWindowWGPU* this_) {
assert(status == wgpu::RequestDeviceStatus::Success);
this_->m_device = device;
},
this);
while (!m_device)
{
emscripten_sleep(1);
}
m_queue = m_device.GetQueue();
assert(m_queue);
{
wgpu::EmscriptenSurfaceSourceCanvasHTMLSelector htmlSelector;
htmlSelector.selector = "#canvas";
wgpu::SurfaceDescriptor surfaceDesc = {
.nextInChain = &htmlSelector,
};
m_surface = m_instance.CreateSurface(&surfaceDesc);
assert(m_surface);
int w, h;
emscripten_get_canvas_element_size("#canvas", &w, &h);
m_width = w;
m_height = h;
}
{
wgpu::SurfaceCapabilities capabilities;
m_surface.GetCapabilities(m_adapter, &capabilities);
assert(capabilities.formatCount > 0);
m_format = capabilities.formats[0];
assert(m_format != wgpu::TextureFormat::Undefined);
if (m_format != wgpu::TextureFormat::RGBA8Unorm &&
m_format != wgpu::TextureFormat::BGRA8Unorm)
{
m_format = wgpu::TextureFormat::RGBA8Unorm;
}
}
{
wgpu::SurfaceConfiguration conf = {
.device = m_device,
.format = m_format,
};
m_surface.Configure(&conf);
}
RenderContextWebGPUImpl::ContextOptions contextOptions;
m_renderContext = RenderContextWebGPUImpl::MakeContext(m_adapter,
m_device,
m_queue,
contextOptions);
wgpu::AdapterInfo adapterInfo;
m_adapter.GetInfo(&adapterInfo);
printf("==== WGPU device: %s %s %s (%s, %s) ====\n",
adapterInfo.vendor.data,
adapterInfo.device.data,
adapterInfo.description.data,
wgpu_backend_name(impl()->capabilities().backendType),
pls_impl_name(impl()->capabilities()));
}
rive::Factory* factory() override { return m_renderContext.get(); }
rive::gpu::RenderContext* renderContext() const override
{
return m_renderContext.get();
}
rcp<rive_tests::OffscreenRenderTarget> makeOffscreenRenderTarget(
uint32_t width,
uint32_t height,
bool riveRenderable) const override
{
return make_rcp<OffscreenRenderTargetWGPU>(
impl(),
// The format has no impact on whether Rive can render directly to
// the texture, but switch on that flag to test both formats.
//
// NOTE: The WebGPU backend currently has no code to handle
// non-renderable textures. GL_EXT_shader_pixel_local_storage has no
// such restrictions and
// VK_EXT_rasterization_order_attachment_access mode requires the
// texture to support input attachments.
riveRenderable ? wgpu::TextureFormat::RGBA8Unorm
: wgpu::TextureFormat::BGRA8Unorm,
width,
height);
}
void resize(int width, int height) override
{
if (m_width != width || m_height != height)
{
m_overflowTexture = {};
m_overflowTextureView = {};
m_pixelReadBuff = {};
TestingWindow::resize(width, height);
}
}
std::unique_ptr<rive::Renderer> beginFrame(
const FrameOptions& options) override
{
assert(m_currentCanvasTexture == nullptr);
wgpu::SurfaceTexture surfaceTexture;
m_surface.GetCurrentTexture(&surfaceTexture);
m_currentCanvasTexture = surfaceTexture.texture;
uint32_t surfaceWidth = m_currentCanvasTexture.GetWidth();
uint32_t surfaceHeight = m_currentCanvasTexture.GetHeight();
wgpu::TextureViewDescriptor textureViewDesc = {
.format = m_format,
.dimension = wgpu::TextureViewDimension::e2D,
};
m_currentCanvasTextureView =
m_currentCanvasTexture.CreateView(&textureViewDesc);
if (surfaceWidth < m_width || surfaceHeight < m_height)
{
if (!m_overflowTexture)
{
wgpu::TextureDescriptor overflowTextureDesc = {
.usage = wgpu::TextureUsage::RenderAttachment |
wgpu::TextureUsage::CopySrc,
.dimension = wgpu::TextureDimension::e2D,
.size = {m_width, m_height},
.format = m_format,
};
m_overflowTexture =
m_device.CreateTexture(&overflowTextureDesc);
m_overflowTextureView = m_overflowTexture.CreateView();
}
assert(m_overflowTexture.GetWidth() == m_width);
assert(m_overflowTexture.GetHeight() == m_height);
m_renderTarget =
m_renderContext->static_impl_cast<RenderContextWebGPUImpl>()
->makeRenderTarget(m_format, m_width, m_height);
m_renderTarget->setTargetTextureView(m_overflowTextureView);
}
else
{
m_renderTarget =
m_renderContext->static_impl_cast<RenderContextWebGPUImpl>()
->makeRenderTarget(m_format, surfaceWidth, surfaceHeight);
m_renderTarget->setTargetTextureView(m_currentCanvasTextureView);
}
rive::gpu::RenderContext::FrameDescriptor frameDescriptor = {
.renderTargetWidth = surfaceWidth,
.renderTargetHeight = surfaceHeight,
.loadAction = options.doClear
? rive::gpu::LoadAction::clear
: rive::gpu::LoadAction::preserveRenderTarget,
.clearColor = options.clearColor,
.msaaSampleCount = m_backendParams.msaa ? 4 : 0,
.disableRasterOrdering = options.disableRasterOrdering,
.wireframe = options.wireframe,
.clockwiseFillOverride =
m_backendParams.clockwise || options.clockwiseFillOverride,
.synthesizedFailureType = options.synthesizedFailureType,
};
m_renderContext->beginFrame(frameDescriptor);
return std::make_unique<RiveRenderer>(m_renderContext.get());
}
void flushPLSContext(RenderTarget* offscreenRenderTarget) final
{
wgpu::CommandEncoder encoder = m_device.CreateCommandEncoder();
m_renderContext->flush({
.renderTarget = m_renderTarget.get(),
.externalCommandBuffer = encoder.Get(),
});
wgpu::CommandBuffer commands = encoder.Finish();
m_queue.Submit(1, &commands);
}
void endFrame(std::vector<uint8_t>* pixelData) override
{
flushPLSContext(nullptr);
assert(m_currentCanvasTexture != nullptr);
if (m_overflowTexture)
{
// Blit the overflow texture back to the canvas.
wgpu::CommandEncoder encoder = m_device.CreateCommandEncoder();
wgpu::TexelCopyTextureInfo src = {
.texture = m_overflowTexture,
.origin = {0, 0, 0},
};
wgpu::TexelCopyTextureInfo dst = {
.texture = m_currentCanvasTexture,
.origin = {0, 0, 0},
};
wgpu::Extent3D copySize{
std::min(m_width, m_currentCanvasTexture.GetWidth()),
std::min(m_height, m_currentCanvasTexture.GetHeight()),
};
encoder.CopyTextureToTexture(&src, &dst, &copySize);
wgpu::CommandBuffer commands = encoder.Finish();
m_queue.Submit(1, &commands);
}
if (pixelData != nullptr)
{
assert(m_format == wgpu::TextureFormat::RGBA8Unorm ||
m_format == wgpu::TextureFormat::BGRA8Unorm);
const uint32_t rowBytesInReadBuff =
math::round_up_to_multiple_of<256>(m_width * 4);
bool invertY = false;
#ifdef RIVE_WAGYU
if (impl()->capabilities().backendType ==
wgpu::BackendType::OpenGLES)
{
invertY = true;
}
#endif
// Create a buffer to receive the pixels.
if (!m_pixelReadBuff)
{
wgpu::BufferDescriptor buffDesc{
.usage =
wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst,
.size = m_height * rowBytesInReadBuff,
};
m_pixelReadBuff = m_device.CreateBuffer(&buffDesc);
}
assert(m_pixelReadBuff.GetSize() == m_height * rowBytesInReadBuff);
// Blit the framebuffer into m_pixelReadBuff.
wgpu::CommandEncoder readEncoder = m_device.CreateCommandEncoder();
wgpu::TexelCopyTextureInfo srcTexture = {
.texture = m_overflowTexture ? m_overflowTexture
: m_currentCanvasTexture,
.origin = {0,
invertY ? m_renderTarget->height() - m_height : 0,
0},
};
wgpu::TexelCopyBufferInfo dstBuffer = {
.layout =
{
.offset = 0,
.bytesPerRow = rowBytesInReadBuff,
},
.buffer = m_pixelReadBuff,
};
wgpu::Extent3D copySize = {
.width = m_width,
.height = m_height,
};
readEncoder.CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
wgpu::CommandBuffer commands = readEncoder.Finish(NULL);
m_queue.Submit(1, &commands);
{
// Map m_pixelReadBuff.
bool mappingFinished = false;
m_pixelReadBuff.MapAsync(
wgpu::MapMode::Read,
0,
m_height * rowBytesInReadBuff,
wgpu::CallbackMode::AllowSpontaneous,
[](wgpu::MapAsyncStatus status,
wgpu::StringView message,
bool* mappingFinished) {
if (status != wgpu::MapAsyncStatus::Success)
{
fprintf(stderr,
"failed to map m_pixelReadBuff: %s\n",
message.data);
abort();
}
*mappingFinished = true;
},
&mappingFinished);
while (!mappingFinished)
{
emscripten_sleep(1);
}
}
// Copy the image data from m_pixelReadBuff to pixelData.
const size_t rowBytesInDst = m_width * 4;
pixelData->resize(m_height * rowBytesInDst);
const uint8_t* pixelReadBuffData = reinterpret_cast<const uint8_t*>(
m_pixelReadBuff.GetConstMappedRange());
for (size_t y = 0; y < m_height; ++y)
{
const uint8_t* src;
if (invertY)
{
src = &pixelReadBuffData[y * rowBytesInReadBuff];
}
else
{
src = &pixelReadBuffData[(m_height - y - 1) *
rowBytesInReadBuff];
}
uint8_t* dst = &(*pixelData)[y * rowBytesInDst];
if (m_format == wgpu::TextureFormat::RGBA8Unorm)
{
memcpy(dst, src, rowBytesInDst);
}
else
{
assert(m_format == wgpu::TextureFormat::BGRA8Unorm);
for (size_t x = 0; x < rowBytesInDst; x += 4)
{
// BGRA -> RGBA.
dst[x + 0] = src[x + 2];
dst[x + 1] = src[x + 1];
dst[x + 2] = src[x + 0];
dst[x + 3] = src[x + 3];
}
}
}
m_pixelReadBuff.Unmap();
}
m_currentCanvasTextureView = {};
m_currentCanvasTexture = {};
}
private:
RenderContextWebGPUImpl* impl() const
{
return m_renderContext->static_impl_cast<RenderContextWebGPUImpl>();
}
const BackendParams m_backendParams;
wgpu::Instance m_instance = nullptr;
wgpu::Adapter m_adapter;
wgpu::Device m_device;
wgpu::Surface m_surface;
wgpu::TextureFormat m_format = wgpu::TextureFormat::Undefined;
wgpu::Queue m_queue;
wgpu::Texture m_currentCanvasTexture;
wgpu::TextureView m_currentCanvasTextureView;
wgpu::Texture m_overflowTexture;
wgpu::TextureView m_overflowTextureView;
wgpu::Buffer m_pixelReadBuff;
std::unique_ptr<RenderContext> m_renderContext;
rcp<RenderTargetWebGPU> m_renderTarget;
};
} // namespace rive::gpu
TestingWindow* TestingWindow::MakeWGPU(const BackendParams& backendParams)
{
return new rive::gpu::TestingWindowWGPU(backendParams);
}
#endif
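A note on the readback path in endFrame() above: WebGPU requires bytesPerRow in a texture-to-buffer copy to be a multiple of 256, which is why rowBytesInReadBuff is rounded up and each row is copied out individually rather than with one large memcpy. A reduced sketch of that unpacking step (RGBA, no Y-flip), where `width`, `height`, and `mapped` are assumed in scope and `mapped` stands for the pointer returned by GetConstMappedRange():

// Sketch: strip per-row 256-byte alignment padding from a readback buffer.
const uint32_t rowBytesPadded =
    rive::math::round_up_to_multiple_of<256>(width * 4);
const size_t rowBytesTight = width * 4;
std::vector<uint8_t> pixels(height * rowBytesTight);
for (size_t y = 0; y < height; ++y)
{
    memcpy(&pixels[y * rowBytesTight],
           &mapped[y * rowBytesPadded],
           rowBytesTight);  // drop the alignment padding at the row tail
}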

View File

@@ -2,12 +2,15 @@
import argparse
import atexit
import base64
import glob
import http.server
import os
import pathlib
import platform
import queue
import re
import shlex
import shutil
import signal
import socket
@@ -30,7 +33,7 @@ parser.add_argument("tools",
help="which tool(s) to run")
parser.add_argument("-B", "--builddir",
type=str,
default=None,
default=os.getenv("RIVE_BUILDDIR"),
help="output directory from build")
parser.add_argument("-b", "--backend",
type=str,
@@ -78,7 +81,7 @@ parser.add_argument("-u", "--ios_udid",
default=None,
help="unique id of iOS device to run on (--target=ios or iossim)")
parser.add_argument("-c", "--webclient",
default=None,
default=os.getenv("RIVE_WEBCLIENT"),
help="executable to launch when --target=webserver")
parser.add_argument("-k", "--options",
type=str,
@@ -144,7 +147,7 @@ class text_colors:
# Launch a process in a separate thread and crash if it fails.
class CheckProcess(threading.Thread):
def __init__(self, cmd):
def __init__(self, cmd, env=None):
threading.Thread.__init__(self)
self.cmd = cmd
if args.server_only:
@@ -154,14 +157,16 @@ class CheckProcess(threading.Thread):
if shutil.which(self.cmd[0]) is None:
print(f'{text_colors.ERROR}' + self.cmd[0] + ' does not exist!' + f'{text_colors.ENDCOL}')
else:
self.proc = subprocess.Popen(self.cmd)
self.proc = subprocess.Popen(self.cmd, env=env)
self._did_terminate = False
def run(self):
self.proc.wait()
if self.proc.returncode != 0:
if not self._did_terminate and self.proc.returncode != 0:
os._exit(self.proc.returncode)
def terminate(self):
self._did_terminate = True
self.proc.terminate()
def get_local_ip():
@@ -239,7 +244,8 @@ def start_websocket_bridge(tcp_server_address):
try:
async for message in websocket:
if isinstance(message, str):
message = message.encode()
# Text sent to our websocket is always base64.
message = base64.b64decode(message)
writer.write(message)
await writer.drain()
except:
@@ -275,7 +281,7 @@ class ToolServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
def __init__(self, handler):
if args.remote:
if args.remote or args.target == "webserver":
# The device needs to connect over the network instead of localhost.
self.host = get_local_ip()
else:
@@ -471,7 +477,7 @@ class TestHarnessRequestHandler(socketserver.BaseRequestHandler):
# If we aren't deploying to the host, update the given command to deploy on its intended target.
def update_cmd_to_deploy_on_target(cmd, test_harness_server):
def update_cmd_to_deploy_on_target(cmd, test_harness_server, env):
dirname = os.path.dirname(cmd[0])
toolname = os.path.basename(cmd[0])
@@ -516,13 +522,14 @@ def update_cmd_to_deploy_on_target(cmd, test_harness_server):
return ["xcrun", "simctl", "launch", args.ios_udid, "rive.app.golden-test-app"] + cmd
elif args.target.startswith("web"):
env["RIVE_HTTP_SERVER_DIR"] = str(pathlib.Path(args.builddir).resolve())
if args.target == "webbrowser":
client = ["python3", "-m", "webbrowser", "-t"]
elif args.target == "webandroid":
client = ["adb", "shell", "am", "start", "-a",
"android.intent.action.VIEW", "-d"]
elif args.webclient:
client = [args.webclient]
client = shlex.split(args.webclient)
else:
client = ["echo", "\nPlease navigate your web client to:\n\n"]
return client + ["http://%s:%u/%s.html#%s" % (*test_harness_server.http_address,
@@ -536,6 +543,7 @@ def launch_gms(test_harness_server):
tool = os.path.join(args.builddir, "gms")
if platform.system() == "Windows" and args.target == 'host':
tool = tool + ".exe"
env = os.environ.copy()
cmd = [tool,
"--backend", args.backend,
"--test_harness", "%s:%u" % test_harness_server.server_address]
@@ -546,9 +554,9 @@ def launch_gms(test_harness_server):
cmd = cmd + ["--match", args.match];
if args.verbose:
cmd = cmd + ["--verbose"];
cmd = update_cmd_to_deploy_on_target(cmd, test_harness_server)
cmd = update_cmd_to_deploy_on_target(cmd, test_harness_server, env)
procs = [CheckProcess(cmd) for i in range(0, args.jobs_per_tool)]
procs = [CheckProcess(cmd, env) for i in range(0, args.jobs_per_tool)]
for proc in procs:
proc.start()
@@ -559,6 +567,7 @@ def launch_goldens(test_harness_server):
tool = os.path.join(args.builddir, "goldens")
if platform.system() == "Windows" and args.target == 'host':
tool = tool + ".exe"
env = os.environ.copy()
if args.verbose:
print("[server] Using '" + tool + "'", flush=True)
@@ -582,9 +591,9 @@ def launch_goldens(test_harness_server):
"-p%i" % args.png_threads];
if args.verbose:
cmd = cmd + ["--verbose"];
cmd = update_cmd_to_deploy_on_target(cmd, test_harness_server)
cmd = update_cmd_to_deploy_on_target(cmd, test_harness_server, env)
procs = [CheckProcess(cmd) for i in range(0, args.jobs_per_tool)]
procs = [CheckProcess(cmd, env) for i in range(0, args.jobs_per_tool)]
for proc in procs:
proc.start()
@@ -598,15 +607,16 @@ def launch_player(test_harness_server):
tool = os.path.join(args.builddir, "player")
if platform.system() == "Windows" and args.target == 'host':
tool = tool + ".exe"
env = os.environ.copy()
cmd = [tool,
"--test_harness", "%s:%u" % test_harness_server.server_address,
"--backend", args.backend]
if args.options:
cmd += ["--options", args.options]
cmd = update_cmd_to_deploy_on_target(cmd, test_harness_server)
cmd = update_cmd_to_deploy_on_target(cmd, test_harness_server, env)
rivsqueue.put(args.src)
player = CheckProcess(cmd)
player = CheckProcess(cmd, env)
player.start()
return player
@@ -671,7 +681,7 @@ def main():
elif args.target.startswith("web"):
args.jobs_per_tool = 1
if args.builddir == None:
args.builddir = f"out/wasm_debug"
args.builddir = "out/wasm_debug"
if args.backend == None:
args.backend = "gl"
else:
@@ -825,6 +835,11 @@ def main():
args.target.startswith("web"))
procs = []
# Serially deployed targets are finished once they've sent their
# shutdown event. If, 1.5 seconds after sending the shutdown event, they
# still haven't exited, terminate them.
SERIAL_TARGET_TERMINATE_SECONDS = 1.5
def keyboard_interrupt_handler(signal, frame):
if os.name == "nt":
print("^C", end="", flush=True)
@@ -842,7 +857,10 @@ def main():
procs = launch_gms(test_harness_server)
assert(len(procs) == 1)
test_harness_server.wait_for_shutdown_event()
procs[0].join()
procs[0].join(SERIAL_TARGET_TERMINATE_SECONDS)
if procs[0].is_alive():
procs[0].terminate()
procs[0].join()
procs = []
if args.target == "android":
force_stop_android_tests_apk()
@@ -857,7 +875,10 @@ def main():
procs = launch_goldens(test_harness_server)
assert(len(procs) == 1)
test_harness_server.wait_for_shutdown_event()
procs[0].join()
procs[0].join(SERIAL_TARGET_TERMINATE_SECONDS)
if procs[0].is_alive():
procs[0].terminate()
procs[0].join()
procs = []
if args.target == "android":
force_stop_android_tests_apk()
@@ -869,7 +890,10 @@ def main():
test_harness_server.reset_shutdown_event()
procs = [launch_player(test_harness_server)]
test_harness_server.wait_for_shutdown_event()
procs[0].join()
procs[0].join(SERIAL_TARGET_TERMINATE_SECONDS)
if procs[0].is_alive():
procs[0].terminate()
procs[0].join()
procs = []
# Wait for the processes to finish (if not in serial_deploy mode).

View File

@@ -428,7 +428,7 @@ int main(int argc, const char* argv[])
// we're done.
TestHarness::Instance().shutdown();
#ifdef __EMSCRIPTEN__
EM_ASM(window.close(););
EM_ASM(if (window && window.close) window.close(););
#endif
return 0;
}

View File

@@ -378,7 +378,7 @@ int main(int argc, const char* argv[])
// we're done.
TestHarness::Instance().shutdown();
#ifdef __EMSCRIPTEN__
EM_ASM(window.close(););
EM_ASM(if (window && window.close) window.close(););
#endif
return 0;
}

View File

@@ -183,7 +183,7 @@ public:
TestHarness::Instance().shutdown();
#ifdef __EMSCRIPTEN__
emscripten_cancel_main_loop();
EM_ASM(window.close(););
EM_ASM(if (window && window.close) window.close(););
#else
exit(0);
#endif

View File

@@ -317,6 +317,8 @@ function rive_tools_project(name, project_kind)
'-sASYNCIFY_IMPORTS="[async_sleep, wasi_snapshot_preview1.fd_write]"',
'-sASYNCIFY_STACK_SIZE=16384',
'-sGL_TESTING',
'-sALLOW_MEMORY_GROWTH',
'-sINITIAL_HEAP=134217728',
'-lwebsocket.js',
})
end
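For scale, 134217728 bytes is a 128 MiB initial heap; -sALLOW_MEMORY_GROWTH additionally lets the WASM heap grow past that allocation at runtime instead of aborting on exhaustion. The pre-existing -sASYNCIFY flags above are what allow the emscripten_sleep() polling loops in the new wgpu testing window to yield to the browser.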