mirror of
https://github.com/lighttransport/tinyusdz.git
synced 2026-01-18 01:11:17 +01:00
Add parallel prim/primspec printing using task queue
Implemented parallel printing for Prim and PrimSpec to speed up Stage::ExportToString() and Layer print_layer() functions using the lock-free task queue. Features: - Parallel printing controlled by optional `parallel` parameter - Only enabled when TINYUSDZ_ENABLE_THREAD is defined - Auto-detects number of CPU cores (std::thread::hardware_concurrency()) - Configurable minimum prims threshold (default: 4 prims) - Falls back to sequential printing when not beneficial - Preserves original ordering of output Changes: - Added src/prim-pprint-parallel.hh: Parallel printing interface - Added src/prim-pprint-parallel.cc: Task queue-based implementation - Modified Stage::ExportToString(): Added parallel parameter - Modified print_layer(): Added parallel parameter - Updated CMakeLists.txt: Added new parallel printing files - Added sandbox/parallel-print-benchmark.cc: Benchmark tool Implementation: - Uses TaskQueue for lock-free work distribution - Worker threads consume tasks from queue - Each Prim/PrimSpec printed to separate buffer - Results concatenated in original order Testing: - Sequential printing: 9873ms (258MB output) - Parallel printing: 10345ms (258MB output) - ✓ Outputs match exactly - ✓ All unit tests pass Note: For files with few root prims, parallel overhead may exceed benefits. The min_prims_for_parallel threshold prevents this. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -463,6 +463,8 @@ set(TINYUSDZ_SOURCES
|
||||
${PROJECT_SOURCE_DIR}/src/typed-array.cc
|
||||
${PROJECT_SOURCE_DIR}/src/task-queue.cc
|
||||
${PROJECT_SOURCE_DIR}/src/task-queue.hh
|
||||
${PROJECT_SOURCE_DIR}/src/prim-pprint-parallel.cc
|
||||
${PROJECT_SOURCE_DIR}/src/prim-pprint-parallel.hh
|
||||
|
||||
)
|
||||
|
||||
|
||||
74
sandbox/parallel-print-benchmark.cc
Normal file
74
sandbox/parallel-print-benchmark.cc
Normal file
@@ -0,0 +1,74 @@
|
||||
// SPDX-License-Identifier: Apache 2.0
|
||||
// Simple benchmark to compare sequential vs parallel prim printing
|
||||
//
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include "stage.hh"
|
||||
#include "tinyusdz.hh"
|
||||
#include "io-util.hh"
|
||||
|
||||
using namespace tinyusdz;
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
if (argc < 2) {
|
||||
std::cerr << "Usage: " << argv[0] << " <usd_file>\n";
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::string filename = argv[1];
|
||||
std::string warn, err;
|
||||
|
||||
// Load USD file
|
||||
Stage stage;
|
||||
bool ret = LoadUSDFromFile(filename, &stage, &warn, &err);
|
||||
|
||||
if (!warn.empty()) {
|
||||
std::cout << "WARN: " << warn << "\n";
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
std::cerr << "Failed to load USD file: " << err << "\n";
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::cout << "Loaded USD file: " << filename << "\n";
|
||||
std::cout << "Number of root prims: " << stage.root_prims().size() << "\n\n";
|
||||
|
||||
// Benchmark sequential printing
|
||||
{
|
||||
auto start = std::chrono::high_resolution_clock::now();
|
||||
std::string result = stage.ExportToString(false, false); // Sequential
|
||||
auto end = std::chrono::high_resolution_clock::now();
|
||||
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||||
|
||||
std::cout << "Sequential printing:\n";
|
||||
std::cout << " Time: " << duration.count() << " ms\n";
|
||||
std::cout << " Output size: " << result.size() << " bytes\n\n";
|
||||
}
|
||||
|
||||
// Benchmark parallel printing
|
||||
{
|
||||
auto start = std::chrono::high_resolution_clock::now();
|
||||
std::string result = stage.ExportToString(false, true); // Parallel
|
||||
auto end = std::chrono::high_resolution_clock::now();
|
||||
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||||
|
||||
std::cout << "Parallel printing:\n";
|
||||
std::cout << " Time: " << duration.count() << " ms\n";
|
||||
std::cout << " Output size: " << result.size() << " bytes\n\n";
|
||||
}
|
||||
|
||||
// Verify both produce the same output
|
||||
std::string seq_result = stage.ExportToString(false, false);
|
||||
std::string par_result = stage.ExportToString(false, true);
|
||||
|
||||
if (seq_result == par_result) {
|
||||
std::cout << "✓ Sequential and parallel outputs match!\n";
|
||||
} else {
|
||||
std::cout << "✗ WARNING: Sequential and parallel outputs differ!\n";
|
||||
std::cout << " Sequential size: " << seq_result.size() << "\n";
|
||||
std::cout << " Parallel size: " << par_result.size() << "\n";
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -8,6 +8,7 @@
|
||||
#include "pprinter.hh"
|
||||
|
||||
#include "prim-pprint.hh"
|
||||
#include "prim-pprint-parallel.hh"
|
||||
#include "prim-types.hh"
|
||||
#include "layer.hh"
|
||||
#include "str-util.hh"
|
||||
@@ -4651,7 +4652,11 @@ std::string print_layer_metas(const LayerMetas &metas, const uint32_t indent) {
|
||||
return meta_ss.str();
|
||||
}
|
||||
|
||||
std::string print_layer(const Layer &layer, const uint32_t indent) {
|
||||
std::string print_layer(const Layer &layer, const uint32_t indent, bool parallel) {
|
||||
#if !defined(TINYUSDZ_ENABLE_THREAD)
|
||||
(void)parallel; // Threading disabled
|
||||
#endif
|
||||
|
||||
std::stringstream ss;
|
||||
|
||||
// FIXME: print magic-header outside of this function?
|
||||
@@ -4674,26 +4679,64 @@ std::string print_layer(const Layer &layer, const uint32_t indent) {
|
||||
primNameTable.emplace(item.first, &item.second);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < layer.metas().primChildren.size(); i++) {
|
||||
value::token nameTok = layer.metas().primChildren[i];
|
||||
// DCOUT(fmt::format("primChildren {}/{} = {}", i,
|
||||
// layer.metas().primChildren.size(), nameTok.str()));
|
||||
const auto it = primNameTable.find(nameTok.str());
|
||||
if (it != primNameTable.end()) {
|
||||
ss << prim::print_primspec((*it->second), indent);
|
||||
if (i != (layer.metas().primChildren.size() - 1)) {
|
||||
ss << "\n";
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
if (parallel) {
|
||||
// Parallel printing path
|
||||
std::vector<const PrimSpec*> ordered_primspecs;
|
||||
ordered_primspecs.reserve(layer.metas().primChildren.size());
|
||||
|
||||
for (size_t i = 0; i < layer.metas().primChildren.size(); i++) {
|
||||
value::token nameTok = layer.metas().primChildren[i];
|
||||
const auto it = primNameTable.find(nameTok.str());
|
||||
if (it != primNameTable.end()) {
|
||||
ordered_primspecs.push_back(it->second);
|
||||
}
|
||||
}
|
||||
|
||||
prim::ParallelPrintConfig config;
|
||||
ss << prim::print_primspecs_parallel(ordered_primspecs, indent, config);
|
||||
} else
|
||||
#endif // TINYUSDZ_ENABLE_THREAD
|
||||
{
|
||||
// Sequential printing path (original)
|
||||
for (size_t i = 0; i < layer.metas().primChildren.size(); i++) {
|
||||
value::token nameTok = layer.metas().primChildren[i];
|
||||
// DCOUT(fmt::format("primChildren {}/{} = {}", i,
|
||||
// layer.metas().primChildren.size(), nameTok.str()));
|
||||
const auto it = primNameTable.find(nameTok.str());
|
||||
if (it != primNameTable.end()) {
|
||||
ss << prim::print_primspec((*it->second), indent);
|
||||
if (i != (layer.metas().primChildren.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
} else {
|
||||
// TODO: Report warning?
|
||||
}
|
||||
} else {
|
||||
// TODO: Report warning?
|
||||
}
|
||||
}
|
||||
} else {
|
||||
size_t i = 0;
|
||||
for (const auto &item : layer.primspecs()) {
|
||||
ss << prim::print_primspec(item.second, indent);
|
||||
if (i != (layer.primspecs().size() - 1)) {
|
||||
ss << "\n";
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
if (parallel) {
|
||||
// Parallel printing path
|
||||
std::vector<const PrimSpec*> primspecs;
|
||||
primspecs.reserve(layer.primspecs().size());
|
||||
for (const auto &item : layer.primspecs()) {
|
||||
primspecs.push_back(&item.second);
|
||||
}
|
||||
|
||||
prim::ParallelPrintConfig config;
|
||||
ss << prim::print_primspecs_parallel(primspecs, indent, config);
|
||||
} else
|
||||
#endif // TINYUSDZ_ENABLE_THREAD
|
||||
{
|
||||
// Sequential printing path (original)
|
||||
size_t i = 0;
|
||||
for (const auto &item : layer.primspecs()) {
|
||||
ss << prim::print_primspec(item.second, indent);
|
||||
if (i != (layer.primspecs().size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -270,7 +270,7 @@ std::string print_props(const std::map<std::string, Property> &props,
|
||||
uint32_t indent);
|
||||
|
||||
std::string print_layer_metas(const LayerMetas &metas, const uint32_t indent);
|
||||
std::string print_layer(const Layer &layer, const uint32_t indent);
|
||||
std::string print_layer(const Layer &layer, const uint32_t indent, bool parallel = false);
|
||||
|
||||
std::string print_material_binding(const MaterialBinding *mb, const uint32_t indent);
|
||||
std::string print_collection(const Collection *coll, const uint32_t indent);
|
||||
|
||||
194
src/prim-pprint-parallel.cc
Normal file
194
src/prim-pprint-parallel.cc
Normal file
@@ -0,0 +1,194 @@
|
||||
// SPDX-License-Identifier: Apache 2.0
|
||||
// Copyright 2025-Present Light Transport Entertainment Inc.
|
||||
//
|
||||
// Parallel pretty-printing for Prim and PrimSpec
|
||||
//
|
||||
#include "prim-pprint-parallel.hh"
|
||||
#include "prim-pprint.hh"
|
||||
#include <sstream>
|
||||
|
||||
namespace tinyusdz {
|
||||
namespace prim {
|
||||
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
|
||||
// Worker function for printing Prims
|
||||
static void print_prim_worker(void* user_data) {
|
||||
PrintPrimTask* task = static_cast<PrintPrimTask*>(user_data);
|
||||
if (task && task->prim && task->output) {
|
||||
*(task->output) = print_prim(*(task->prim), task->indent);
|
||||
}
|
||||
}
|
||||
|
||||
// Worker function for printing PrimSpecs
|
||||
static void print_primspec_worker(void* user_data) {
|
||||
PrintPrimSpecTask* task = static_cast<PrintPrimSpecTask*>(user_data);
|
||||
if (task && task->primspec && task->output) {
|
||||
*(task->output) = print_primspec(*(task->primspec), task->indent);
|
||||
}
|
||||
}
|
||||
|
||||
std::string print_prims_parallel(
|
||||
const std::vector<const Prim*>& prims,
|
||||
uint32_t indent,
|
||||
const ParallelPrintConfig& config) {
|
||||
|
||||
// Check if parallel printing is worth it
|
||||
if (!config.enabled || prims.size() < config.min_prims_for_parallel) {
|
||||
// Fall back to sequential printing
|
||||
std::stringstream ss;
|
||||
for (size_t i = 0; i < prims.size(); i++) {
|
||||
if (prims[i]) {
|
||||
ss << print_prim(*prims[i], indent);
|
||||
if (i != (prims.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
// Prepare output buffers
|
||||
std::vector<std::string> outputs(prims.size());
|
||||
std::vector<PrintPrimTask> tasks(prims.size());
|
||||
|
||||
// Initialize tasks
|
||||
for (size_t i = 0; i < prims.size(); i++) {
|
||||
tasks[i] = PrintPrimTask(prims[i], indent, i, &outputs[i]);
|
||||
}
|
||||
|
||||
// Create task queue
|
||||
TaskQueue queue(config.task_queue_capacity);
|
||||
std::atomic<size_t> completed_tasks(0);
|
||||
std::atomic<bool> producer_done(false);
|
||||
|
||||
// Launch worker threads
|
||||
std::vector<std::thread> workers;
|
||||
workers.reserve(config.num_threads);
|
||||
|
||||
for (size_t t = 0; t < config.num_threads; t++) {
|
||||
workers.emplace_back([&queue, &completed_tasks, &producer_done]() {
|
||||
TaskItem task;
|
||||
while (!producer_done.load(std::memory_order_acquire) || !queue.Empty()) {
|
||||
if (queue.Pop(task)) {
|
||||
if (task.func) {
|
||||
task.func(task.user_data);
|
||||
completed_tasks.fetch_add(1, std::memory_order_relaxed);
|
||||
}
|
||||
} else {
|
||||
std::this_thread::yield();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Producer: push all tasks
|
||||
for (size_t i = 0; i < tasks.size(); i++) {
|
||||
while (!queue.Push(print_prim_worker, &tasks[i])) {
|
||||
std::this_thread::yield();
|
||||
}
|
||||
}
|
||||
|
||||
producer_done.store(true, std::memory_order_release);
|
||||
|
||||
// Wait for all workers to finish
|
||||
for (auto& worker : workers) {
|
||||
worker.join();
|
||||
}
|
||||
|
||||
// Concatenate results in original order
|
||||
std::stringstream ss;
|
||||
for (size_t i = 0; i < outputs.size(); i++) {
|
||||
ss << outputs[i];
|
||||
if (i != (outputs.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string print_primspecs_parallel(
|
||||
const std::vector<const PrimSpec*>& primspecs,
|
||||
uint32_t indent,
|
||||
const ParallelPrintConfig& config) {
|
||||
|
||||
// Check if parallel printing is worth it
|
||||
if (!config.enabled || primspecs.size() < config.min_prims_for_parallel) {
|
||||
// Fall back to sequential printing
|
||||
std::stringstream ss;
|
||||
for (size_t i = 0; i < primspecs.size(); i++) {
|
||||
if (primspecs[i]) {
|
||||
ss << print_primspec(*primspecs[i], indent);
|
||||
if (i != (primspecs.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
// Prepare output buffers
|
||||
std::vector<std::string> outputs(primspecs.size());
|
||||
std::vector<PrintPrimSpecTask> tasks(primspecs.size());
|
||||
|
||||
// Initialize tasks
|
||||
for (size_t i = 0; i < primspecs.size(); i++) {
|
||||
tasks[i] = PrintPrimSpecTask(primspecs[i], indent, i, &outputs[i]);
|
||||
}
|
||||
|
||||
// Create task queue
|
||||
TaskQueue queue(config.task_queue_capacity);
|
||||
std::atomic<size_t> completed_tasks(0);
|
||||
std::atomic<bool> producer_done(false);
|
||||
|
||||
// Launch worker threads
|
||||
std::vector<std::thread> workers;
|
||||
workers.reserve(config.num_threads);
|
||||
|
||||
for (size_t t = 0; t < config.num_threads; t++) {
|
||||
workers.emplace_back([&queue, &completed_tasks, &producer_done]() {
|
||||
TaskItem task;
|
||||
while (!producer_done.load(std::memory_order_acquire) || !queue.Empty()) {
|
||||
if (queue.Pop(task)) {
|
||||
if (task.func) {
|
||||
task.func(task.user_data);
|
||||
completed_tasks.fetch_add(1, std::memory_order_relaxed);
|
||||
}
|
||||
} else {
|
||||
std::this_thread::yield();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Producer: push all tasks
|
||||
for (size_t i = 0; i < tasks.size(); i++) {
|
||||
while (!queue.Push(print_primspec_worker, &tasks[i])) {
|
||||
std::this_thread::yield();
|
||||
}
|
||||
}
|
||||
|
||||
producer_done.store(true, std::memory_order_release);
|
||||
|
||||
// Wait for all workers to finish
|
||||
for (auto& worker : workers) {
|
||||
worker.join();
|
||||
}
|
||||
|
||||
// Concatenate results in original order
|
||||
std::stringstream ss;
|
||||
for (size_t i = 0; i < outputs.size(); i++) {
|
||||
ss << outputs[i];
|
||||
if (i != (outputs.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
#endif // TINYUSDZ_ENABLE_THREAD
|
||||
|
||||
} // namespace prim
|
||||
} // namespace tinyusdz
|
||||
99
src/prim-pprint-parallel.hh
Normal file
99
src/prim-pprint-parallel.hh
Normal file
@@ -0,0 +1,99 @@
|
||||
// SPDX-License-Identifier: Apache 2.0
|
||||
// Copyright 2025-Present Light Transport Entertainment Inc.
|
||||
//
|
||||
// Parallel pretty-printing for Prim and PrimSpec
|
||||
//
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
#include <thread>
|
||||
#include <atomic>
|
||||
#include "task-queue.hh"
|
||||
#endif
|
||||
|
||||
#include "prim-types.hh"
|
||||
#include "stage.hh"
|
||||
#include "layer.hh"
|
||||
|
||||
namespace tinyusdz {
|
||||
namespace prim {
|
||||
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
|
||||
///
|
||||
/// Configuration for parallel printing
|
||||
///
|
||||
struct ParallelPrintConfig {
|
||||
bool enabled = true; // Enable parallel printing
|
||||
size_t num_threads = 0; // 0 = auto-detect (std::thread::hardware_concurrency())
|
||||
size_t min_prims_for_parallel = 4; // Minimum number of prims to use parallel printing
|
||||
size_t task_queue_capacity = 1024; // Task queue capacity
|
||||
|
||||
ParallelPrintConfig() {
|
||||
// Auto-detect number of threads
|
||||
unsigned int hw_threads = std::thread::hardware_concurrency();
|
||||
num_threads = (hw_threads > 0) ? hw_threads : 4;
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Task data for printing a Prim
|
||||
///
|
||||
struct PrintPrimTask {
|
||||
const Prim* prim;
|
||||
uint32_t indent;
|
||||
size_t index; // Original index for ordering
|
||||
std::string* output; // Output buffer
|
||||
|
||||
PrintPrimTask() : prim(nullptr), indent(0), index(0), output(nullptr) {}
|
||||
PrintPrimTask(const Prim* p, uint32_t i, size_t idx, std::string* out)
|
||||
: prim(p), indent(i), index(idx), output(out) {}
|
||||
};
|
||||
|
||||
///
|
||||
/// Task data for printing a PrimSpec
|
||||
///
|
||||
struct PrintPrimSpecTask {
|
||||
const PrimSpec* primspec;
|
||||
uint32_t indent;
|
||||
size_t index; // Original index for ordering
|
||||
std::string* output; // Output buffer
|
||||
|
||||
PrintPrimSpecTask() : primspec(nullptr), indent(0), index(0), output(nullptr) {}
|
||||
PrintPrimSpecTask(const PrimSpec* ps, uint32_t i, size_t idx, std::string* out)
|
||||
: primspec(ps), indent(i), index(idx), output(out) {}
|
||||
};
|
||||
|
||||
///
|
||||
/// Print multiple Prims in parallel
|
||||
///
|
||||
/// @param[in] prims Vector of Prim pointers to print
|
||||
/// @param[in] indent Indentation level
|
||||
/// @param[in] config Parallel printing configuration
|
||||
/// @return Concatenated string of all printed prims
|
||||
///
|
||||
std::string print_prims_parallel(
|
||||
const std::vector<const Prim*>& prims,
|
||||
uint32_t indent,
|
||||
const ParallelPrintConfig& config = ParallelPrintConfig());
|
||||
|
||||
///
|
||||
/// Print multiple PrimSpecs in parallel
|
||||
///
|
||||
/// @param[in] primspecs Vector of PrimSpec pointers to print
|
||||
/// @param[in] indent Indentation level
|
||||
/// @param[in] config Parallel printing configuration
|
||||
/// @return Concatenated string of all printed primspecs
|
||||
///
|
||||
std::string print_primspecs_parallel(
|
||||
const std::vector<const PrimSpec*>& primspecs,
|
||||
uint32_t indent,
|
||||
const ParallelPrintConfig& config = ParallelPrintConfig());
|
||||
|
||||
#endif // TINYUSDZ_ENABLE_THREAD
|
||||
|
||||
} // namespace prim
|
||||
} // namespace tinyusdz
|
||||
77
src/stage.cc
77
src/stage.cc
@@ -25,6 +25,7 @@
|
||||
#include "io-util.hh"
|
||||
#include "pprinter.hh"
|
||||
#include "prim-pprint.hh"
|
||||
#include "prim-pprint-parallel.hh"
|
||||
#include "str-util.hh"
|
||||
#include "tiny-format.hh"
|
||||
#include "tinyusdz.hh"
|
||||
@@ -460,8 +461,11 @@ void PrimPrintRec(std::stringstream &ss, const Prim &prim, uint32_t indent) {
|
||||
|
||||
} // namespace
|
||||
|
||||
std::string Stage::ExportToString(bool relative_path) const {
|
||||
std::string Stage::ExportToString(bool relative_path, bool parallel) const {
|
||||
(void)relative_path; // TODO
|
||||
#if !defined(TINYUSDZ_ENABLE_THREAD)
|
||||
(void)parallel; // Threading disabled
|
||||
#endif
|
||||
|
||||
std::stringstream ss;
|
||||
|
||||
@@ -483,28 +487,65 @@ std::string Stage::ExportToString(bool relative_path) const {
|
||||
primNameTable.emplace(_root_nodes[i].element_name(), &_root_nodes[i]);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < stage_metas.primChildren.size(); i++) {
|
||||
value::token nameTok = stage_metas.primChildren[i];
|
||||
DCOUT(fmt::format("primChildren {}/{} = {}", i,
|
||||
stage_metas.primChildren.size(), nameTok.str()));
|
||||
const auto it = primNameTable.find(nameTok.str());
|
||||
if (it != primNameTable.end()) {
|
||||
//PrimPrintRec(ss, *(it->second), 0);
|
||||
ss << prim::print_prim(*(it->second), 0);
|
||||
if (i != (stage_metas.primChildren.size() - 1)) {
|
||||
ss << "\n";
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
if (parallel) {
|
||||
// Parallel printing path
|
||||
std::vector<const Prim*> ordered_prims;
|
||||
ordered_prims.reserve(stage_metas.primChildren.size());
|
||||
|
||||
for (size_t i = 0; i < stage_metas.primChildren.size(); i++) {
|
||||
value::token nameTok = stage_metas.primChildren[i];
|
||||
const auto it = primNameTable.find(nameTok.str());
|
||||
if (it != primNameTable.end()) {
|
||||
ordered_prims.push_back(it->second);
|
||||
}
|
||||
}
|
||||
|
||||
prim::ParallelPrintConfig config;
|
||||
ss << prim::print_prims_parallel(ordered_prims, 0, config);
|
||||
} else
|
||||
#endif // TINYUSDZ_ENABLE_THREAD
|
||||
{
|
||||
// Sequential printing path (original)
|
||||
for (size_t i = 0; i < stage_metas.primChildren.size(); i++) {
|
||||
value::token nameTok = stage_metas.primChildren[i];
|
||||
DCOUT(fmt::format("primChildren {}/{} = {}", i,
|
||||
stage_metas.primChildren.size(), nameTok.str()));
|
||||
const auto it = primNameTable.find(nameTok.str());
|
||||
if (it != primNameTable.end()) {
|
||||
//PrimPrintRec(ss, *(it->second), 0);
|
||||
ss << prim::print_prim(*(it->second), 0);
|
||||
if (i != (stage_metas.primChildren.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
} else {
|
||||
// TODO: Report warning?
|
||||
}
|
||||
} else {
|
||||
// TODO: Report warning?
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < _root_nodes.size(); i++) {
|
||||
//PrimPrintRec(ss, _root_nodes[i], 0);
|
||||
ss << prim::print_prim(_root_nodes[i], 0);
|
||||
#if defined(TINYUSDZ_ENABLE_THREAD)
|
||||
if (parallel) {
|
||||
// Parallel printing path
|
||||
std::vector<const Prim*> prims;
|
||||
prims.reserve(_root_nodes.size());
|
||||
for (size_t i = 0; i < _root_nodes.size(); i++) {
|
||||
prims.push_back(&_root_nodes[i]);
|
||||
}
|
||||
|
||||
if (i != (_root_nodes.size() - 1)) {
|
||||
ss << "\n";
|
||||
prim::ParallelPrintConfig config;
|
||||
ss << prim::print_prims_parallel(prims, 0, config);
|
||||
} else
|
||||
#endif // TINYUSDZ_ENABLE_THREAD
|
||||
{
|
||||
// Sequential printing path (original)
|
||||
for (size_t i = 0; i < _root_nodes.size(); i++) {
|
||||
//PrimPrintRec(ss, _root_nodes[i], 0);
|
||||
ss << prim::print_prim(_root_nodes[i], 0);
|
||||
|
||||
if (i != (_root_nodes.size() - 1)) {
|
||||
ss << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,8 +54,9 @@ class Stage {
|
||||
///
|
||||
/// Dump Stage as ASCII(USDA) representation.
|
||||
/// @param[in] relative_path (optional) Print Path as relative Path.
|
||||
/// @param[in] parallel (optional) Use parallel printing for Prims.
|
||||
///
|
||||
std::string ExportToString(bool relative_path = false) const;
|
||||
std::string ExportToString(bool relative_path = false, bool parallel = false) const;
|
||||
|
||||
// pxrUSD compat API end -------------------------------------
|
||||
|
||||
|
||||
Reference in New Issue
Block a user