move headers into potree directory and change logger interface
Some checks failed
Build Potree Converter / Build desktop (ubuntu-act_cpp-24.04) (pull_request) Failing after 17s
Build Potree Converter / Build iOS (pull_request) Failing after 16s
Build Potree Converter / Build desktop (mac_arm) (pull_request) Successful in 18s
Build Potree Converter / Build desktop (windows_x64) (pull_request) Successful in 44s

This commit is contained in:
2025-02-25 12:20:17 +02:00
parent 0d7de13381
commit 0b47900bb9
31 changed files with 2689 additions and 2713 deletions

View File

@@ -1,36 +0,0 @@
#pragma once
#include <string>
#include <fstream>
#include <mutex>
// Minimal logging facade for the converter: an abstract ILogger interface,
// a file-backed implementation, and a global logger pointer.
namespace Potree
{
// Severity of a log message.
// NOTE(review): unscoped enum — the enumerator ERROR can collide with the
// ERROR macro from <windows.h>; consider enum class. TODO confirm impact.
enum LogLevel
{
INFO,
WARN,
ERROR
};
// Abstract logging backend. Implementations receive the raw message text
// and its severity; formatting and filtering are up to the implementation.
class ILogger
{
public:
virtual void log(const std::string& msg, LogLevel level) = 0;
virtual ~ILogger() = default;
};
// ILogger implementation that writes to a log file. Declarations only;
// the definitions live in the corresponding .cpp file.
class Logger : public ILogger
{
public:
Logger(const std::string& logFile);
~Logger();
void log(const std::string& msg, LogLevel level) override;
private:
std::ofstream m_fout; // destination log file stream
std::mutex m_mtx; // presumably serializes concurrent log() calls — confirm in the .cpp
};
// Global logger instance; nullptr until the application installs one.
inline ILogger* g_logger = nullptr;
}

View File

@@ -1,177 +1,177 @@
#pragma once
#include "Vector3.h"
#include "Constants.h"
#include <unordered_map>
#include <iostream>
#include <vector>
#include <array>
#include <string_view>
namespace Potree
{
// Scalar element types a point attribute can consist of.
// NOTE: enumerator order is significant — typenameMapping below is
// indexed by static_cast<int>(AttributeType), so the two must stay in sync.
enum class AttributeType {
INT8,
INT16,
INT32,
INT64,
UINT8,
UINT16,
UINT32,
UINT64,
FLOAT,
DOUBLE,
UNDEFINED
};
// Lowercase name of each AttributeType, indexed by the enum's integer value.
inline constexpr std::array<std::string_view, static_cast<int>(AttributeType::UNDEFINED) + 1> typenameMapping =
{
"int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float", "double", "undefined"
};
// Returns the size in bytes of a single element of the given type
// (e.g. UINT16 -> 2). UNDEFINED — and any out-of-range value — yields 0,
// matching the old map's default-inserted value.
inline int getAttributeTypeSize(AttributeType type) {
	// A switch avoids constructing a heap-allocated unordered_map on
	// every call, which the previous implementation did.
	switch (type) {
	case AttributeType::INT8:
	case AttributeType::UINT8:
		return 1;
	case AttributeType::INT16:
	case AttributeType::UINT16:
		return 2;
	case AttributeType::INT32:
	case AttributeType::UINT32:
	case AttributeType::FLOAT:
		return 4;
	case AttributeType::INT64:
	case AttributeType::UINT64:
	case AttributeType::DOUBLE:
		return 8;
	case AttributeType::UNDEFINED:
	default:
		return 0;
	}
}
// Returns the lowercase name of the given type, e.g. "uint16".
inline std::string getAttributeTypename(AttributeType type)
{
	const auto index = static_cast<int>(type);
	const std::string_view label = typenameMapping[index];
	return std::string{ label };
}
// Inverse of getAttributeTypename: maps a lowercase type name
// (e.g. "uint16") back to its AttributeType.
// Terminates the process (exit code 123) on an unrecognized name,
// matching the converter's existing fatal-error convention.
inline AttributeType typenameToType(const std::string& name) {
	// Scan typenameMapping so the name<->enum association is defined in
	// exactly one place; the previous 11-branch if/else chain duplicated it.
	for (int i = 0; i <= static_cast<int>(AttributeType::UNDEFINED); i++) {
		if (name == typenameMapping[i]) {
			return static_cast<AttributeType>(i);
		}
	}
	// fixed typo in the error message: "unkown" -> "unknown"
	std::cout << "ERROR: unknown AttributeType: '" << name << "'" << std::endl;
	exit(123);
}
struct Attribute {
std::string name;
std::string description;
int size = 0;
int numElements = 0;
int elementSize = 0;
AttributeType type = AttributeType::UNDEFINED;
// TODO: should be type-dependent, not always double. won't work properly with 64 bit integers
Vector3 min = { Infinity };
Vector3 max = { -Infinity };
Vector3 scale = { 1.0, 1.0, 1.0 };
Vector3 offset = { 0.0, 0.0, 0.0 };
// histogram that counts occurances of points with same attribute value.
// only for 1 byte types, due to storage size
std::vector<int64_t> histogram = std::vector<int64_t>(256, 0);
Attribute() {
}
Attribute(const std::string& name, int size, int numElements, int elementSize, AttributeType type) {
this->name = name;
this->size = size;
this->numElements = numElements;
this->elementSize = elementSize;
this->type = type;
}
};
// The complete attribute layout of a point cloud: an ordered list of
// attributes plus the position scale/offset used for encoding.
struct Attributes {
	std::vector<Attribute> list;
	int bytes = 0; // bytes per point record (sum of all attribute sizes)
	Vector3 posScale = Vector3{ 1.0, 1.0, 1.0 };
	Vector3 posOffset = Vector3{ 0.0, 0.0, 0.0 };

	Attributes() = default;

	Attributes(const std::vector<Attribute>& attributes)
		: list(attributes) {
		for (const auto& attr : list) {
			bytes += attr.size;
		}
	}

	// Byte offset of the named attribute within a point record,
	// or -1 if no attribute with that name exists.
	int getOffset(const std::string& name) const {
		int offset = 0;
		for (const auto& attr : list) {
			if (attr.name == name) {
				return offset;
			}
			offset += attr.size;
		}
		return -1;
	}

	// Pointer to the named attribute, or nullptr if absent.
	Attribute* get(const std::string& name) {
		for (auto& attr : list) {
			if (attr.name == name) {
				return &attr;
			}
		}
		return nullptr;
	}
};
};
#pragma once
#include "Vector3.h"
#include "Constants.h"
#include <unordered_map>
#include <iostream>
#include <vector>
#include <array>
#include <string_view>
namespace Potree
{
// Scalar element types a point attribute can consist of.
// NOTE: enumerator order is significant — typenameMapping below is
// indexed by static_cast<int>(AttributeType), so the two must stay in sync.
enum class AttributeType {
INT8,
INT16,
INT32,
INT64,
UINT8,
UINT16,
UINT32,
UINT64,
FLOAT,
DOUBLE,
UNDEFINED
};
// Lowercase name of each AttributeType, indexed by the enum's integer value.
inline constexpr std::array<std::string_view, static_cast<int>(AttributeType::UNDEFINED) + 1> typenameMapping =
{
"int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float", "double", "undefined"
};
// Returns the size in bytes of a single element of the given type
// (e.g. UINT16 -> 2). UNDEFINED — and any out-of-range value — yields 0,
// matching the old map's default-inserted value.
inline int getAttributeTypeSize(AttributeType type) {
	// A switch avoids constructing a heap-allocated unordered_map on
	// every call, which the previous implementation did.
	switch (type) {
	case AttributeType::INT8:
	case AttributeType::UINT8:
		return 1;
	case AttributeType::INT16:
	case AttributeType::UINT16:
		return 2;
	case AttributeType::INT32:
	case AttributeType::UINT32:
	case AttributeType::FLOAT:
		return 4;
	case AttributeType::INT64:
	case AttributeType::UINT64:
	case AttributeType::DOUBLE:
		return 8;
	case AttributeType::UNDEFINED:
	default:
		return 0;
	}
}
// Returns the lowercase name of the given type, e.g. "uint16".
inline std::string getAttributeTypename(AttributeType type)
{
	const auto index = static_cast<int>(type);
	const std::string_view label = typenameMapping[index];
	return std::string{ label };
}
// Inverse of getAttributeTypename: maps a lowercase type name
// (e.g. "uint16") back to its AttributeType.
// Terminates the process (exit code 123) on an unrecognized name,
// matching the converter's existing fatal-error convention.
inline AttributeType typenameToType(const std::string& name) {
	// Scan typenameMapping so the name<->enum association is defined in
	// exactly one place; the previous 11-branch if/else chain duplicated it.
	for (int i = 0; i <= static_cast<int>(AttributeType::UNDEFINED); i++) {
		if (name == typenameMapping[i]) {
			return static_cast<AttributeType>(i);
		}
	}
	// fixed typo in the error message: "unkown" -> "unknown"
	std::cout << "ERROR: unknown AttributeType: '" << name << "'" << std::endl;
	exit(123);
}
struct Attribute {
std::string name;
std::string description;
int size = 0;
int numElements = 0;
int elementSize = 0;
AttributeType type = AttributeType::UNDEFINED;
// TODO: should be type-dependent, not always double. won't work properly with 64 bit integers
Vector3 min = { Infinity };
Vector3 max = { -Infinity };
Vector3 scale = { 1.0, 1.0, 1.0 };
Vector3 offset = { 0.0, 0.0, 0.0 };
// histogram that counts occurances of points with same attribute value.
// only for 1 byte types, due to storage size
std::vector<int64_t> histogram = std::vector<int64_t>(256, 0);
Attribute() {
}
Attribute(const std::string& name, int size, int numElements, int elementSize, AttributeType type) {
this->name = name;
this->size = size;
this->numElements = numElements;
this->elementSize = elementSize;
this->type = type;
}
};
// The complete attribute layout of a point cloud: an ordered list of
// attributes plus the position scale/offset used for encoding.
struct Attributes {
	std::vector<Attribute> list;
	int bytes = 0; // bytes per point record (sum of all attribute sizes)
	Vector3 posScale = Vector3{ 1.0, 1.0, 1.0 };
	Vector3 posOffset = Vector3{ 0.0, 0.0, 0.0 };

	Attributes() = default;

	Attributes(const std::vector<Attribute>& attributes)
		: list(attributes) {
		for (const auto& attr : list) {
			bytes += attr.size;
		}
	}

	// Byte offset of the named attribute within a point record,
	// or -1 if no attribute with that name exists.
	int getOffset(const std::string& name) const {
		int offset = 0;
		for (const auto& attr : list) {
			if (attr.name == name) {
				return offset;
			}
			offset += attr.size;
		}
		return -1;
	}

	// Pointer to the named attribute, or nullptr if absent.
	Attribute* get(const std::string& name) {
		for (auto& attr : list) {
			if (attr.name == name) {
				return &attr;
			}
		}
		return nullptr;
	}
};
};

View File

@@ -1,194 +1,194 @@
#pragma once
#include "unsuck/unsuck.hpp"
#include "converter_utils.h"
namespace Potree
{
// Asynchronous file writer: producers enqueue (path, buffer) pairs via
// write(), and a pool of flush threads appends them to disk. A path is
// only ever flushed by one thread at a time (tracked in `locks`), so
// appends to the same file keep their queue order.
// NOTE(review): the monitor thread captures `state` by reference, so the
// State object must outlive this writer — confirm at call sites.
struct ConcurrentWriter {
	// path -> buffers queued for appending to that file
	std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>> todo;
	// paths currently being flushed by a worker (mapped value is unused)
	std::unordered_map<std::string, int> locks;
	std::atomic_int64_t todoBytes = 0;     // bytes queued but not yet written
	std::atomic_int64_t writtenBytes = 0;  // bytes flushed to disk so far
	std::vector<std::thread> threads;
	std::mutex mtx_todo;
	size_t numThreads = 1;
	bool joinRequested = false;
	std::mutex mtx_join;
	double tStart = 0;

	ConcurrentWriter(size_t numThreads, State& state) {
		this->numThreads = numThreads;
		this->tStart = now();
		// fix: size_t index (numThreads is size_t) avoids a signed/unsigned comparison
		for (size_t i = 0; i < numThreads; i++) {
			threads.emplace_back([&]() {
				flushThread();
			});
		}
		using namespace std::chrono_literals;
		// monitor thread: updates the reported state until all queued work
		// has been flushed and join() has been requested
		threads.emplace_back([&]() {
			while (true) {
				{
					std::lock_guard<std::mutex> lockT(mtx_todo);
					std::lock_guard<std::mutex> lockJ(mtx_join);
					bool nothingTodo = todo.size() == 0;
					if (nothingTodo && joinRequested) {
						return;
					}
					// fix: removed mbTodo/mbDone/duration/throughput locals that
					// were computed every tick but never used
					state.name = "DISTRIBUTING";
				}
				std::this_thread::sleep_for(100ms);
			}
		});
	}

	~ConcurrentWriter() {
		// ensure all pending buffers hit disk before destruction
		this->join();
	}

	// Blocks the caller until the backlog of unwritten data drops below the
	// given limit (in megabytes). Used as backpressure for producers.
	void waitUntilMemoryBelow(int64_t maxMegabytesOutstanding) {
		using namespace std::chrono_literals;
		while (todoBytes / (1024 * 1024) > maxMegabytesOutstanding) {
			std::this_thread::sleep_for(10ms);
		}
	}

	// Worker loop: claims all queued buffers of one unlocked path, appends
	// them to that file, then releases the path. Returns once join() has
	// been requested and the queue is empty.
	void flushThread() {
		using namespace std::chrono_literals;
		while (true) {
			std::string path = "";
			std::vector<std::shared_ptr<Buffer>> work;
			{
				std::lock_guard<std::mutex> lockT(mtx_todo);
				std::lock_guard<std::mutex> lockJ(mtx_join);
				bool nothingTodo = todo.size() == 0;
				if (nothingTodo && joinRequested) {
					return;
				}
				else {
					// pick a queued path that no other worker is currently flushing
					auto it = todo.begin();
					while (it != todo.end()) {
						// fix: renamed from `path` — no longer shadows the outer variable
						const std::string& candidate = it->first;
						if (locks.find(candidate) == locks.end()) {
							break;
						}
						it++;
					}
					if (it != todo.end()) {
						path = it->first;
						work = it->second;
						todo.erase(it);
						locks[path] = 1; // mark path as in-flight
					}
				}
			}
			// if no work available, sleep and try again later
			if (work.size() == 0) {
				std::this_thread::sleep_for(10ms);
				continue;
			}
			std::fstream fout;
			fout.open(path, std::ios::out | std::ios::app | std::ios::binary);
			// fix: iterate by const& to avoid a shared_ptr refcount bump per buffer
			for (const auto& batch : work) {
				fout.write(batch->data_char, batch->size);
				todoBytes -= batch->size;
				writtenBytes += batch->size;
			}
			fout.close();
			{
				std::lock_guard<std::mutex> lockT(mtx_todo);
				std::lock_guard<std::mutex> lockJ(mtx_join);
				auto itLocks = locks.find(path);
				locks.erase(itLocks);
			}
		}
	}

	// Enqueues a buffer for asynchronous appending to `path`.
	void write(const std::string& path, const std::shared_ptr<Buffer>& data) {
		std::lock_guard<std::mutex> lock(mtx_todo);
		todoBytes += data->size;
		todo[path].push_back(data);
	}

	// Requests shutdown and blocks until all worker threads have flushed
	// their remaining work and exited. Safe to call multiple times.
	void join() {
		{
			std::lock_guard<std::mutex> lock(mtx_join);
			joinRequested = true;
		}
		for (auto& t : threads) {
			t.join();
		}
		threads.clear();
	}
};
}
#pragma once
#include "unsuck/unsuck.hpp"
#include "converter_utils.h"
namespace Potree
{
// Asynchronous file writer: producers enqueue (path, buffer) pairs via
// write(), and a pool of flush threads appends them to disk. A path is
// only ever flushed by one thread at a time (tracked in `locks`), so
// appends to the same file keep their queue order.
// NOTE(review): the monitor thread captures `state` by reference, so the
// State object must outlive this writer — confirm at call sites.
struct ConcurrentWriter {
	// path -> buffers queued for appending to that file
	std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>> todo;
	// paths currently being flushed by a worker (mapped value is unused)
	std::unordered_map<std::string, int> locks;
	std::atomic_int64_t todoBytes = 0;     // bytes queued but not yet written
	std::atomic_int64_t writtenBytes = 0;  // bytes flushed to disk so far
	std::vector<std::thread> threads;
	std::mutex mtx_todo;
	size_t numThreads = 1;
	bool joinRequested = false;
	std::mutex mtx_join;
	double tStart = 0;

	ConcurrentWriter(size_t numThreads, State& state) {
		this->numThreads = numThreads;
		this->tStart = now();
		// fix: size_t index (numThreads is size_t) avoids a signed/unsigned comparison
		for (size_t i = 0; i < numThreads; i++) {
			threads.emplace_back([&]() {
				flushThread();
			});
		}
		using namespace std::chrono_literals;
		// monitor thread: updates the reported state until all queued work
		// has been flushed and join() has been requested
		threads.emplace_back([&]() {
			while (true) {
				{
					std::lock_guard<std::mutex> lockT(mtx_todo);
					std::lock_guard<std::mutex> lockJ(mtx_join);
					bool nothingTodo = todo.size() == 0;
					if (nothingTodo && joinRequested) {
						return;
					}
					// fix: removed mbTodo/mbDone/duration/throughput locals that
					// were computed every tick but never used
					state.name = "DISTRIBUTING";
				}
				std::this_thread::sleep_for(100ms);
			}
		});
	}

	~ConcurrentWriter() {
		// ensure all pending buffers hit disk before destruction
		this->join();
	}

	// Blocks the caller until the backlog of unwritten data drops below the
	// given limit (in megabytes). Used as backpressure for producers.
	void waitUntilMemoryBelow(int64_t maxMegabytesOutstanding) {
		using namespace std::chrono_literals;
		while (todoBytes / (1024 * 1024) > maxMegabytesOutstanding) {
			std::this_thread::sleep_for(10ms);
		}
	}

	// Worker loop: claims all queued buffers of one unlocked path, appends
	// them to that file, then releases the path. Returns once join() has
	// been requested and the queue is empty.
	void flushThread() {
		using namespace std::chrono_literals;
		while (true) {
			std::string path = "";
			std::vector<std::shared_ptr<Buffer>> work;
			{
				std::lock_guard<std::mutex> lockT(mtx_todo);
				std::lock_guard<std::mutex> lockJ(mtx_join);
				bool nothingTodo = todo.size() == 0;
				if (nothingTodo && joinRequested) {
					return;
				}
				else {
					// pick a queued path that no other worker is currently flushing
					auto it = todo.begin();
					while (it != todo.end()) {
						// fix: renamed from `path` — no longer shadows the outer variable
						const std::string& candidate = it->first;
						if (locks.find(candidate) == locks.end()) {
							break;
						}
						it++;
					}
					if (it != todo.end()) {
						path = it->first;
						work = it->second;
						todo.erase(it);
						locks[path] = 1; // mark path as in-flight
					}
				}
			}
			// if no work available, sleep and try again later
			if (work.size() == 0) {
				std::this_thread::sleep_for(10ms);
				continue;
			}
			std::fstream fout;
			fout.open(path, std::ios::out | std::ios::app | std::ios::binary);
			// fix: iterate by const& to avoid a shared_ptr refcount bump per buffer
			for (const auto& batch : work) {
				fout.write(batch->data_char, batch->size);
				todoBytes -= batch->size;
				writtenBytes += batch->size;
			}
			fout.close();
			{
				std::lock_guard<std::mutex> lockT(mtx_todo);
				std::lock_guard<std::mutex> lockJ(mtx_join);
				auto itLocks = locks.find(path);
				locks.erase(itLocks);
			}
		}
	}

	// Enqueues a buffer for asynchronous appending to `path`.
	void write(const std::string& path, const std::shared_ptr<Buffer>& data) {
		std::lock_guard<std::mutex> lock(mtx_todo);
		todoBytes += data->size;
		todo[path].push_back(data);
	}

	// Requests shutdown and blocks until all worker threads have flushed
	// their remaining work and exited. Safe to call multiple times.
	void join() {
		{
			std::lock_guard<std::mutex> lock(mtx_join);
			joinRequested = true;
		}
		for (auto& t : threads) {
			t.join();
		}
		threads.clear();
	}
};
}

View File

@@ -1,356 +1,356 @@
#pragma once
#include "structures.h"
namespace Potree
{
// Helpers for hierarchy construction.
namespace HB {
// Sorts nodes into breadth-first order: shorter names first
// (node names encode the path from the root, so name length
// corresponds to tree depth), lexicographically within equal length.
template<class T>
void sortBreadthFirst(std::vector<T>& nodes) {
sort(nodes.begin(), nodes.end(), [](const T& a, const T& b) {
if (a.name.size() != b.name.size()) {
return a.name.size() < b.name.size();
}
else {
return a.name < b.name;
}
});
};
};
// Merges the per-batch hierarchy files ("<path>/*.bin") produced during
// conversion into a single "hierarchy.bin" next to <path>, rewriting proxy
// records so they point at the final byte offsets of their child chunks.
// Output records are 22 bytes each:
// type(1) | childMask(1) | numPoints(4) | byteOffset(8) | byteSize(8).
struct HierarchyBuilder {
// Input records have this structure, but guaranteed to be packed
// struct Record{ size offset
// uint8_t name[31]; 31 0
// uint32_t numPoints; 4 31
// int64_t byteOffset; 8 35
// int32_t byteSize; 4 43
// uint8_t end = '\n'; 1 47
// }; ===
// 48
int hierarchyStepSize = 0;
std::string path = "";
// role of a node in the serialized hierarchy
enum TYPE {
NORMAL = 0,
LEAF = 1,
PROXY = 2,
};
// one octree node parsed from a 48-byte input record
struct HNode {
std::string name = "";
int numPoints = 0;
uint8_t childMask = 0;
uint64_t byteOffset = 0;
uint64_t byteSize = 0;
TYPE type = TYPE::LEAF;
uint64_t proxyByteOffset = 0;
uint64_t proxyByteSize = 0;
};
// a group of nodes spanning <hierarchyStepSize> tree levels
struct HChunk {
std::string name;
std::vector<std::shared_ptr<HNode>> nodes;
int64_t byteOffset = 0;
};
// all nodes/chunks parsed from one input batch file
struct HBatch {
std::string name;
std::string path;
int numNodes = 0;
int64_t byteSize = 0;
std::vector<std::shared_ptr<HNode>> nodes;
std::vector<std::shared_ptr<HChunk>> chunks;
std::unordered_map<std::string, std::shared_ptr<HNode>> nodeMap;
std::unordered_map<std::string, std::shared_ptr<HChunk>> chunkMap;
};
std::shared_ptr<HBatch> batch_root;
HierarchyBuilder(const std::string& path, int hierarchyStepSize) {
this->path = path;
this->hierarchyStepSize = hierarchyStepSize;
}
// Parses one batch file of 48-byte records into nodes, groups them into
// chunks, classifies each node (NORMAL/LEAF/PROXY), and sorts everything
// breadth-first.
std::shared_ptr<HBatch> loadBatch(const std::string& path) {
std::shared_ptr<Buffer> buffer = readBinaryFile(path);
auto batch = std::make_shared<HBatch>();
batch->path = path;
batch->name = std::filesystem::path(path).stem().string();
batch->numNodes = buffer->size / 48;
// group this batch in chunks of <hierarchyStepSize>
for (int i = 0; i < batch->numNodes; i++) {
int recordOffset = 48 * i;
std::string nodeName = std::string(buffer->data_char + recordOffset, 31);
nodeName = stringReplace(nodeName, " ", "");
nodeName.erase(std::remove(nodeName.begin(), nodeName.end(), ' '), nodeName.end());
auto node = std::make_shared<HNode>();
node->name = nodeName;
node->numPoints = buffer->get<uint32_t>(recordOffset + 31);
node->byteOffset = buffer->get< int64_t>(recordOffset + 35);
node->byteSize = buffer->get< int32_t>(recordOffset + 43);
// r: 0, r0123: 1, r01230123: 2
// NOTE(review): the hardcoded 4 looks like it assumes hierarchyStepSize == 4,
// and (name.size() - 2) underflows for the root name "r" — TODO confirm
int chunkLevel = (node->name.size() - 2) / 4;
std::string key = node->name.substr(0, hierarchyStepSize * chunkLevel + 1);
if (node->name == batch->name) {
key = node->name;
}
if (batch->chunkMap.find(key) == batch->chunkMap.end()) {
auto chunk = std::make_shared<HChunk>();
chunk->name = key;
batch->chunkMap[key] = chunk;
batch->chunks.push_back(chunk);
}
batch->chunkMap[key]->nodes.push_back(node);
batch->nodes.push_back(node);
batch->nodeMap[node->name] = batch->nodes[batch->nodes.size() - 1];
bool isChunkKey = ((node->name.size() - 1) % hierarchyStepSize) == 0;
bool isBatchSubChunk = node->name.size() > hierarchyStepSize + 1;
// chunk roots below the batch root also appear as the first node of their own chunk
if (isChunkKey && isBatchSubChunk) {
if (batch->chunkMap.find(node->name) == batch->chunkMap.end()) {
auto chunk = std::make_shared<HChunk>();
chunk->name = node->name;
batch->chunkMap[node->name] = chunk;
batch->chunks.push_back(chunk);
}
batch->chunkMap[node->name]->nodes.push_back(node);
}
}
// breadth-first sorted list of chunks
std::sort(batch->chunks.begin(), batch->chunks.end(), [](const std::shared_ptr<HChunk>& a, const std::shared_ptr<HChunk>& b) {
if (a->name.size() != b->name.size()) {
return a->name.size() < b->name.size();
}
else {
return a->name < b->name;
}
});
// initialize all nodes as leaf nodes, turn into "normal" if child appears
// also notify parent that it has a child!
for (const auto& node : batch->nodes) {
node->type = TYPE::LEAF;
std::string parentName = node->name.substr(0, node->name.size() - 1);
auto ptrParent = batch->nodeMap.find(parentName);
if (ptrParent != batch->nodeMap.end()) {
int childIndex = node->name.back() - '0';
ptrParent->second->type = TYPE::NORMAL;
ptrParent->second->childMask = ptrParent->second->childMask | (1 << childIndex);
}
}
// find and flag proxy nodes (pseudo-leaf in one chunk pointing to root of a child-chunk)
for (const auto& chunk : batch->chunks) {
//if(chunk->name == batch->name) continue;
auto ptr = batch->nodeMap.find(chunk->name);
if (ptr != batch->nodeMap.end()) {
ptr->second->type = TYPE::PROXY;
}
else {
// could not find a node with the chunk's name
// should only happen if this chunk's root
// is equal to the batch root
if (chunk->name != batch->name) {
std::cout << "ERROR: could not find chunk " << chunk->name << " in batch " << batch->name << std::endl;
exit(123);
}
}
}
// sort nodes in chunks in breadth-first order
for (const auto& chunk : batch->chunks) {
std::sort(chunk->nodes.begin(), chunk->nodes.end(), [](const std::shared_ptr<HNode>& a, const std::shared_ptr<HNode>& b) {
if (a->name.size() != b->name.size()) {
return a->name.size() < b->name.size();
}
else {
return a->name < b->name;
}
});
}
return batch;
}
// Computes each chunk's byte offset relative to the batch and fills in the
// proxy records that point from a parent chunk to its child chunks.
void processBatch(std::shared_ptr<HBatch> batch) {
// compute byte offsets of chunks relative to batch
int64_t byteOffset = 0;
for (const auto& chunk : batch->chunks) {
chunk->byteOffset = byteOffset;
// cout << "set offset: " << chunk->name << ", " << chunk->byteOffset << endl;
if (chunk->name != batch->name) {
// this chunk is not the root of the batch.
// find parent chunk within batch.
// there must be a leaf node in the parent chunk,
// which is the proxy node / pointer to this chunk.
const std::string parentName = chunk->name.substr(0, chunk->name.size() - hierarchyStepSize);
if (batch->chunkMap.find(parentName) != batch->chunkMap.end())
{
const auto& parent = batch->chunkMap[parentName];
const auto& proxyNode = batch->nodeMap[chunk->name];
if (proxyNode == nullptr) {
std::cout << "ERROR: didn't find proxy node " << chunk->name << std::endl;
exit(123);
}
proxyNode->type = TYPE::PROXY;
proxyNode->proxyByteOffset = chunk->byteOffset;
// 22 = size of one serialized hierarchy record
proxyNode->proxyByteSize = 22 * chunk->nodes.size();
}
else {
std::cout << "ERROR: didn't find chunk " << chunk->name << std::endl;
exit(123);
}
}
byteOffset += 22 * chunk->nodes.size();
}
batch->byteSize = byteOffset;
}
// Serializes all chunks of a batch into one buffer of 22-byte records.
// bytesWritten is the batch's absolute offset within hierarchy.bin and is
// used to turn batch-relative proxy offsets into absolute file offsets.
std::shared_ptr<Buffer> serializeBatch(std::shared_ptr<HBatch> batch, int64_t bytesWritten) {
int numRecords = 0;
for (const auto& chunk : batch->chunks) {
numRecords += chunk->nodes.size(); // all nodes in chunk except chunk root
}
auto buffer = std::make_shared<Buffer>(22 * numRecords);
int recordsProcessed = 0;
for (const auto& chunk : batch->chunks) {
//auto chunkRoot = batch->nodes[chunk->name];
// TODO...;
for (const auto& node : chunk->nodes) {
// proxy nodes exist twice - in the chunk and the parent-chunk that points to this chunk
// only the node in the parent-chunk is a proxy (to its non-proxy counterpart)
bool isProxyNode = (node->type == TYPE::PROXY) && node->name != chunk->name;
TYPE type = node->type;
if (node->type == TYPE::PROXY && !isProxyNode) {
type = TYPE::NORMAL;
}
uint64_t byteSize = isProxyNode ? node->proxyByteSize : node->byteSize;
uint64_t byteOffset = (isProxyNode ? bytesWritten + node->proxyByteOffset : node->byteOffset);
buffer->set<uint8_t >(type, 22 * recordsProcessed + 0);
buffer->set<uint8_t >(node->childMask, 22 * recordsProcessed + 1);
buffer->set<uint32_t>(node->numPoints, 22 * recordsProcessed + 2);
buffer->set<uint64_t>(byteOffset, 22 * recordsProcessed + 6);
buffer->set<uint64_t>(byteSize, 22 * recordsProcessed + 14);
recordsProcessed++;
}
}
batch->byteSize = buffer->size;
return buffer;
}
// Entry point: writes hierarchy.bin with space for the root batch reserved
// at the start, appends all other batches, then patches the root batch in
// once every proxy offset is known.
void build() {
std::string hierarchyFilePath = path + "/../hierarchy.bin";
std::fstream fout(hierarchyFilePath, std::ios::binary | std::ios::out);
int64_t bytesWritten = 0;
auto batch_root = loadBatch(path + "/r.bin");
this->batch_root = batch_root;
{ // reserve the first <x> bytes in the file for the root chunk
Buffer tmp(22 * batch_root->nodes.size());
std::memset(tmp.data, 0, tmp.size);
fout.write(tmp.data_char, tmp.size);
bytesWritten = tmp.size;
}
// now write all hierarchy batches, except root
// update proxy nodes in root with byteOffsets of written batches.
for (const auto& entry : std::filesystem::directory_iterator(path)) {
const auto& filepath = entry.path();
// r0626.txt
// skip root. it gets special treatment
if (filepath.filename().string() == "r.bin") continue;
// skip non *.bin files
if (!iEndsWith(filepath.string(), ".bin")) continue;
auto batch = loadBatch(filepath.string());
processBatch(batch);
auto buffer = serializeBatch(batch, bytesWritten);
if (batch->nodes.size() > 1) {
const auto& proxyNode = batch_root->nodeMap[batch->name];
proxyNode->type = TYPE::PROXY;
proxyNode->proxyByteOffset = bytesWritten;
proxyNode->proxyByteSize = 22 * batch->chunkMap[batch->name]->nodes.size();
}
else {
// if there is only one node in that batch,
// then we flag that node as leaf in the root-batch
const auto& root_batch_node = batch_root->nodeMap[batch->name];
root_batch_node->type = TYPE::LEAF;
}
fout.write(buffer->data_char, buffer->size);
bytesWritten += buffer->size;
}
// close/flush file so that we can reopen it to modify beginning
fout.close();
{ // update beginning of file with root chunk
std::fstream f(hierarchyFilePath, std::ios::ate | std::ios::binary | std::ios::out | std::ios::in);
f.seekg(0);
auto buffer = serializeBatch(batch_root, 0);
f.write(buffer->data_char, buffer->size);
f.close();
}
// redundant security check
if (iEndsWith(this->path, ".hierarchyChunks")) {
std::filesystem::remove_all(this->path);
}
return;
}
};
#pragma once
#include "structures.h"
namespace Potree
{
// Helpers for hierarchy construction.
namespace HB {
// Sorts nodes into breadth-first order: shorter names first
// (node names encode the path from the root, so name length
// corresponds to tree depth), lexicographically within equal length.
template<class T>
void sortBreadthFirst(std::vector<T>& nodes) {
sort(nodes.begin(), nodes.end(), [](const T& a, const T& b) {
if (a.name.size() != b.name.size()) {
return a.name.size() < b.name.size();
}
else {
return a.name < b.name;
}
});
};
};
// Merges the per-batch hierarchy files ("<path>/*.bin") produced during
// conversion into a single "hierarchy.bin" next to <path>, rewriting proxy
// records so they point at the final byte offsets of their child chunks.
// Output records are 22 bytes each:
// type(1) | childMask(1) | numPoints(4) | byteOffset(8) | byteSize(8).
struct HierarchyBuilder {
// Input records have this structure, but guaranteed to be packed
// struct Record{ size offset
// uint8_t name[31]; 31 0
// uint32_t numPoints; 4 31
// int64_t byteOffset; 8 35
// int32_t byteSize; 4 43
// uint8_t end = '\n'; 1 47
// }; ===
// 48
int hierarchyStepSize = 0;
std::string path = "";
// role of a node in the serialized hierarchy
enum TYPE {
NORMAL = 0,
LEAF = 1,
PROXY = 2,
};
// one octree node parsed from a 48-byte input record
struct HNode {
std::string name = "";
int numPoints = 0;
uint8_t childMask = 0;
uint64_t byteOffset = 0;
uint64_t byteSize = 0;
TYPE type = TYPE::LEAF;
uint64_t proxyByteOffset = 0;
uint64_t proxyByteSize = 0;
};
// a group of nodes spanning <hierarchyStepSize> tree levels
struct HChunk {
std::string name;
std::vector<std::shared_ptr<HNode>> nodes;
int64_t byteOffset = 0;
};
// all nodes/chunks parsed from one input batch file
struct HBatch {
std::string name;
std::string path;
int numNodes = 0;
int64_t byteSize = 0;
std::vector<std::shared_ptr<HNode>> nodes;
std::vector<std::shared_ptr<HChunk>> chunks;
std::unordered_map<std::string, std::shared_ptr<HNode>> nodeMap;
std::unordered_map<std::string, std::shared_ptr<HChunk>> chunkMap;
};
std::shared_ptr<HBatch> batch_root;
HierarchyBuilder(const std::string& path, int hierarchyStepSize) {
this->path = path;
this->hierarchyStepSize = hierarchyStepSize;
}
// Parses one batch file of 48-byte records into nodes, groups them into
// chunks, classifies each node (NORMAL/LEAF/PROXY), and sorts everything
// breadth-first.
std::shared_ptr<HBatch> loadBatch(const std::string& path) {
std::shared_ptr<Buffer> buffer = readBinaryFile(path);
auto batch = std::make_shared<HBatch>();
batch->path = path;
batch->name = std::filesystem::path(path).stem().string();
batch->numNodes = buffer->size / 48;
// group this batch in chunks of <hierarchyStepSize>
for (int i = 0; i < batch->numNodes; i++) {
int recordOffset = 48 * i;
std::string nodeName = std::string(buffer->data_char + recordOffset, 31);
nodeName = stringReplace(nodeName, " ", "");
nodeName.erase(std::remove(nodeName.begin(), nodeName.end(), ' '), nodeName.end());
auto node = std::make_shared<HNode>();
node->name = nodeName;
node->numPoints = buffer->get<uint32_t>(recordOffset + 31);
node->byteOffset = buffer->get< int64_t>(recordOffset + 35);
node->byteSize = buffer->get< int32_t>(recordOffset + 43);
// r: 0, r0123: 1, r01230123: 2
// NOTE(review): the hardcoded 4 looks like it assumes hierarchyStepSize == 4,
// and (name.size() - 2) underflows for the root name "r" — TODO confirm
int chunkLevel = (node->name.size() - 2) / 4;
std::string key = node->name.substr(0, hierarchyStepSize * chunkLevel + 1);
if (node->name == batch->name) {
key = node->name;
}
if (batch->chunkMap.find(key) == batch->chunkMap.end()) {
auto chunk = std::make_shared<HChunk>();
chunk->name = key;
batch->chunkMap[key] = chunk;
batch->chunks.push_back(chunk);
}
batch->chunkMap[key]->nodes.push_back(node);
batch->nodes.push_back(node);
batch->nodeMap[node->name] = batch->nodes[batch->nodes.size() - 1];
bool isChunkKey = ((node->name.size() - 1) % hierarchyStepSize) == 0;
bool isBatchSubChunk = node->name.size() > hierarchyStepSize + 1;
// chunk roots below the batch root also appear as the first node of their own chunk
if (isChunkKey && isBatchSubChunk) {
if (batch->chunkMap.find(node->name) == batch->chunkMap.end()) {
auto chunk = std::make_shared<HChunk>();
chunk->name = node->name;
batch->chunkMap[node->name] = chunk;
batch->chunks.push_back(chunk);
}
batch->chunkMap[node->name]->nodes.push_back(node);
}
}
// breadth-first sorted list of chunks
std::sort(batch->chunks.begin(), batch->chunks.end(), [](const std::shared_ptr<HChunk>& a, const std::shared_ptr<HChunk>& b) {
if (a->name.size() != b->name.size()) {
return a->name.size() < b->name.size();
}
else {
return a->name < b->name;
}
});
// initialize all nodes as leaf nodes, turn into "normal" if child appears
// also notify parent that it has a child!
for (const auto& node : batch->nodes) {
node->type = TYPE::LEAF;
std::string parentName = node->name.substr(0, node->name.size() - 1);
auto ptrParent = batch->nodeMap.find(parentName);
if (ptrParent != batch->nodeMap.end()) {
int childIndex = node->name.back() - '0';
ptrParent->second->type = TYPE::NORMAL;
ptrParent->second->childMask = ptrParent->second->childMask | (1 << childIndex);
}
}
// find and flag proxy nodes (pseudo-leaf in one chunk pointing to root of a child-chunk)
for (const auto& chunk : batch->chunks) {
//if(chunk->name == batch->name) continue;
auto ptr = batch->nodeMap.find(chunk->name);
if (ptr != batch->nodeMap.end()) {
ptr->second->type = TYPE::PROXY;
}
else {
// could not find a node with the chunk's name
// should only happen if this chunk's root
// is equal to the batch root
if (chunk->name != batch->name) {
std::cout << "ERROR: could not find chunk " << chunk->name << " in batch " << batch->name << std::endl;
exit(123);
}
}
}
// sort nodes in chunks in breadth-first order
for (const auto& chunk : batch->chunks) {
std::sort(chunk->nodes.begin(), chunk->nodes.end(), [](const std::shared_ptr<HNode>& a, const std::shared_ptr<HNode>& b) {
if (a->name.size() != b->name.size()) {
return a->name.size() < b->name.size();
}
else {
return a->name < b->name;
}
});
}
return batch;
}
// Computes each chunk's byte offset relative to the batch and fills in the
// proxy records that point from a parent chunk to its child chunks.
void processBatch(std::shared_ptr<HBatch> batch) {
// compute byte offsets of chunks relative to batch
int64_t byteOffset = 0;
for (const auto& chunk : batch->chunks) {
chunk->byteOffset = byteOffset;
// cout << "set offset: " << chunk->name << ", " << chunk->byteOffset << endl;
if (chunk->name != batch->name) {
// this chunk is not the root of the batch.
// find parent chunk within batch.
// there must be a leaf node in the parent chunk,
// which is the proxy node / pointer to this chunk.
const std::string parentName = chunk->name.substr(0, chunk->name.size() - hierarchyStepSize);
if (batch->chunkMap.find(parentName) != batch->chunkMap.end())
{
const auto& parent = batch->chunkMap[parentName];
const auto& proxyNode = batch->nodeMap[chunk->name];
if (proxyNode == nullptr) {
std::cout << "ERROR: didn't find proxy node " << chunk->name << std::endl;
exit(123);
}
proxyNode->type = TYPE::PROXY;
proxyNode->proxyByteOffset = chunk->byteOffset;
// 22 = size of one serialized hierarchy record
proxyNode->proxyByteSize = 22 * chunk->nodes.size();
}
else {
std::cout << "ERROR: didn't find chunk " << chunk->name << std::endl;
exit(123);
}
}
byteOffset += 22 * chunk->nodes.size();
}
batch->byteSize = byteOffset;
}
// Serializes all chunks of a batch into one buffer of 22-byte records.
// bytesWritten is the batch's absolute offset within hierarchy.bin and is
// used to turn batch-relative proxy offsets into absolute file offsets.
std::shared_ptr<Buffer> serializeBatch(std::shared_ptr<HBatch> batch, int64_t bytesWritten) {
int numRecords = 0;
for (const auto& chunk : batch->chunks) {
numRecords += chunk->nodes.size(); // all nodes in chunk except chunk root
}
auto buffer = std::make_shared<Buffer>(22 * numRecords);
int recordsProcessed = 0;
for (const auto& chunk : batch->chunks) {
//auto chunkRoot = batch->nodes[chunk->name];
// TODO...;
for (const auto& node : chunk->nodes) {
// proxy nodes exist twice - in the chunk and the parent-chunk that points to this chunk
// only the node in the parent-chunk is a proxy (to its non-proxy counterpart)
bool isProxyNode = (node->type == TYPE::PROXY) && node->name != chunk->name;
TYPE type = node->type;
if (node->type == TYPE::PROXY && !isProxyNode) {
type = TYPE::NORMAL;
}
uint64_t byteSize = isProxyNode ? node->proxyByteSize : node->byteSize;
uint64_t byteOffset = (isProxyNode ? bytesWritten + node->proxyByteOffset : node->byteOffset);
buffer->set<uint8_t >(type, 22 * recordsProcessed + 0);
buffer->set<uint8_t >(node->childMask, 22 * recordsProcessed + 1);
buffer->set<uint32_t>(node->numPoints, 22 * recordsProcessed + 2);
buffer->set<uint64_t>(byteOffset, 22 * recordsProcessed + 6);
buffer->set<uint64_t>(byteSize, 22 * recordsProcessed + 14);
recordsProcessed++;
}
}
batch->byteSize = buffer->size;
return buffer;
}
// Entry point: writes hierarchy.bin with space for the root batch reserved
// at the start, appends all other batches, then patches the root batch in
// once every proxy offset is known.
void build() {
std::string hierarchyFilePath = path + "/../hierarchy.bin";
std::fstream fout(hierarchyFilePath, std::ios::binary | std::ios::out);
int64_t bytesWritten = 0;
auto batch_root = loadBatch(path + "/r.bin");
this->batch_root = batch_root;
{ // reserve the first <x> bytes in the file for the root chunk
Buffer tmp(22 * batch_root->nodes.size());
std::memset(tmp.data, 0, tmp.size);
fout.write(tmp.data_char, tmp.size);
bytesWritten = tmp.size;
}
// now write all hierarchy batches, except root
// update proxy nodes in root with byteOffsets of written batches.
for (const auto& entry : std::filesystem::directory_iterator(path)) {
const auto& filepath = entry.path();
// r0626.txt
// skip root. it gets special treatment
if (filepath.filename().string() == "r.bin") continue;
// skip non *.bin files
if (!iEndsWith(filepath.string(), ".bin")) continue;
auto batch = loadBatch(filepath.string());
processBatch(batch);
auto buffer = serializeBatch(batch, bytesWritten);
if (batch->nodes.size() > 1) {
const auto& proxyNode = batch_root->nodeMap[batch->name];
proxyNode->type = TYPE::PROXY;
proxyNode->proxyByteOffset = bytesWritten;
proxyNode->proxyByteSize = 22 * batch->chunkMap[batch->name]->nodes.size();
}
else {
// if there is only one node in that batch,
// then we flag that node as leaf in the root-batch
const auto& root_batch_node = batch_root->nodeMap[batch->name];
root_batch_node->type = TYPE::LEAF;
}
fout.write(buffer->data_char, buffer->size);
bytesWritten += buffer->size;
}
// close/flush file so that we can reopen it to modify beginning
fout.close();
{ // update beginning of file with root chunk
std::fstream f(hierarchyFilePath, std::ios::ate | std::ios::binary | std::ios::out | std::ios::in);
f.seekg(0);
auto buffer = serializeBatch(batch_root, 0);
f.write(buffer->data_char, buffer->size);
f.close();
}
// redundant security check
if (iEndsWith(this->path, ".hierarchyChunks")) {
std::filesystem::remove_all(this->path);
}
return;
}
};
}

View File

@@ -1,81 +1,81 @@
#pragma once
#include "converter_utils.h"
#include <thread>
#include <map>
namespace Potree
{
// Prints conversion progress once per second from a background thread.
// start() launches the thread, stop() requests shutdown and joins it.
struct Monitor {
std::thread t;
// NOTE(review): written by stop() and read by the worker thread without
// synchronization; should be std::atomic<bool> to avoid a data race.
bool stopRequested = false;
State* state = nullptr;
std::map<std::string, std::string> messages;
// 'state' is borrowed, not owned; must outlive the Monitor.
Monitor(State* state) {
this->state = state;
}
// Formats and prints one progress line: total %, current pass %, duration,
// throughput (million points/s), RAM and CPU usage.
void _print() {
auto ram = getMemoryData();
auto CPU = getCpuData();
double GB = 1024.0 * 1024.0 * 1024.0;
double throughput = (double(this->state->pointsProcessed) / this->state->duration) / 1'000'000.0;
double progressPass = 100.0 * this->state->progress();
// overall progress assumes all passes are weighted equally
double progressTotal = (100.0 * double(this->state->currentPass - 1) + progressPass) / double(this->state->numPasses);
std::string strProgressPass = formatNumber(progressPass) + "%";
std::string strProgressTotal = formatNumber(progressTotal) + "%";
std::string strTime = formatNumber(now()) + "s";
std::string strDuration = formatNumber(this->state->duration) + "s";
std::string strThroughput = formatNumber(throughput) + "MPs";
std::string strRAM = formatNumber(double(ram.virtual_usedByProcess) / GB, 1)
+ "GB (highest " + formatNumber(double(ram.virtual_usedByProcess_max) / GB, 1) + "GB)";
std::string strCPU = formatNumber(CPU.usage) + "%";
std::stringstream ss;
ss << "[" << strProgressTotal << ", " << strTime << "], "
<< "[" << this->state->name << ": " << strProgressPass
<< ", duration: " << strDuration
<< ", throughput: " << strThroughput << "]"
<< "[RAM: " << strRAM << ", CPU: " << strCPU << "]" << std::endl;
std::cout << ss.str() << std::flush;
}
// Starts the reporting thread; waits one second before the first line.
void start() {
Monitor* _this = this;
this->t = std::thread([_this]() {
using namespace std::chrono_literals;
std::this_thread::sleep_for(1'000ms);
std::cout << std::endl;
while (!_this->stopRequested) {
_this->_print();
std::this_thread::sleep_for(1'000ms);
}
});
}
// Signals the reporting thread to exit and blocks until it has joined.
void stop() {
stopRequested = true;
t.join();
}
};
}
#pragma once
#include "converter_utils.h"
#include <thread>
#include <map>
namespace Potree
{
struct Monitor {
std::thread t;
bool stopRequested = false;
State* state = nullptr;
std::map<std::string, std::string> messages;
Monitor(State* state) {
this->state = state;
}
void _print() {
auto ram = getMemoryData();
auto CPU = getCpuData();
double GB = 1024.0 * 1024.0 * 1024.0;
double throughput = (double(this->state->pointsProcessed) / this->state->duration) / 1'000'000.0;
double progressPass = 100.0 * this->state->progress();
double progressTotal = (100.0 * double(this->state->currentPass - 1) + progressPass) / double(this->state->numPasses);
std::string strProgressPass = formatNumber(progressPass) + "%";
std::string strProgressTotal = formatNumber(progressTotal) + "%";
std::string strTime = formatNumber(now()) + "s";
std::string strDuration = formatNumber(this->state->duration) + "s";
std::string strThroughput = formatNumber(throughput) + "MPs";
std::string strRAM = formatNumber(double(ram.virtual_usedByProcess) / GB, 1)
+ "GB (highest " + formatNumber(double(ram.virtual_usedByProcess_max) / GB, 1) + "GB)";
std::string strCPU = formatNumber(CPU.usage) + "%";
std::stringstream ss;
ss << "[" << strProgressTotal << ", " << strTime << "], "
<< "[" << this->state->name << ": " << strProgressPass
<< ", duration: " << strDuration
<< ", throughput: " << strThroughput << "]"
<< "[RAM: " << strRAM << ", CPU: " << strCPU << "]" << std::endl;
std::cout << ss.str() << std::flush;
}
void start() {
Monitor* _this = this;
this->t = std::thread([_this]() {
using namespace std::chrono_literals;
std::this_thread::sleep_for(1'000ms);
std::cout << std::endl;
while (!_this->stopRequested) {
_this->_print();
std::this_thread::sleep_for(1'000ms);
}
});
}
void stop() {
stopRequested = true;
t.join();
}
};
}

View File

@@ -1,21 +1,21 @@
#pragma once
#include "structures.h"
#include <vector>
namespace Potree
{
class IPotreeDataProvider;
class DataDescription;
class ILogger;
struct Options;
// Static entry point of the Potree conversion pipeline.
class PotreeConverter
{
public:
// Runs a full conversion of the provider's point data with the given options.
// Progress is printed to stdout when printInfoDuringProcessing is true;
// diagnostics go to 'logger' when non-null (borrowed, not owned).
// Returns a success flag - NOTE(review): confirm exact semantics at the implementation.
static bool Convert(IPotreeDataProvider* provider, const Options& options, bool printInfoDuringProcessing = true, ILogger* logger = nullptr);
private:
// Splits the described source into chunks of 'pointSize'-byte point records.
static std::vector<ChunkInfo> SplitPointsData(const SourceDescription& desc, uint16_t pointSize);
};
#pragma once
#include "structures.h"
#include <vector>
namespace Potree
{
class IPotreeDataProvider;
class DataDescription;
class ILogger;
struct Options;
// Static entry point of the Potree conversion pipeline.
class PotreeConverter
{
public:
// Runs a full conversion of the provider's point data with the given options.
// Progress is printed to stdout when printInfoDuringProcessing is true;
// diagnostics go to 'logger' when non-null (borrowed, not owned).
// Returns a success flag - NOTE(review): confirm exact semantics at the implementation.
static bool Convert(IPotreeDataProvider* provider, const Options& options, bool printInfoDuringProcessing = true, ILogger* logger = nullptr);
private:
// Splits the described source into chunks of 'pointSize'-byte point records.
static std::vector<ChunkInfo> SplitPointsData(const SourceDescription& desc, uint16_t pointSize);
};
}

View File

@@ -1,112 +1,112 @@
#pragma once
#include <string>
#include <cmath>
#include <sstream>
#include <algorithm>
#include <iomanip>
namespace Potree
{
// Minimal 3D vector of doubles with component-wise arithmetic.
// All query methods are const so the type works through const references.
struct Vector3 {
	double x = 0.0;
	double y = 0.0;
	double z = 0.0;

	Vector3() {
	}

	Vector3(double x, double y, double z) : x(x), y(y), z(z) {
	}

	// Sets all three components to the same value.
	Vector3(double val) : x(val), y(val), z(val) {
	}

	// Reads x, y, z from three consecutive doubles.
	Vector3(const double value[3]) : x(value[0]), y(value[1]), z(value[2]) {
	}

	// Squared euclidean distance; cheaper than distanceTo (no sqrt).
	double squaredDistanceTo(const Vector3& right) const {
		double dx = right.x - x;
		double dy = right.y - y;
		double dz = right.z - z;
		return dx * dx + dy * dy + dz * dz;
	}

	// Euclidean distance to another point.
	double distanceTo(const Vector3& right) const {
		return std::sqrt(squaredDistanceTo(right));
	}

	double length() const {
		return std::sqrt(x * x + y * y + z * z);
	}

	// Largest of the three components.
	double max() const {
		return std::max(std::max(x, y), z);
	}

	// Component-wise arithmetic.
	Vector3 operator-(const Vector3& right) const {
		return Vector3(x - right.x, y - right.y, z - right.z);
	}
	Vector3 operator+(const Vector3& right) const {
		return Vector3(x + right.x, y + right.y, z + right.z);
	}
	Vector3 operator+(const double& scalar) const {
		return Vector3(x + scalar, y + scalar, z + scalar);
	}
	Vector3 operator/(const double& scalar) const {
		return Vector3(x / scalar, y / scalar, z / scalar);
	}
	Vector3 operator/(const Vector3& right) const {
		return Vector3(x / right.x, y / right.y, z / right.z);
	}
	Vector3 operator*(const Vector3& right) const {
		return Vector3(x * right.x, y * right.y, z * right.z);
	}
	Vector3 operator*(const double& scalar) const {
		return Vector3(x * scalar, y * scalar, z * scalar);
	}

	// Memberwise equality; equivalent to the C++20 defaulted comparison but
	// spelled out so the header also compiles on pre-C++20 toolchains.
	bool operator==(const Vector3& other) const {
		return x == other.x && y == other.y && z == other.z;
	}

	// "x, y, z" with round-trip (max_digits10) precision.
	std::string toString() const {
		auto digits = std::numeric_limits<double>::max_digits10;
		std::stringstream ss;
		ss << std::setprecision(digits);
		ss << x << ", " << y << ", " << z;
		return ss.str();
	}
};
}
#pragma once
#include <algorithm>
#include <cmath>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
namespace Potree
{
// Minimal 3D vector of doubles with component-wise arithmetic.
// All query methods are const so the type works through const references.
struct Vector3 {
	double x = 0.0;
	double y = 0.0;
	double z = 0.0;

	Vector3() {
	}

	Vector3(double x, double y, double z) : x(x), y(y), z(z) {
	}

	// Sets all three components to the same value.
	Vector3(double val) : x(val), y(val), z(val) {
	}

	// Reads x, y, z from three consecutive doubles.
	Vector3(const double value[3]) : x(value[0]), y(value[1]), z(value[2]) {
	}

	// Squared euclidean distance; cheaper than distanceTo (no sqrt).
	double squaredDistanceTo(const Vector3& right) const {
		double dx = right.x - x;
		double dy = right.y - y;
		double dz = right.z - z;
		return dx * dx + dy * dy + dz * dz;
	}

	// Euclidean distance to another point.
	double distanceTo(const Vector3& right) const {
		return std::sqrt(squaredDistanceTo(right));
	}

	double length() const {
		return std::sqrt(x * x + y * y + z * z);
	}

	// Largest of the three components.
	double max() const {
		return std::max(std::max(x, y), z);
	}

	// Component-wise arithmetic.
	Vector3 operator-(const Vector3& right) const {
		return Vector3(x - right.x, y - right.y, z - right.z);
	}
	Vector3 operator+(const Vector3& right) const {
		return Vector3(x + right.x, y + right.y, z + right.z);
	}
	Vector3 operator+(const double& scalar) const {
		return Vector3(x + scalar, y + scalar, z + scalar);
	}
	Vector3 operator/(const double& scalar) const {
		return Vector3(x / scalar, y / scalar, z / scalar);
	}
	Vector3 operator/(const Vector3& right) const {
		return Vector3(x / right.x, y / right.y, z / right.z);
	}
	Vector3 operator*(const Vector3& right) const {
		return Vector3(x * right.x, y * right.y, z * right.z);
	}
	Vector3 operator*(const double& scalar) const {
		return Vector3(x * scalar, y * scalar, z * scalar);
	}

	// Memberwise equality; equivalent to the C++20 defaulted comparison but
	// spelled out so the header also compiles on pre-C++20 toolchains.
	bool operator==(const Vector3& other) const {
		return x == other.x && y == other.y && z == other.z;
	}

	// "x, y, z" with round-trip (max_digits10) precision.
	std::string toString() const {
		auto digits = std::numeric_limits<double>::max_digits10;
		std::stringstream ss;
		ss << std::setprecision(digits);
		ss << x << ", " << y << ", " << z;
		return ss.str();
	}
};
}

View File

@@ -1,191 +1,191 @@
#pragma once
#include "Vector3.h"
#include "Constants.h"
#include "structures.h"
#include <atomic>
#include <map>
#include <array>
#include <sstream>
#include <locale>
#include <iostream>
namespace Potree
{
// Component-wise minimum of two vectors.
inline Vector3 min(const Vector3& a, const Vector3& b)
{
	Vector3 result;
	result.x = std::min(a.x, b.x);
	result.y = std::min(a.y, b.y);
	result.z = std::min(a.z, b.z);
	return result;
}
// Component-wise maximum of two vectors.
inline Vector3 max(const Vector3& a, const Vector3& b)
{
	Vector3 result;
	result.x = std::max(a.x, b.x);
	result.y = std::max(a.y, b.y);
	result.z = std::max(a.z, b.z);
	return result;
}
// Dequantizes an int32 position: world = int * scale + offset, per axis.
inline Vector3 IntPointToDouble(const int32_t position[3], const Vector3& scale, const Vector3& offset)
{
	const Vector3 asDouble(position[0], position[1], position[2]);
	return asDouble * scale + offset;
}
inline std::array<int32_t, 3> DoublePointToInt(const Vector3& position, const Vector3& scale, const Vector3& offset)
{
std::array<int32_t, 3> res = {};
Vector3 scaled = (position - offset) / scale;
for (int i = 0; i < 3; i++)
res[i] = *(&scaled.x + i);
return res;
}
// In-memory mirror of a LAS point data record format 2
// (quantized XYZ + intensity + per-return flags + RGB).
// NOTE(review): the LAS spec defines scan angle rank as signed; it is stored
// here as uint8_t - confirm that readers reinterpret it as needed.
struct LASPointF2 {
int32_t x;
int32_t y;
int32_t z;
uint16_t intensity;
uint8_t returnNumber;
uint8_t classification;
uint8_t scanAngleRank;
uint8_t userData;
uint16_t pointSourceID;
uint16_t r;
uint16_t g;
uint16_t b;
};
struct BoundingBox {
Vector3 min;
Vector3 max;
BoundingBox() {
this->min = { Infinity,Infinity,Infinity };
this->max = { -Infinity,-Infinity,-Infinity };
}
BoundingBox(Vector3 min, Vector3 max) {
this->min = min;
this->max = max;
}
};
// Formats 'number' with a fixed number of decimals and apostrophe thousands
// separators (supplied by the project-defined punct_facet).
template<class T>
inline std::string formatNumber(T number, int decimals = 0) {
	std::ostringstream formatted;
	formatted.imbue(std::locale(std::cout.getloc(), new punct_facet));
	formatted << std::fixed << std::setprecision(decimals) << number;
	return formatted.str();
}
// see https://www.forceflow.be/2013/10/07/morton-encodingdecoding-through-bit-interleaving-implementations/
// Spreads the lowest 21 bits of 'a' so that input bit i lands on output bit
// 3*i, leaving two zero bits between consecutive input bits. Bits above
// bit 20 are ignored.
inline uint64_t splitBy3(unsigned int a) {
	uint64_t code = uint64_t(a) & 0x1fffff;           // only the low 21 bits matter
	code = (code | code << 32) & 0x001f00000000ffff;
	code = (code | code << 16) & 0x001f0000ff0000ff;
	code = (code | code << 8)  & 0x100f00f00f00f00f;
	code = (code | code << 4)  & 0x10c30c30c30c30c3;
	code = (code | code << 2)  & 0x1249249249249249;
	return code;
}
// Interleaves the low 21 bits of x, y and z into a 63-bit Morton code:
// x occupies bits 0,3,6,..., y bits 1,4,7,..., z bits 2,5,8,...
inline uint64_t mortonEncode_magicbits(unsigned int x, unsigned int y, unsigned int z) {
	return splitBy3(x) | (splitBy3(y) << 1) | (splitBy3(z) << 2);
}
// Computes the bounding box of the octree child with the given index (0-7).
// Bit 2 selects the upper x-half, bit 1 the upper y-half, bit 0 the upper z-half.
inline BoundingBox childBoundingBoxOf(Vector3 min, Vector3 max, int index) {
	BoundingBox box;
	const Vector3 center = min + ((max - min) * 0.5);

	const bool upperX = (index & 0b100) != 0;
	const bool upperY = (index & 0b010) != 0;
	const bool upperZ = (index & 0b001) != 0;

	box.min.x = upperX ? center.x : min.x;
	box.max.x = upperX ? max.x : center.x;
	box.min.y = upperY ? center.y : min.y;
	box.max.y = upperY ? max.y : center.y;
	box.min.z = upperZ ? center.z : min.z;
	box.max.z = upperZ ? max.z : center.z;

	return box;
}
// Derives cubic bounding-box stats from the source, prints a human-readable
// summary to stdout, and terminates the process (exit code 123) if the
// bounding box is degenerate along any axis.
inline Stats ComputeStats(const Source& source)
{
const Vector3& min = source.min;
Vector3 max = source.max;
const int64_t totalBytes = source.filesize;
const int64_t totalPoints = source.numPoints;
// expand the box to a cube so every axis has the same extent
double cubeSize = (max - min).max();
Vector3 size = { cubeSize, cubeSize, cubeSize };
max = min + cubeSize;
std::string strMin = "[" + std::to_string(min.x) + ", " + std::to_string(min.y) + ", " + std::to_string(min.z) + "]";
std::string strMax = "[" + std::to_string(max.x) + ", " + std::to_string(max.y) + ", " + std::to_string(max.z) + "]";
std::string strSize = "[" + std::to_string(size.x) + ", " + std::to_string(size.y) + ", " + std::to_string(size.z) + "]";
std::string strTotalFileSize;
{ // pick the largest unit that keeps the value >= 1
int64_t KB = 1024;
int64_t MB = 1024 * KB;
int64_t GB = 1024 * MB;
int64_t TB = 1024 * GB;
if (totalBytes >= TB) {
strTotalFileSize = formatNumber(double(totalBytes) / double(TB), 1) + " TB";
}
else if (totalBytes >= GB) {
strTotalFileSize = formatNumber(double(totalBytes) / double(GB), 1) + " GB";
}
else if (totalBytes >= MB) {
strTotalFileSize = formatNumber(double(totalBytes) / double(MB), 1) + " MB";
}
else {
strTotalFileSize = formatNumber(double(totalBytes), 1) + " bytes";
}
}
std::cout << "cubicAABB: {\n";
std::cout << " \"min\": " << strMin << ",\n";
std::cout << " \"max\": " << strMax << ",\n";
std::cout << " \"size\": " << strSize << "\n";
std::cout << "}\n";
std::cout << "#points: " << formatNumber(totalPoints) << std::endl;
std::cout << "total file size: " << strTotalFileSize << std::endl;
{ // sanity check
bool sizeError = (size.x == 0.0) || (size.y == 0.0) || (size.z == 0);
if (sizeError) {
std::cout << "invalid bounding box. at least one axis has a size of zero.";
exit(123);
}
}
return { min, max, totalBytes, totalPoints };
}
}
#pragma once
#include "Vector3.h"
#include "Constants.h"
#include "structures.h"
#include <atomic>
#include <map>
#include <array>
#include <sstream>
#include <locale>
#include <iostream>
namespace Potree
{
// Component-wise minimum of two vectors.
inline Vector3 min(const Vector3& a, const Vector3& b)
{
	Vector3 result;
	result.x = std::min(a.x, b.x);
	result.y = std::min(a.y, b.y);
	result.z = std::min(a.z, b.z);
	return result;
}
// Component-wise maximum of two vectors.
inline Vector3 max(const Vector3& a, const Vector3& b)
{
	Vector3 result;
	result.x = std::max(a.x, b.x);
	result.y = std::max(a.y, b.y);
	result.z = std::max(a.z, b.z);
	return result;
}
// Dequantizes an int32 position: world = int * scale + offset, per axis.
inline Vector3 IntPointToDouble(const int32_t position[3], const Vector3& scale, const Vector3& offset)
{
	const Vector3 asDouble(position[0], position[1], position[2]);
	return asDouble * scale + offset;
}
inline std::array<int32_t, 3> DoublePointToInt(const Vector3& position, const Vector3& scale, const Vector3& offset)
{
std::array<int32_t, 3> res = {};
Vector3 scaled = (position - offset) / scale;
for (int i = 0; i < 3; i++)
res[i] = *(&scaled.x + i);
return res;
}
// In-memory mirror of a LAS point data record format 2
// (quantized XYZ + intensity + per-return flags + RGB).
// NOTE(review): the LAS spec defines scan angle rank as signed; it is stored
// here as uint8_t - confirm that readers reinterpret it as needed.
struct LASPointF2 {
int32_t x;
int32_t y;
int32_t z;
uint16_t intensity;
uint8_t returnNumber;
uint8_t classification;
uint8_t scanAngleRank;
uint8_t userData;
uint16_t pointSourceID;
uint16_t r;
uint16_t g;
uint16_t b;
};
struct BoundingBox {
Vector3 min;
Vector3 max;
BoundingBox() {
this->min = { Infinity,Infinity,Infinity };
this->max = { -Infinity,-Infinity,-Infinity };
}
BoundingBox(Vector3 min, Vector3 max) {
this->min = min;
this->max = max;
}
};
// Formats 'number' with a fixed number of decimals and apostrophe thousands
// separators (supplied by the project-defined punct_facet).
template<class T>
inline std::string formatNumber(T number, int decimals = 0) {
	std::ostringstream formatted;
	formatted.imbue(std::locale(std::cout.getloc(), new punct_facet));
	formatted << std::fixed << std::setprecision(decimals) << number;
	return formatted.str();
}
// see https://www.forceflow.be/2013/10/07/morton-encodingdecoding-through-bit-interleaving-implementations/
// Spreads the lowest 21 bits of 'a' so that input bit i lands on output bit
// 3*i, leaving two zero bits between consecutive input bits. Bits above
// bit 20 are ignored.
inline uint64_t splitBy3(unsigned int a) {
	uint64_t code = uint64_t(a) & 0x1fffff;           // only the low 21 bits matter
	code = (code | code << 32) & 0x001f00000000ffff;
	code = (code | code << 16) & 0x001f0000ff0000ff;
	code = (code | code << 8)  & 0x100f00f00f00f00f;
	code = (code | code << 4)  & 0x10c30c30c30c30c3;
	code = (code | code << 2)  & 0x1249249249249249;
	return code;
}
// Interleaves the low 21 bits of x, y and z into a 63-bit Morton code:
// x occupies bits 0,3,6,..., y bits 1,4,7,..., z bits 2,5,8,...
inline uint64_t mortonEncode_magicbits(unsigned int x, unsigned int y, unsigned int z) {
	return splitBy3(x) | (splitBy3(y) << 1) | (splitBy3(z) << 2);
}
// Computes the bounding box of the octree child with the given index (0-7).
// Bit 2 selects the upper x-half, bit 1 the upper y-half, bit 0 the upper z-half.
inline BoundingBox childBoundingBoxOf(Vector3 min, Vector3 max, int index) {
	BoundingBox box;
	const Vector3 center = min + ((max - min) * 0.5);

	const bool upperX = (index & 0b100) != 0;
	const bool upperY = (index & 0b010) != 0;
	const bool upperZ = (index & 0b001) != 0;

	box.min.x = upperX ? center.x : min.x;
	box.max.x = upperX ? max.x : center.x;
	box.min.y = upperY ? center.y : min.y;
	box.max.y = upperY ? max.y : center.y;
	box.min.z = upperZ ? center.z : min.z;
	box.max.z = upperZ ? max.z : center.z;

	return box;
}
// Derives cubic bounding-box stats from the source, prints a human-readable
// summary to stdout, and terminates the process (exit code 123) if the
// bounding box is degenerate along any axis.
inline Stats ComputeStats(const Source& source)
{
const Vector3& min = source.min;
Vector3 max = source.max;
const int64_t totalBytes = source.filesize;
const int64_t totalPoints = source.numPoints;
// expand the box to a cube so every axis has the same extent
double cubeSize = (max - min).max();
Vector3 size = { cubeSize, cubeSize, cubeSize };
max = min + cubeSize;
std::string strMin = "[" + std::to_string(min.x) + ", " + std::to_string(min.y) + ", " + std::to_string(min.z) + "]";
std::string strMax = "[" + std::to_string(max.x) + ", " + std::to_string(max.y) + ", " + std::to_string(max.z) + "]";
std::string strSize = "[" + std::to_string(size.x) + ", " + std::to_string(size.y) + ", " + std::to_string(size.z) + "]";
std::string strTotalFileSize;
{ // pick the largest unit that keeps the value >= 1
int64_t KB = 1024;
int64_t MB = 1024 * KB;
int64_t GB = 1024 * MB;
int64_t TB = 1024 * GB;
if (totalBytes >= TB) {
strTotalFileSize = formatNumber(double(totalBytes) / double(TB), 1) + " TB";
}
else if (totalBytes >= GB) {
strTotalFileSize = formatNumber(double(totalBytes) / double(GB), 1) + " GB";
}
else if (totalBytes >= MB) {
strTotalFileSize = formatNumber(double(totalBytes) / double(MB), 1) + " MB";
}
else {
strTotalFileSize = formatNumber(double(totalBytes), 1) + " bytes";
}
}
std::cout << "cubicAABB: {\n";
std::cout << " \"min\": " << strMin << ",\n";
std::cout << " \"max\": " << strMax << ",\n";
std::cout << " \"size\": " << strSize << "\n";
std::cout << "}\n";
std::cout << "#points: " << formatNumber(totalPoints) << std::endl;
std::cout << "total file size: " << strTotalFileSize << std::endl;
{ // sanity check
bool sizeError = (size.x == 0.0) || (size.y == 0.0) || (size.z == 0);
if (sizeError) {
std::cout << "invalid bounding box. at least one axis has a size of zero.";
exit(123);
}
}
return { min, max, totalBytes, totalPoints };
}
}

View File

@@ -1,328 +1,328 @@
#pragma once
#include "Attributes.h"
#include "Node.h"
#include <deque>
#include <condition_variable>
namespace Potree
{
namespace indexer
{
//constexpr int numSampleThreads = 10;
//constexpr int numFlushThreads = 36;
constexpr int maxPointsPerChunk = 10'000;
// Number of worker threads used for sampling: one per logical processor.
inline int numSampleThreads() {
	const auto cpu = getCpuData();
	return cpu.numProcessors;
}
// Serialized hierarchy: 'stepSize' tree levels per hierarchy chunk, the raw
// serialized bytes, and the byte size of the first (root) chunk.
struct Hierarchy {
int64_t stepSize = 0;
std::vector<uint8_t> buffer;
int64_t firstChunkSize = 0;
};
// One spatial chunk produced by the chunking pass: its bounding box,
// the file its points are stored in, and its identifier.
struct Chunk {
Vector3 min;
Vector3 max;
std::string file;
std::string id;
};
struct Chunks {
std::vector<std::shared_ptr<Chunk>> list;
Vector3 min;
Vector3 max;
Attributes attributes;
Chunks(const std::vector<std::shared_ptr<Chunk>>& list, Vector3 min, Vector3 max) {
this->list = list;
this->min = min;
this->max = max;
}
};
std::shared_ptr<Chunks> getChunks(const std::string& pathIn);
struct Indexer;
// Buffers serialized node data and writes it to the octree output file.
// Data is appended to activeBuffer; buffers that reach 'capacity' move to
// the backlog, which the thread launched by launchWriterThread() drains.
struct Writer {
Indexer* indexer = nullptr;
// buffer capacity in bytes before it is moved to the backlog
int64_t capacity = 16 * 1024 * 1024;
// copy node data here first
std::shared_ptr<Buffer> activeBuffer = nullptr;
// backlog of buffers that reached capacity and are ready to be written to disk
std::deque<std::shared_ptr<Buffer>> backlog;
bool closeRequested = false;
bool closed = false;
std::condition_variable cvClose;
std::fstream fsOctree;
//thread tWrite;
std::mutex mtx;
Writer(Indexer* indexer);
void writeAndUnload(Node* node);
void launchWriterThread();
// Requests shutdown and blocks until the writer thread has finished.
void closeAndWait();
int64_t backlogSizeMB() const;
};
// Buffers per-node hierarchy records and appends them, grouped by batch
// (the first hierarchyStepSize+1 characters of the node name), to
// <path>/<batch>.bin files. Public entry points are guarded by 'mtx'.
struct HierarchyFlusher {
// Minimal per-node record that gets serialized to disk.
struct HNode {
std::string name;
int64_t byteOffset = 0;
int64_t byteSize = 0;
int64_t numPoints = 0;
};
std::mutex mtx;
std::string path;
// number of records appended per batch file
std::unordered_map<std::string, int> chunks;
std::vector<HNode> buffer;
HierarchyFlusher(const std::string& path) {
this->path = path;
this->clear();
}
// Removes and recreates the target directory.
void clear() {
std::filesystem::remove_all(path);
std::filesystem::create_directories(path);
}
// Queues one node; flushes automatically in batches to bound memory usage.
void write(Node* node, int hierarchyStepSize) {
std::lock_guard<std::mutex> lock(mtx);
HNode hnode = {
.name = node->name,
.byteOffset = node->byteOffset,
.byteSize = node->byteSize,
.numPoints = node->numPoints,
};
buffer.push_back(hnode);
if (buffer.size() > 10'000) {
this->write(buffer, hierarchyStepSize);
buffer.clear();
}
}
// Writes any remaining queued records.
void flush(int hierarchyStepSize) {
std::lock_guard<std::mutex> lock(mtx);
this->write(buffer, hierarchyStepSize);
buffer.clear();
}
// Groups records by batch key and appends each group to its .bin file.
// Not locked itself; called from the locked entry points above.
void write(const std::vector<HNode>& nodes, int hierarchyStepSize) {
std::unordered_map<std::string, std::vector<HNode>> groups;
for (const auto& node : nodes) {
std::string key = node.name.substr(0, hierarchyStepSize + 1);
if (node.name.size() <= hierarchyStepSize + 1) {
key = "r";
}
groups[key].push_back(node);
// add batch roots to batches (in addition to root batch)
if (node.name.size() == hierarchyStepSize + 1) {
groups[node.name].push_back(node);
}
}
std::filesystem::create_directories(path);
// this structure, but guaranteed to be packed
// struct Record{ size offset
// uint8_t name[31]; 31 0
// uint32_t numPoints; 4 31
// int64_t byteOffset; 8 35
// int32_t byteSize; 4 43
// uint8_t end = '\n'; 1 47
// }; ===
// 48
for (const auto& [key, groupedNodes] : groups) {
Buffer buffer(48 * groupedNodes.size());
for (int i = 0; i < groupedNodes.size(); i++) {
const auto& node = groupedNodes[i];
// name is space-padded to 31 bytes
memset(buffer.data_u8 + 48 * i, ' ', 31);
memcpy(buffer.data_u8 + 48 * i, node.name.c_str(), node.name.size());
buffer.set<uint32_t>(node.numPoints, 48 * i + 31);
buffer.set<uint64_t>(node.byteOffset, 48 * i + 35);
buffer.set<uint32_t>(node.byteSize, 48 * i + 43);
buffer.set<char >('\n', 48 * i + 47);
}
std::string filepath = path + "/" + key + ".bin";
std::fstream fout(filepath, std::ios::app | std::ios::out | std::ios::binary);
fout.write(buffer.data_char, buffer.size);
fout.close();
chunks[key] += groupedNodes.size();
}
}
};
// A named group of nodes forming one chunk of the serialized hierarchy.
struct HierarchyChunk {
std::string name = "";
std::vector<Node*> nodes;
};
// Byte range (offset/size) of a chunk root's data that was flushed to disk.
struct FlushedChunkRoot {
std::shared_ptr<Node> node;
int64_t offset = 0;
int64_t size = 0;
};
// Chunk-root tree node used when stitching flushed chunk roots back together.
struct CRNode {
	std::string name = "";
	// Associated octree node; previously left uninitialized, now defaults to null.
	Node* node = nullptr;
	std::vector<std::shared_ptr<CRNode>> children;
	std::vector<FlushedChunkRoot> fcrs;
	int numPoints = 0;

	CRNode() {
		children.resize(8, nullptr);
	}

	// Pre-order traversal over this node and all non-null descendants.
	void traverse(std::function<void(CRNode*)> callback) {
		callback(this);
		for (const auto& child : children) {
			if (child != nullptr) {
				child->traverse(callback);
			}
		}
	}

	// Post-order traversal over this node and all non-null descendants.
	void traversePost(std::function<void(CRNode*)> callback) {
		for (const auto& child : children) {
			if (child != nullptr) {
				child->traversePost(callback);
			}
		}
		callback(this);
	}

	// True when no child slot is occupied.
	bool isLeaf() {
		for (const auto& child : children) {
			if (child != nullptr) {
				return false;
			}
		}
		return true;
	}
};
// Drives the indexing phase: owns the octree root, the octree writer and the
// hierarchy flusher, and tracks byte/memory/progress counters.
struct Indexer {
std::string targetDir = "";
Options options;
Attributes attributes;
std::shared_ptr<Node> root;
std::shared_ptr<Writer> writer;
std::shared_ptr<HierarchyFlusher> hierarchyFlusher;
std::vector<std::shared_ptr<Node>> detachedParts;
// next free byte offset in the octree output
std::atomic_int64_t byteOffset = 0;
double scale = 0.001;
double spacing = 1.0;
std::atomic_int64_t dbg = 0;
std::mutex mtx_depth;
int64_t octreeDepth = 0;
std::atomic_int64_t bytesInMemory = 0;
std::atomic_int64_t bytesToWrite = 0;
std::atomic_int64_t bytesWritten = 0;
std::mutex mtx_chunkRoot;
std::fstream fChunkRoots;
std::vector<FlushedChunkRoot> flushedChunkRoots;
// Creates writer/flusher and opens (truncating) tmpChunkRoots.bin.
Indexer(const std::string& targetDir) {
this->targetDir = targetDir;
writer = std::make_shared<Writer>(this);
// NOTE(review): unqualified make_shared resolves through ADL on std::string;
// prefer std::make_shared for consistency with the line above.
hierarchyFlusher = make_shared<HierarchyFlusher>(targetDir + "/.hierarchyChunks");
std::string chunkRootFile = targetDir + "/tmpChunkRoots.bin";
fChunkRoots.open(chunkRootFile, std::ios::out | std::ios::binary);
}
~Indexer() {
fChunkRoots.close();
}
void waitUntilWriterBacklogBelow(int maxMegabytes);
void waitUntilMemoryBelow(int maxMegabytes);
std::string createMetadata(const Options& options, const State& state, const Hierarchy& hierarchy);
HierarchyChunk gatherChunk(Node* start, int levels);
std::vector<HierarchyChunk> createHierarchyChunks(Node* root, int hierarchyStepSize);
Hierarchy createHierarchy();
void flushChunkRoot(std::shared_ptr<Node> chunkRoot);
void reloadChunkRoots();
std::vector<CRNode> processChunkRoots();
};
// numpunct facet used by formatNumber: '.' as decimal point and an
// apostrophe as thousands separator, grouping digits in threes.
class punct_facet : public std::numpunct<char> {
protected:
	char do_decimal_point() const { return '.'; }
	char do_thousands_sep() const { return '\''; }
	std::string do_grouping() const { return "\3"; }
};
void doIndexing(const std::string& targetDir, State& state, const Options& options, Sampler& sampler);
}
#pragma once
#include "Attributes.h"
#include "Node.h"
#include <deque>
#include <condition_variable>
namespace Potree
{
namespace indexer
{
//constexpr int numSampleThreads = 10;
//constexpr int numFlushThreads = 36;
constexpr int maxPointsPerChunk = 10'000;
// Number of worker threads used for sampling: one per logical processor.
inline int numSampleThreads() {
	const auto cpu = getCpuData();
	return cpu.numProcessors;
}
// Serialized hierarchy: 'stepSize' tree levels per hierarchy chunk, the raw
// serialized bytes, and the byte size of the first (root) chunk.
struct Hierarchy {
int64_t stepSize = 0;
std::vector<uint8_t> buffer;
int64_t firstChunkSize = 0;
};
// One spatial chunk produced by the chunking pass: its bounding box,
// the file its points are stored in, and its identifier.
struct Chunk {
Vector3 min;
Vector3 max;
std::string file;
std::string id;
};
struct Chunks {
std::vector<std::shared_ptr<Chunk>> list;
Vector3 min;
Vector3 max;
Attributes attributes;
Chunks(const std::vector<std::shared_ptr<Chunk>>& list, Vector3 min, Vector3 max) {
this->list = list;
this->min = min;
this->max = max;
}
};
std::shared_ptr<Chunks> getChunks(const std::string& pathIn);
struct Indexer;
// Buffers serialized node data and writes it to the octree output file.
// Data is appended to activeBuffer; buffers that reach 'capacity' move to
// the backlog, which the thread launched by launchWriterThread() drains.
struct Writer {
Indexer* indexer = nullptr;
// buffer capacity in bytes before it is moved to the backlog
int64_t capacity = 16 * 1024 * 1024;
// copy node data here first
std::shared_ptr<Buffer> activeBuffer = nullptr;
// backlog of buffers that reached capacity and are ready to be written to disk
std::deque<std::shared_ptr<Buffer>> backlog;
bool closeRequested = false;
bool closed = false;
std::condition_variable cvClose;
std::fstream fsOctree;
//thread tWrite;
std::mutex mtx;
Writer(Indexer* indexer);
void writeAndUnload(Node* node);
void launchWriterThread();
// Requests shutdown and blocks until the writer thread has finished.
void closeAndWait();
int64_t backlogSizeMB() const;
};
// Buffers per-node hierarchy records and appends them, grouped by batch
// (the first hierarchyStepSize+1 characters of the node name), to
// <path>/<batch>.bin files. Public entry points are guarded by 'mtx'.
struct HierarchyFlusher {
// Minimal per-node record that gets serialized to disk.
struct HNode {
std::string name;
int64_t byteOffset = 0;
int64_t byteSize = 0;
int64_t numPoints = 0;
};
std::mutex mtx;
std::string path;
// number of records appended per batch file
std::unordered_map<std::string, int> chunks;
std::vector<HNode> buffer;
HierarchyFlusher(const std::string& path) {
this->path = path;
this->clear();
}
// Removes and recreates the target directory.
void clear() {
std::filesystem::remove_all(path);
std::filesystem::create_directories(path);
}
// Queues one node; flushes automatically in batches to bound memory usage.
void write(Node* node, int hierarchyStepSize) {
std::lock_guard<std::mutex> lock(mtx);
HNode hnode = {
.name = node->name,
.byteOffset = node->byteOffset,
.byteSize = node->byteSize,
.numPoints = node->numPoints,
};
buffer.push_back(hnode);
if (buffer.size() > 10'000) {
this->write(buffer, hierarchyStepSize);
buffer.clear();
}
}
// Writes any remaining queued records.
void flush(int hierarchyStepSize) {
std::lock_guard<std::mutex> lock(mtx);
this->write(buffer, hierarchyStepSize);
buffer.clear();
}
// Groups records by batch key and appends each group to its .bin file.
// Not locked itself; called from the locked entry points above.
void write(const std::vector<HNode>& nodes, int hierarchyStepSize) {
std::unordered_map<std::string, std::vector<HNode>> groups;
for (const auto& node : nodes) {
std::string key = node.name.substr(0, hierarchyStepSize + 1);
if (node.name.size() <= hierarchyStepSize + 1) {
key = "r";
}
groups[key].push_back(node);
// add batch roots to batches (in addition to root batch)
if (node.name.size() == hierarchyStepSize + 1) {
groups[node.name].push_back(node);
}
}
std::filesystem::create_directories(path);
// this structure, but guaranteed to be packed
// struct Record{ size offset
// uint8_t name[31]; 31 0
// uint32_t numPoints; 4 31
// int64_t byteOffset; 8 35
// int32_t byteSize; 4 43
// uint8_t end = '\n'; 1 47
// }; ===
// 48
for (const auto& [key, groupedNodes] : groups) {
Buffer buffer(48 * groupedNodes.size());
for (int i = 0; i < groupedNodes.size(); i++) {
const auto& node = groupedNodes[i];
// name is space-padded to 31 bytes
memset(buffer.data_u8 + 48 * i, ' ', 31);
memcpy(buffer.data_u8 + 48 * i, node.name.c_str(), node.name.size());
buffer.set<uint32_t>(node.numPoints, 48 * i + 31);
buffer.set<uint64_t>(node.byteOffset, 48 * i + 35);
buffer.set<uint32_t>(node.byteSize, 48 * i + 43);
buffer.set<char >('\n', 48 * i + 47);
}
std::string filepath = path + "/" + key + ".bin";
std::fstream fout(filepath, std::ios::app | std::ios::out | std::ios::binary);
fout.write(buffer.data_char, buffer.size);
fout.close();
chunks[key] += groupedNodes.size();
}
}
};
// A named group of nodes forming one chunk of the serialized hierarchy.
struct HierarchyChunk {
std::string name = "";
std::vector<Node*> nodes;
};
// Byte range (offset/size) of a chunk root's data that was flushed to disk.
struct FlushedChunkRoot {
std::shared_ptr<Node> node;
int64_t offset = 0;
int64_t size = 0;
};
// Chunk-root tree node used when stitching flushed chunk roots back together.
struct CRNode {
	std::string name = "";
	// Associated octree node; previously left uninitialized, now defaults to null.
	Node* node = nullptr;
	std::vector<std::shared_ptr<CRNode>> children;
	std::vector<FlushedChunkRoot> fcrs;
	int numPoints = 0;

	CRNode() {
		children.resize(8, nullptr);
	}

	// Pre-order traversal over this node and all non-null descendants.
	void traverse(std::function<void(CRNode*)> callback) {
		callback(this);
		for (const auto& child : children) {
			if (child != nullptr) {
				child->traverse(callback);
			}
		}
	}

	// Post-order traversal over this node and all non-null descendants.
	void traversePost(std::function<void(CRNode*)> callback) {
		for (const auto& child : children) {
			if (child != nullptr) {
				child->traversePost(callback);
			}
		}
		callback(this);
	}

	// True when no child slot is occupied.
	bool isLeaf() {
		for (const auto& child : children) {
			if (child != nullptr) {
				return false;
			}
		}
		return true;
	}
};
// Drives the indexing phase: owns the octree root, the octree writer and the
// hierarchy flusher, and tracks byte/memory/progress counters.
struct Indexer {
std::string targetDir = "";
Options options;
Attributes attributes;
std::shared_ptr<Node> root;
std::shared_ptr<Writer> writer;
std::shared_ptr<HierarchyFlusher> hierarchyFlusher;
std::vector<std::shared_ptr<Node>> detachedParts;
// next free byte offset in the octree output
std::atomic_int64_t byteOffset = 0;
double scale = 0.001;
double spacing = 1.0;
std::atomic_int64_t dbg = 0;
std::mutex mtx_depth;
int64_t octreeDepth = 0;
std::atomic_int64_t bytesInMemory = 0;
std::atomic_int64_t bytesToWrite = 0;
std::atomic_int64_t bytesWritten = 0;
std::mutex mtx_chunkRoot;
std::fstream fChunkRoots;
std::vector<FlushedChunkRoot> flushedChunkRoots;
// Creates writer/flusher and opens (truncating) tmpChunkRoots.bin.
Indexer(const std::string& targetDir) {
this->targetDir = targetDir;
writer = std::make_shared<Writer>(this);
// NOTE(review): unqualified make_shared resolves through ADL on std::string;
// prefer std::make_shared for consistency with the line above.
hierarchyFlusher = make_shared<HierarchyFlusher>(targetDir + "/.hierarchyChunks");
std::string chunkRootFile = targetDir + "/tmpChunkRoots.bin";
fChunkRoots.open(chunkRootFile, std::ios::out | std::ios::binary);
}
~Indexer() {
fChunkRoots.close();
}
void waitUntilWriterBacklogBelow(int maxMegabytes);
void waitUntilMemoryBelow(int maxMegabytes);
std::string createMetadata(const Options& options, const State& state, const Hierarchy& hierarchy);
HierarchyChunk gatherChunk(Node* start, int levels);
std::vector<HierarchyChunk> createHierarchyChunks(Node* root, int hierarchyStepSize);
Hierarchy createHierarchy();
void flushChunkRoot(std::shared_ptr<Node> chunkRoot);
void reloadChunkRoots();
std::vector<CRNode> processChunkRoots();
};
// numpunct facet for locale-independent number formatting: '.' as decimal
// point, apostrophe as thousands separator, digits grouped in threes
// (e.g. 1'234'567.89).
class punct_facet : public std::numpunct<char> {
protected:
	char do_decimal_point() const override { return '.'; }
	char do_thousands_sep() const override { return '\''; }
	std::string do_grouping() const override { return "\3"; }
};
void doIndexing(const std::string& targetDir, State& state, const Options& options, Sampler& sampler);
}
}

View File

@@ -0,0 +1,18 @@
#pragma once
#include <string>
namespace Potree
{
// Abstract logging sink for the converter. Implementations receive
// already-formatted, single-message strings at one of three severities.
class ILogger
{
public:
	// Log a message about a potential problem that does not stop processing.
	virtual void warn(const std::string& msg) = 0;
	// Log a serious or fatal failure.
	virtual void error(const std::string& msg) = 0;
	// Log informational progress output.
	virtual void info(const std::string& msg) = 0;
	virtual ~ILogger() = default;
};

// Global logger hook; stays nullptr until the host application installs an
// implementation. Every call site must check for nullptr before use.
inline ILogger* g_logger = nullptr;
}

View File

@@ -1,267 +1,267 @@
#pragma once
#include "structures.h"
#include "Attributes.h"
#ifndef __clang__
#include <execution>
#endif // !__clang__
namespace Potree
{
// Poisson-disk subsampling of a local octree, bottom-up: every inner node
// pulls a distance-spaced subset of its children's points up into itself;
// points that are not pulled up remain in the children.
struct SamplerPoisson : public Sampler {

	// Subsample the local octree rooted at "node", bottom up.
	//
	// node            root of the loaded local octree
	// attributes      point layout; position is the 3 leading int32 values,
	//                 decoded with posScale/posOffset
	// baseSpacing     minimum point distance at the octree root; halves per level
	// onNodeCompleted called for children whose final point set is settled
	// onNodeDiscarded called for leaf children whose points were all taken by
	//                 the parent; such children are removed from the tree
	void sample(Node* node, const Attributes& attributes, double baseSpacing,
		std::function<void(Node*)> onNodeCompleted,
		std::function<void(Node*)> onNodeDiscarded
	) {

		// candidate point in world space plus its origin
		// (child octant, index inside that child's point buffer)
		struct Point {
			double x;
			double y;
			double z;
			int32_t pointIndex;
			int32_t childIndex;
		};

		// post-order traversal over not-yet-sampled nodes: children are
		// sampled before their parents
		std::function<void(Node*, std::function<void(Node*)>)> traversePost = [&traversePost](Node* node, std::function<void(Node*)> callback) {
			for (const auto& child : node->children) {
				if (child != nullptr && !child->sampled) {
					traversePost(child.get(), callback);
				}
			}
			callback(node);
		};

		traversePost(node, [baseSpacing, &onNodeCompleted, &onNodeDiscarded, &attributes](Node* node) {
			node->sampled = true;

			const auto& scale = attributes.posScale;
			const auto& offset = attributes.posOffset;

			// leaves keep their points untouched; only inner nodes sample.
			// (the bool result is discarded by the std::function<void(Node*)>
			// wrapper; it only documents which path was taken)
			if (node->isLeaf()) {
				return false;
			}

			// =================================================================
			// SAMPLING
			// =================================================================
			//
			// first, check for each point whether it's accepted or rejected
			// save result in an array with one element for each point

			int64_t numPointsInChildren = 0;
			for (const auto& child : node->children) {
				if (child == nullptr) {
					continue;
				}
				numPointsInChildren += child->numPoints;
			}

			std::vector<Point> points;
			points.reserve(numPointsInChildren);

			// per-octant accept flags and reject counters, indexed by child
			// octant [0, 8)
			std::vector<std::vector<int8_t>> acceptedChildPointFlags;
			std::vector<int64_t> numRejectedPerChild(8, 0);
			int64_t numAccepted = 0;

			// decode all child points into world space
			for (int64_t childIndex = 0; childIndex < 8; childIndex++) {
				const auto& child = node->children[childIndex];

				if (child == nullptr) {
					// keep the flag vector aligned with the octant index.
					// (numRejectedPerChild is already pre-sized to 8 zeros; the
					// previous push_back here only grew it past 8 for no effect)
					acceptedChildPointFlags.push_back({});
					continue;
				}

				acceptedChildPointFlags.emplace_back(child->numPoints, 0);

				for (int64_t i = 0; i < child->numPoints; i++) {
					int64_t pointOffset = i * attributes.bytes;
					int32_t* xyz = reinterpret_cast<int32_t*>(child->points->data_u8 + pointOffset);

					double x = (xyz[0] * scale.x) + offset.x;
					double y = (xyz[1] * scale.y) + offset.y;
					double z = (xyz[2] * scale.z) + offset.z;

					// brace-init instead of emplace_back: Point is an aggregate,
					// so emplace_back(x, y, z, ...) requires C++20 parenthesized
					// aggregate initialization
					points.push_back({ x, y, z, static_cast<int32_t>(i), static_cast<int32_t>(childIndex) });
				}
			}

			// scratch buffer of accepted points, newest last; thread_local so
			// the allocation is reused across nodes
			thread_local std::vector<Point> dbgAccepted(1'000'000);
			int64_t dbgNumAccepted = 0;

			// the minimum spacing halves with every octree level
			double spacing = baseSpacing / pow(2.0, node->level());
			double squaredSpacing = spacing * spacing;

			auto squaredDistance = [](const Point& a, const Point& b) {
				double dx = a.x - b.x;
				double dy = a.y - b.y;
				double dz = a.z - b.z;
				double dd = dx * dx + dy * dy + dz * dz;
				return dd;
			};

			auto center = (node->min + node->max) * 0.5;

			// A candidate is accepted if no already-accepted point lies within
			// "spacing" of it. Because candidates arrive ordered by distance to
			// the node center, the backwards scan can stop once an accepted
			// point is closer to the center than (candidate distance - spacing).
			auto checkAccept = [&dbgNumAccepted, spacing, squaredSpacing, &squaredDistance, center](const Point& candidate) {
				auto cx = candidate.x - center.x;
				auto cy = candidate.y - center.y;
				auto cz = candidate.z - center.z;
				auto cdd = cx * cx + cy * cy + cz * cz;
				auto cd = sqrt(cdd);
				auto limit = (cd - spacing);
				auto limitSquared = limit * limit;

				int64_t j = 0;
				for (int64_t i = dbgNumAccepted - 1; i >= 0; i--) {
					auto& point = dbgAccepted[i];

					// check distance to center
					auto px = point.x - center.x;
					auto py = point.y - center.y;
					auto pz = point.z - center.z;
					auto pdd = px * px + py * py + pz * pz;

					// stop when differences to center between candidate and accepted exceeds the spacing
					// any other previously accepted point will be even closer to the center.
					if (pdd < limitSquared) {
						return true;
					}

					double dd = squaredDistance(point, candidate);
					if (dd < squaredSpacing) {
						return false;
					}

					j++;

					// also put a limit at x distance checks
					if (j > 10'000) {
						return true;
					}
				}

				return true;
			};

			// sort candidates by distance to the node center; parallel sort
			// where <execution> is available
#ifdef __clang__
			std::sort(points.begin(), points.end(), [&center = std::as_const(center)](const Point& a, const Point& b) -> bool {
#else
			auto parallel = std::execution::par_unseq;
			std::sort(parallel, points.begin(), points.end(), [&center = std::as_const(center)](const Point& a, const Point& b) -> bool {
#endif
				auto ax = a.x - center.x;
				auto ay = a.y - center.y;
				auto az = a.z - center.z;
				auto add = ax * ax + ay * ay + az * az;

				auto bx = b.x - center.x;
				auto by = b.y - center.y;
				auto bz = b.z - center.z;
				auto bdd = bx * bx + by * by + bz * bz;

				// sort by distance to center
				return add < bdd;

				// sort by manhattan distance to center
				//return (ax + ay + az) < (bx + by + bz);

				// sort by z axis
				//return a.z < b.z;
			});

			for (const Point& point : points) {
				bool isAccepted = checkAccept(point);

				if (isAccepted) {
					// grow the scratch buffer instead of writing past its end
					if (dbgNumAccepted >= static_cast<int64_t>(dbgAccepted.size())) {
						dbgAccepted.resize(2 * dbgAccepted.size());
					}
					dbgAccepted[dbgNumAccepted] = point;
					dbgNumAccepted++;
					numAccepted++;
				}
				else {
					numRejectedPerChild[point.childIndex]++;
				}

				acceptedChildPointFlags[point.childIndex][point.pointIndex] = isAccepted;
			}

			// distribute the points: accepted ones move into this node,
			// rejected ones replace the child's buffer
			auto accepted = std::make_shared<Buffer>(numAccepted * attributes.bytes);
			for (int64_t childIndex = 0; childIndex < 8; childIndex++) {
				const auto& child = node->children[childIndex];

				if (child == nullptr) {
					continue;
				}

				auto numRejected = numRejectedPerChild[childIndex];
				auto& acceptedFlags = acceptedChildPointFlags[childIndex];
				auto rejected = std::make_shared<Buffer>(numRejected * attributes.bytes);

				for (int64_t i = 0; i < child->numPoints; i++) {
					auto isAccepted = acceptedFlags[i];
					int64_t pointOffset = i * attributes.bytes;

					if (isAccepted) {
						accepted->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
					else {
						rejected->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
				}

				if (numRejected == 0 && child->isLeaf())
				{
					// all points were pulled up; drop the now-empty leaf
					onNodeDiscarded(child.get());

					node->children[childIndex] = nullptr;
					continue;
				}

				if (numRejected > 0)
				{
					child->points = rejected;
					child->numPoints = numRejected;

					onNodeCompleted(child.get());
				}
				else if (numRejected == 0) {
					// the parent has taken all points from this child,
					// so make this child an empty inner node.
					// Otherwise, the hierarchy file will claim that
					// this node has points but because it doesn't have any,
					// decompressing the nonexistent point buffer fails
					// https://github.com/potree/potree/issues/1125
					child->points = nullptr;
					child->numPoints = 0;

					onNodeCompleted(child.get());
				}
			}

			node->points = accepted;
			node->numPoints = numAccepted;

			return true;
		});
	}
};
}
#pragma once
#include "structures.h"
#include "Attributes.h"
#ifndef __clang__
#include <execution>
#endif // !__clang__
namespace Potree
{
// Poisson-disk subsampling of a local octree, bottom-up: every inner node
// pulls a distance-spaced subset of its children's points up into itself;
// points that are not pulled up remain in the children.
struct SamplerPoisson : public Sampler {

	// subsample a local octree from bottom up
	//
	// node            root of the loaded local octree
	// attributes      point layout; position is the 3 leading int32 values,
	//                 decoded with posScale/posOffset
	// baseSpacing     minimum point distance at the octree root; halves per level
	// onNodeCompleted called for children whose final point set is settled
	// onNodeDiscarded called for leaf children fully absorbed by the parent
	void sample(Node* node, const Attributes& attributes, double baseSpacing,
		std::function<void(Node*)> onNodeCompleted,
		std::function<void(Node*)> onNodeDiscarded
	) {

		// candidate point in world space plus its origin
		// (child octant, index inside that child's point buffer)
		struct Point {
			double x;
			double y;
			double z;
			int32_t pointIndex;
			int32_t childIndex;
		};

		// post-order traversal over not-yet-sampled nodes: children first
		std::function<void(Node*, std::function<void(Node*)>)> traversePost = [&traversePost](Node* node, std::function<void(Node*)> callback) {
			for (const auto& child : node->children) {
				if (child != nullptr && !child->sampled) {
					traversePost(child.get(), callback);
				}
			}
			callback(node);
		};

		traversePost(node, [baseSpacing, &onNodeCompleted, &onNodeDiscarded, &attributes](Node* node) {
			node->sampled = true;

			int64_t numPoints = node->numPoints; // NOTE(review): unused

			const auto size = node->max - node->min; // NOTE(review): unused

			const auto& scale = attributes.posScale;
			const auto& offset = attributes.posOffset;

			// leaves keep their points untouched; the bool result is discarded
			// by the std::function<void(Node*)> wrapper
			if (node->isLeaf()) {
				return false;
			}

			// =================================================================
			// SAMPLING
			// =================================================================
			//
			// first, check for each point whether it's accepted or rejected
			// save result in an array with one element for each point

			int64_t numPointsInChildren = 0;
			for (const auto& child : node->children) {
				if (child == nullptr) {
					continue;
				}
				numPointsInChildren += child->numPoints;
			}

			std::vector<Point> points;
			points.reserve(numPointsInChildren);

			// per-octant accept flags and reject counters, indexed by octant [0, 8)
			std::vector<std::vector<int8_t>> acceptedChildPointFlags;
			std::vector<int64_t> numRejectedPerChild(8, 0);
			int64_t numAccepted = 0;

			// decode all child points into world space
			for (int64_t childIndex = 0; childIndex < 8; childIndex++) {
				const auto& child = node->children[childIndex];

				if (child == nullptr) {
					acceptedChildPointFlags.push_back({});
					// NOTE(review): numRejectedPerChild is pre-sized to 8 zeros
					// above, so this push_back grows it past 8; harmless
					// (indices 0..7 stay valid) but looks unintended
					numRejectedPerChild.push_back({});
					continue;
				}

				acceptedChildPointFlags.emplace_back(child->numPoints, 0);

				for (int64_t i = 0; i < child->numPoints; i++) {
					int64_t pointOffset = i * attributes.bytes;
					int32_t* xyz = reinterpret_cast<int32_t*>(child->points->data_u8 + pointOffset);

					double x = (xyz[0] * scale.x) + offset.x;
					double y = (xyz[1] * scale.y) + offset.y;
					double z = (xyz[2] * scale.z) + offset.z;

					// NOTE(review): emplace_back on an aggregate requires C++20
					// parenthesized aggregate initialization
					points.emplace_back(x, y, z, static_cast<int32_t>(i), static_cast<int32_t>(childIndex));
				}
			}

			unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); // NOTE(review): unused leftover

			// scratch buffer of accepted points, newest last; thread_local so
			// the allocation is reused across nodes.
			// NOTE(review): fixed capacity — more than 1'000'000 accepted
			// points would write out of bounds; confirm the per-node bound.
			thread_local std::vector<Point> dbgAccepted(1'000'000);
			int64_t dbgNumAccepted = 0;

			// minimum spacing halves with every octree level
			double spacing = baseSpacing / pow(2.0, node->level());
			double squaredSpacing = spacing * spacing;

			auto squaredDistance = [](const Point& a, const Point& b) {
				double dx = a.x - b.x;
				double dy = a.y - b.y;
				double dz = a.z - b.z;
				double dd = dx * dx + dy * dy + dz * dz;
				return dd;
			};

			auto center = (node->min + node->max) * 0.5;

			// A candidate is accepted if no already-accepted point lies within
			// "spacing" of it. Because candidates arrive ordered by distance to
			// the node center, the backwards scan can stop once an accepted
			// point is closer to the center than (candidate distance - spacing).
			auto checkAccept = [&dbgNumAccepted, spacing, squaredSpacing, &squaredDistance, center](const Point& candidate) {
				auto cx = candidate.x - center.x;
				auto cy = candidate.y - center.y;
				auto cz = candidate.z - center.z;
				auto cdd = cx * cx + cy * cy + cz * cz;
				auto cd = sqrt(cdd);
				auto limit = (cd - spacing);
				auto limitSquared = limit * limit;

				int64_t j = 0;
				for (int64_t i = dbgNumAccepted - 1; i >= 0; i--)
				{
					auto& point = dbgAccepted[i];

					// check distance to center
					auto px = point.x - center.x;
					auto py = point.y - center.y;
					auto pz = point.z - center.z;
					auto pdd = px * px + py * py + pz * pz;
					//auto pd = sqrt(pdd);

					// stop when differences to center between candidate and accepted exceeds the spacing
					// any other previously accepted point will be even closer to the center.
					if (pdd < limitSquared) {
						return true;
					}

					double dd = squaredDistance(point, candidate);
					if (dd < squaredSpacing) {
						return false;
					}

					j++;

					// also put a limit at x distance checks
					if (j > 10'000) {
						return true;
					}
				}

				return true;
			};

			// sort candidates by distance to the node center; parallel sort
			// where <execution> is available
#ifdef __clang__
			std::sort(points.begin(), points.end(), [&center = std::as_const(center)](const Point& a, const Point& b) -> bool {
#else
			auto parallel = std::execution::par_unseq;
			std::sort(parallel, points.begin(), points.end(), [&center = std::as_const(center)](const Point& a, const Point& b) -> bool {
#endif
				auto ax = a.x - center.x;
				auto ay = a.y - center.y;
				auto az = a.z - center.z;
				auto add = ax * ax + ay * ay + az * az;

				auto bx = b.x - center.x;
				auto by = b.y - center.y;
				auto bz = b.z - center.z;
				auto bdd = bx * bx + by * by + bz * bz;

				// sort by distance to center
				return add < bdd;

				// sort by manhattan distance to center
				//return (ax + ay + az) < (bx + by + bz);

				// sort by z axis
				//return a.z < b.z;
			});

			for (const Point& point : points) {
				bool isAccepted = checkAccept(point);

				if (isAccepted) {
					dbgAccepted[dbgNumAccepted] = point;
					dbgNumAccepted++;
					numAccepted++;
				}
				else {
					numRejectedPerChild[point.childIndex]++;
				}

				acceptedChildPointFlags[point.childIndex][point.pointIndex] = isAccepted;
			}

			// distribute the points: accepted ones move into this node,
			// rejected ones replace the child's buffer
			auto accepted = std::make_shared<Buffer>(numAccepted * attributes.bytes);
			for (int64_t childIndex = 0; childIndex < 8; childIndex++) {
				const auto& child = node->children[childIndex];

				if (child == nullptr) {
					continue;
				}

				auto numRejected = numRejectedPerChild[childIndex];
				auto& acceptedFlags = acceptedChildPointFlags[childIndex];
				auto rejected = std::make_shared<Buffer>(numRejected * attributes.bytes);

				for (int64_t i = 0; i < child->numPoints; i++) {
					auto isAccepted = acceptedFlags[i];
					int64_t pointOffset = i * attributes.bytes;

					if (isAccepted) {
						accepted->write(child->points->data_u8 + pointOffset, attributes.bytes);
						// rejected->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
					else {
						rejected->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
				}

				if (numRejected == 0 && child->isLeaf())
				{
					// all points were pulled up; drop the now-empty leaf
					onNodeDiscarded(child.get());

					node->children[childIndex] = nullptr;
					continue;
				}

				if (numRejected > 0)
				{
					child->points = rejected;
					child->numPoints = numRejected;

					onNodeCompleted(child.get());
				}
				else if (numRejected == 0) {
					// the parent has taken all points from this child,
					// so make this child an empty inner node.
					// Otherwise, the hierarchy file will claim that
					// this node has points but because it doesn't have any,
					// decompressing the nonexistent point buffer fails
					// https://github.com/potree/potree/issues/1125
					child->points = nullptr;
					child->numPoints = 0;

					onNodeCompleted(child.get());
				}
			}

			node->points = accepted;
			node->numPoints = numAccepted;

			return true;
		});
	}
};
}

View File

@@ -1,239 +1,239 @@
#pragma once
#include "structures.h"
#include "Attributes.h"
namespace Potree
{
// Random subsampling of a local octree, bottom-up: each inner node pulls a
// grid-limited subset of its children's points up into itself.
struct SamplerRandom : public Sampler {

	// subsample a local octree from bottom up
	//
	// node            root of the loaded local octree
	// attributes      point layout; position is the 3 leading int32 values,
	//                 decoded with posScale/posOffset
	// baseSpacing     nominal minimum spacing at the root level
	// onNodeCompleted called for children whose final point set is settled
	// onNodeDiscarded called for leaf children fully absorbed by the parent
	void sample(Node* node, const Attributes& attributes, double baseSpacing,
		std::function<void(Node*)> onNodeCompleted,
		std::function<void(Node*)> onNodeDiscarded
	) {

		// NOTE(review): unused in this sampler — leftover from SamplerPoisson
		struct Point {
			double x;
			double y;
			double z;
			int32_t pointIndex;
			int32_t childIndex;
		};

		// post-order traversal over not-yet-sampled nodes: children first
		std::function<void(Node*, std::function<void(Node*)>)> traversePost = [&traversePost](Node* node, std::function<void(Node*)> callback) {
			for (const auto& child : node->children) {
				if (child != nullptr && !child->sampled) {
					traversePost(child.get(), callback);
				}
			}
			callback(node);
		};

		int bytesPerPoint = attributes.bytes;
		Vector3 scale = attributes.posScale;
		Vector3 offset = attributes.posOffset;

		traversePost(node, [bytesPerPoint, baseSpacing, scale, offset, &onNodeCompleted, &onNodeDiscarded, attributes](Node* node) {
			node->sampled = true;

			int64_t numPoints = node->numPoints; // NOTE(review): unused

			// occupancy grid of 128^3 int64 cells (~16 MB per thread). Cells
			// are never cleared; each invocation bumps "iteration" and a cell
			// counts as free until it carries the current stamp.
			int64_t gridSize = 128;
			thread_local std::vector<int64_t> grid(gridSize * gridSize * gridSize, -1);
			thread_local int64_t iteration = 0;
			iteration++;

			auto max = node->max;
			auto min = node->min;
			auto size = max - min;

			// NOTE(review): shadows the captured scale/offset with the same values
			auto scale = attributes.posScale;
			auto offset = attributes.posOffset;

			struct CellIndex {
				int64_t index = -1;
				double distance = 0.0;
			};

			// map a world-space point to its grid cell; "distance" is the
			// point's offset from the cell center (0 at center, sqrt(3) at a corner)
			auto toCellIndex = [min, size, gridSize](Vector3 point) -> CellIndex {

				double nx = (point.x - min.x) / size.x;
				double ny = (point.y - min.y) / size.y;
				double nz = (point.z - min.z) / size.z;

				double lx = 2.0 * fmod(double(gridSize) * nx, 1.0) - 1.0;
				double ly = 2.0 * fmod(double(gridSize) * ny, 1.0) - 1.0;
				double lz = 2.0 * fmod(double(gridSize) * nz, 1.0) - 1.0;

				double distance = sqrt(lx * lx + ly * ly + lz * lz);

				int64_t x = double(gridSize) * nx;
				int64_t y = double(gridSize) * ny;
				int64_t z = double(gridSize) * nz;

				// clamp in case points lie on/outside the node bounds
				x = std::max(int64_t(0), std::min(x, gridSize - 1));
				y = std::max(int64_t(0), std::min(y, gridSize - 1));
				z = std::max(int64_t(0), std::min(z, gridSize - 1));

				int64_t index = x + y * gridSize + z * gridSize * gridSize;

				return { index, distance };
			};

			bool isLeaf = node->isLeaf();

			if (isLeaf) {
				// shuffle?
				//
				// a not particularly efficient approach to shuffling:
				//
				std::vector<int> indices(node->numPoints);
				for (int i = 0; i < node->numPoints; i++) {
					indices[i] = i;
				}

				unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
				shuffle(indices.begin(), indices.end(), std::default_random_engine(seed));

				auto buffer = std::make_shared<Buffer>(node->points->size);

				for (int i = 0; i < node->numPoints; i++) {
					int64_t sourceOffset = i * attributes.bytes;
					int64_t targetOffset = indices[i] * attributes.bytes;

					memcpy(buffer->data_u8 + targetOffset, node->points->data_u8 + sourceOffset, attributes.bytes);
				}

				node->points = buffer;

				return false;
			}

			// =================================================================
			// SAMPLING
			// =================================================================
			//
			// first, check for each point whether it's accepted or rejected
			// save result in an array with one element for each point

			std::vector<std::vector<int8_t>> acceptedChildPointFlags;
			std::vector<int64_t> numRejectedPerChild;
			int64_t numAccepted = 0;

			for (int childIndex = 0; childIndex < 8; childIndex++) {
				auto child = node->children[childIndex];

				if (child == nullptr) {
					// keep both vectors aligned with the octant index
					acceptedChildPointFlags.push_back({});
					numRejectedPerChild.push_back({});
					continue;
				}

				std::vector<int8_t> acceptedFlags(child->numPoints, 0);
				int64_t numRejected = 0;

				for (int i = 0; i < child->numPoints; i++) {

					int64_t pointOffset = i * attributes.bytes;
					int32_t* xyz = reinterpret_cast<int32_t*>(child->points->data_u8 + pointOffset);

					double x = (xyz[0] * scale.x) + offset.x;
					double y = (xyz[1] * scale.y) + offset.y;
					double z = (xyz[2] * scale.z) + offset.z;

					CellIndex cellIndex = toCellIndex({ x, y, z });

					auto& gridValue = grid[cellIndex.index];

					// distance from a cell's center to its corner
					static double all = sqrt(3.0);

					// accept a point if its cell is still free this iteration
					// and it lies reasonably close to the cell center; very
					// small children are taken wholesale
					bool isAccepted;
					if (child->numPoints < 100) {
						isAccepted = true;
					}
					else if (cellIndex.distance < 0.7 * all && gridValue < iteration) {
						isAccepted = true;
					}
					else {
						isAccepted = false;
					}

					if (isAccepted) {
						gridValue = iteration; // stamp the cell as occupied
						numAccepted++;
					}
					else {
						numRejected++;
					}

					acceptedFlags[i] = isAccepted ? 1 : 0;
				}

				acceptedChildPointFlags.push_back(acceptedFlags);
				numRejectedPerChild.push_back(numRejected);
			}

			// distribute points: accepted ones into this node, rejected ones
			// back into the child's (replaced) buffer
			auto accepted = std::make_shared<Buffer>(numAccepted * attributes.bytes);
			for (int childIndex = 0; childIndex < 8; childIndex++) {
				auto child = node->children[childIndex];

				if (child == nullptr) continue;

				auto numRejected = numRejectedPerChild[childIndex];
				auto& acceptedFlags = acceptedChildPointFlags[childIndex];
				auto rejected = std::make_shared<Buffer>(numRejected * attributes.bytes);

				for (int i = 0; i < child->numPoints; i++) {
					auto isAccepted = acceptedFlags[i];
					int64_t pointOffset = i * attributes.bytes;

					if (isAccepted) {
						accepted->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
					else {
						rejected->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
				}

				// NOTE(review): unlike SamplerPoisson there is no "continue"
				// after discarding, so a fully-consumed leaf child also falls
				// into the numRejected == 0 branch below and receives
				// onNodeCompleted after onNodeDiscarded (the detached child is
				// kept alive by the local shared_ptr copy). Confirm whether
				// both callbacks are intended for discarded children.
				if (numRejected == 0 && child->isLeaf()) {
					onNodeDiscarded(child.get());
					node->children[childIndex] = nullptr;
				} if (numRejected > 0) {
					child->points = rejected;
					child->numPoints = numRejected;
					onNodeCompleted(child.get());
				}
				else if (numRejected == 0) {
					// the parent has taken all points from this child,
					// so make this child an empty inner node.
					// Otherwise, the hierarchy file will claim that
					// this node has points but because it doesn't have any,
					// decompressing the nonexistent point buffer fails
					// https://github.com/potree/potree/issues/1125
					child->points = nullptr;
					child->numPoints = 0;
					onNodeCompleted(child.get());
				}
			}

			node->points = accepted;
			node->numPoints = numAccepted;

			return true;
		});
	}
};
}
#pragma once
#include "structures.h"
#include "Attributes.h"
namespace Potree
{
// Random subsampling of a local octree, bottom-up: each inner node pulls a
// grid-limited subset of its children's points up into itself.
struct SamplerRandom : public Sampler {

	// subsample a local octree from bottom up
	//
	// node            root of the loaded local octree
	// attributes      point layout; position is the 3 leading int32 values,
	//                 decoded with posScale/posOffset
	// baseSpacing     nominal minimum spacing at the root level
	// onNodeCompleted called for children whose final point set is settled
	// onNodeDiscarded called for leaf children fully absorbed by the parent
	void sample(Node* node, const Attributes& attributes, double baseSpacing,
		std::function<void(Node*)> onNodeCompleted,
		std::function<void(Node*)> onNodeDiscarded
	) {

		// NOTE(review): unused in this sampler — leftover from SamplerPoisson
		struct Point {
			double x;
			double y;
			double z;
			int32_t pointIndex;
			int32_t childIndex;
		};

		// post-order traversal over not-yet-sampled nodes: children first
		std::function<void(Node*, std::function<void(Node*)>)> traversePost = [&traversePost](Node* node, std::function<void(Node*)> callback) {
			for (const auto& child : node->children) {
				if (child != nullptr && !child->sampled) {
					traversePost(child.get(), callback);
				}
			}
			callback(node);
		};

		int bytesPerPoint = attributes.bytes;
		Vector3 scale = attributes.posScale;
		Vector3 offset = attributes.posOffset;

		traversePost(node, [bytesPerPoint, baseSpacing, scale, offset, &onNodeCompleted, &onNodeDiscarded, attributes](Node* node) {
			node->sampled = true;

			int64_t numPoints = node->numPoints; // NOTE(review): unused

			// occupancy grid of 128^3 int64 cells (~16 MB per thread). Cells
			// are never cleared; each invocation bumps "iteration" and a cell
			// counts as free until it carries the current stamp.
			int64_t gridSize = 128;
			thread_local std::vector<int64_t> grid(gridSize * gridSize * gridSize, -1);
			thread_local int64_t iteration = 0;
			iteration++;

			auto max = node->max;
			auto min = node->min;
			auto size = max - min;

			// NOTE(review): shadows the captured scale/offset with the same values
			auto scale = attributes.posScale;
			auto offset = attributes.posOffset;

			struct CellIndex {
				int64_t index = -1;
				double distance = 0.0;
			};

			// map a world-space point to its grid cell; "distance" is the
			// point's offset from the cell center (0 at center, sqrt(3) at a corner)
			auto toCellIndex = [min, size, gridSize](Vector3 point) -> CellIndex {

				double nx = (point.x - min.x) / size.x;
				double ny = (point.y - min.y) / size.y;
				double nz = (point.z - min.z) / size.z;

				double lx = 2.0 * fmod(double(gridSize) * nx, 1.0) - 1.0;
				double ly = 2.0 * fmod(double(gridSize) * ny, 1.0) - 1.0;
				double lz = 2.0 * fmod(double(gridSize) * nz, 1.0) - 1.0;

				double distance = sqrt(lx * lx + ly * ly + lz * lz);

				int64_t x = double(gridSize) * nx;
				int64_t y = double(gridSize) * ny;
				int64_t z = double(gridSize) * nz;

				// clamp in case points lie on/outside the node bounds
				x = std::max(int64_t(0), std::min(x, gridSize - 1));
				y = std::max(int64_t(0), std::min(y, gridSize - 1));
				z = std::max(int64_t(0), std::min(z, gridSize - 1));

				int64_t index = x + y * gridSize + z * gridSize * gridSize;

				return { index, distance };
			};

			bool isLeaf = node->isLeaf();

			if (isLeaf) {
				// shuffle?
				//
				// a not particularly efficient approach to shuffling:
				//
				std::vector<int> indices(node->numPoints);
				for (int i = 0; i < node->numPoints; i++) {
					indices[i] = i;
				}

				unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
				shuffle(indices.begin(), indices.end(), std::default_random_engine(seed));

				auto buffer = std::make_shared<Buffer>(node->points->size);

				for (int i = 0; i < node->numPoints; i++) {
					int64_t sourceOffset = i * attributes.bytes;
					int64_t targetOffset = indices[i] * attributes.bytes;

					memcpy(buffer->data_u8 + targetOffset, node->points->data_u8 + sourceOffset, attributes.bytes);
				}

				node->points = buffer;

				return false;
			}

			// =================================================================
			// SAMPLING
			// =================================================================
			//
			// first, check for each point whether it's accepted or rejected
			// save result in an array with one element for each point

			std::vector<std::vector<int8_t>> acceptedChildPointFlags;
			std::vector<int64_t> numRejectedPerChild;
			int64_t numAccepted = 0;

			for (int childIndex = 0; childIndex < 8; childIndex++) {
				auto child = node->children[childIndex];

				if (child == nullptr) {
					// keep both vectors aligned with the octant index
					acceptedChildPointFlags.push_back({});
					numRejectedPerChild.push_back({});
					continue;
				}

				std::vector<int8_t> acceptedFlags(child->numPoints, 0);
				int64_t numRejected = 0;

				for (int i = 0; i < child->numPoints; i++) {

					int64_t pointOffset = i * attributes.bytes;
					int32_t* xyz = reinterpret_cast<int32_t*>(child->points->data_u8 + pointOffset);

					double x = (xyz[0] * scale.x) + offset.x;
					double y = (xyz[1] * scale.y) + offset.y;
					double z = (xyz[2] * scale.z) + offset.z;

					CellIndex cellIndex = toCellIndex({ x, y, z });

					auto& gridValue = grid[cellIndex.index];

					// distance from a cell's center to its corner
					static double all = sqrt(3.0);

					// accept a point if its cell is still free this iteration
					// and it lies reasonably close to the cell center; very
					// small children are taken wholesale
					bool isAccepted;
					if (child->numPoints < 100) {
						isAccepted = true;
					}
					else if (cellIndex.distance < 0.7 * all && gridValue < iteration) {
						isAccepted = true;
					}
					else {
						isAccepted = false;
					}

					if (isAccepted) {
						gridValue = iteration; // stamp the cell as occupied
						numAccepted++;
					}
					else {
						numRejected++;
					}

					acceptedFlags[i] = isAccepted ? 1 : 0;
				}

				acceptedChildPointFlags.push_back(acceptedFlags);
				numRejectedPerChild.push_back(numRejected);
			}

			// distribute points: accepted ones into this node, rejected ones
			// back into the child's (replaced) buffer
			auto accepted = std::make_shared<Buffer>(numAccepted * attributes.bytes);
			for (int childIndex = 0; childIndex < 8; childIndex++) {
				auto child = node->children[childIndex];

				if (child == nullptr) continue;

				auto numRejected = numRejectedPerChild[childIndex];
				auto& acceptedFlags = acceptedChildPointFlags[childIndex];
				auto rejected = std::make_shared<Buffer>(numRejected * attributes.bytes);

				for (int i = 0; i < child->numPoints; i++) {
					auto isAccepted = acceptedFlags[i];
					int64_t pointOffset = i * attributes.bytes;

					if (isAccepted) {
						accepted->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
					else {
						rejected->write(child->points->data_u8 + pointOffset, attributes.bytes);
					}
				}

				// NOTE(review): unlike SamplerPoisson there is no "continue"
				// after discarding, so a fully-consumed leaf child also falls
				// into the numRejected == 0 branch below and receives
				// onNodeCompleted after onNodeDiscarded (the detached child is
				// kept alive by the local shared_ptr copy). Confirm whether
				// both callbacks are intended for discarded children.
				if (numRejected == 0 && child->isLeaf()) {
					onNodeDiscarded(child.get());
					node->children[childIndex] = nullptr;
				} if (numRejected > 0) {
					child->points = rejected;
					child->numPoints = numRejected;
					onNodeCompleted(child.get());
				}
				else if (numRejected == 0) {
					// the parent has taken all points from this child,
					// so make this child an empty inner node.
					// Otherwise, the hierarchy file will claim that
					// this node has points but because it doesn't have any,
					// decompressing the nonexistent point buffer fails
					// https://github.com/potree/potree/issues/1125
					child->points = nullptr;
					child->numPoints = 0;
					onNodeCompleted(child.get());
				}
			}

			node->points = accepted;
			node->numPoints = numAccepted;

			return true;
		});
	}
};
}

View File

@@ -1,124 +1,124 @@
#pragma once
#include "Vector3.h"
#include "Constants.h"
#include <functional>
#include <map>
#include <string>
#include <atomic>
#include <vector>
namespace Potree
{
struct Attributes;
struct Node;
// One input point-cloud source contributing to a conversion run.
struct SourceDescription
{
	std::string sourceFile;          // path to the input file
	size_t pointCount = 0;           // number of points to read from this source
	size_t offsetToFirstPoint = 0;   // offset at which reading starts — unit (points vs bytes) not evident here; confirm at reader
	Vector3 translation = Vector3(0.); // translation applied to this source's points
};
// User-configurable settings for a conversion run.
struct Options {
	std::vector<SourceDescription> sources; // input files to convert
	std::string outputName = "default";
	std::string encoding = "BROTLI"; // "BROTLI", "UNCOMPRESSED"
	std::string outdir;              // output directory
	std::string method = "poisson";  // sampling strategy name
	std::string chunkMethod = "";
	//vector<string> flags;
	std::vector<std::string> attributes; // names of point attributes to carry over
	bool generateJson5 = true;
	std::string projection;          // CRS/projection string written to the output metadata — confirm format at writer
	bool keepChunks = false;         // keep intermediate chunk files
	bool noChunking = false;         // skip the chunking pass
	bool noIndexing = false;         // skip the indexing pass
};
// A contiguous portion of one source file that is processed as a unit
// during chunking.
struct ChunkInfo
{
	constexpr static size_t maxChunkSize = 1'000'000; // upper bound on points per chunk
	std::string filePath;
	const SourceDescription* sDesc;  // non-owning; presumably points into Options::sources — confirm lifetime
	size_t totalPoints = 0;          // total points in the source file
	size_t firstPoint = 0;           // index of this chunk's first point within the source
	size_t firstByte = 0;            // byte offset of this chunk's first point
	size_t pointsCount = 0;          // number of points in this chunk
	size_t pointsBytesCount = 0;     // size of this chunk's point data in bytes
	uint16_t pointSize = 0;          // bytes per point record
};
// Aggregated description of one input file and the chunks derived from it.
struct Source
{
	//std::string path;
	uint64_t filesize = 0;
	uint64_t numPoints = 0;
	int bytesPerPoint = 0;
	Vector3 min;                    // bounding-box minimum of this source
	Vector3 max;                    // bounding-box maximum of this source
	std::vector<ChunkInfo> chunks;  // chunk partition of this source
};
// numpunct facet for locale-independent number formatting: '.' as decimal
// point, apostrophe as thousands separator, digits grouped in threes
// (e.g. 1'234'567.89).
class punct_facet : public std::numpunct<char> {
protected:
	char do_decimal_point() const override { return '.'; }
	char do_thousands_sep() const override { return '\''; }
	std::string do_grouping() const override { return "\3"; }
};
// Mutable conversion-progress state shared between the processing passes and
// the monitor; the counters are atomic because they are updated concurrently.
struct State
{
	std::string name = "";
	std::atomic_int64_t pointsTotal = 0;
	std::atomic_int64_t pointsProcessed = 0;
	std::atomic_int64_t bytesProcessed = 0;
	double duration = 0.0;

	std::map<std::string, std::string> values;

	int numPasses = 3;
	int currentPass = 0; // starts with index 1! interval: [1, numPasses]

	// Fraction of points processed, in [0, 1]. Returns 0.0 while pointsTotal
	// is still unknown (zero) instead of dividing by zero.
	double progress() {
		int64_t total = pointsTotal;
		if (total == 0) {
			return 0.0;
		}
		return double(pointsProcessed) / double(total);
	}
};
// Running aggregate over processed points. The bounding box starts inverted
// (min at +Infinity, max at -Infinity) so the first point initializes it.
struct Stats {
	Vector3 min = { Infinity , Infinity , Infinity };
	Vector3 max = { -Infinity , -Infinity , -Infinity };
	int64_t totalBytes = 0;
	int64_t totalPoints = 0;
};
// Accumulator for averaging colors: running sums of the RGB components plus
// a weight/count w to divide by.
struct CumulativeColor
{
	int64_t r = 0;
	int64_t g = 0;
	int64_t b = 0;
	int64_t w = 0;
};
// Parameter bundle for a sampler invocation.
struct SamplerState
{
	int bytesPerPoint;        // size of one point record in bytes
	double baseSpacing;       // minimum point spacing at the octree root
	Vector3 scale;            // position quantization scale
	Vector3 offset;           // position quantization offset
	std::function<void(Node*)> writeAndUnload; // callback — presumably persists a node and frees its points; confirm at call site
};
// Interface for octree subsampling strategies (poisson, random, ...).
struct Sampler
{
	// Subsample the local octree rooted at node, bottom up.
	// callbackNodeCompleted is invoked for nodes whose point set is final;
	// callbackNodeDiscarded for nodes removed from the tree.
	virtual void sample(Node* node, const Attributes& attributes, double baseSpacing,
		std::function<void(Node*)> callbackNodeCompleted,
		std::function<void(Node*)> callbackNodeDiscarded
	) = 0;

	// Polymorphic base: virtual destructor so deletion through a Sampler
	// pointer is well-defined.
	virtual ~Sampler() = default;
};
}
#pragma once
#include "Vector3.h"
#include "Constants.h"
#include <functional>
#include <map>
#include <string>
#include <atomic>
#include <vector>
namespace Potree
{
struct Attributes;
struct Node;
// One input point-cloud source contributing to a conversion run.
struct SourceDescription
{
	std::string sourceFile;          // path to the input file
	size_t pointCount = 0;           // number of points to read from this source
	size_t offsetToFirstPoint = 0;   // offset at which reading starts — unit (points vs bytes) not evident here; confirm at reader
	Vector3 translation = Vector3(0.); // translation applied to this source's points
};
// User-configurable settings for a conversion run.
struct Options {
	std::vector<SourceDescription> sources; // input files to convert
	std::string outputName = "default";
	std::string encoding = "BROTLI"; // "BROTLI", "UNCOMPRESSED"
	std::string outdir;              // output directory
	std::string method = "poisson";  // sampling strategy name
	std::string chunkMethod = "";
	//vector<string> flags;
	std::vector<std::string> attributes; // names of point attributes to carry over
	bool generateJson5 = true;
	std::string projection;          // CRS/projection string written to the output metadata — confirm format at writer
	bool keepChunks = false;         // keep intermediate chunk files
	bool noChunking = false;         // skip the chunking pass
	bool noIndexing = false;         // skip the indexing pass
};
// A contiguous portion of one source file that is processed as a unit
// during chunking.
struct ChunkInfo
{
	constexpr static size_t maxChunkSize = 1'000'000; // upper bound on points per chunk
	std::string filePath;
	const SourceDescription* sDesc;  // non-owning; presumably points into Options::sources — confirm lifetime
	size_t totalPoints = 0;          // total points in the source file
	size_t firstPoint = 0;           // index of this chunk's first point within the source
	size_t firstByte = 0;            // byte offset of this chunk's first point
	size_t pointsCount = 0;          // number of points in this chunk
	size_t pointsBytesCount = 0;     // size of this chunk's point data in bytes
	uint16_t pointSize = 0;          // bytes per point record
};
// Aggregated description of one input file and the chunks derived from it.
struct Source
{
	//std::string path;
	uint64_t filesize = 0;
	uint64_t numPoints = 0;
	int bytesPerPoint = 0;
	Vector3 min;                    // bounding-box minimum of this source
	Vector3 max;                    // bounding-box maximum of this source
	std::vector<ChunkInfo> chunks;  // chunk partition of this source
};
// numpunct facet for locale-independent number formatting: '.' as decimal
// point, apostrophe as thousands separator, digits grouped in threes
// (e.g. 1'234'567.89).
class punct_facet : public std::numpunct<char> {
protected:
	char do_decimal_point() const override { return '.'; }
	char do_thousands_sep() const override { return '\''; }
	std::string do_grouping() const override { return "\3"; }
};
// Mutable conversion-progress state shared between the processing passes and
// the monitor; the counters are atomic because they are updated concurrently.
struct State
{
	std::string name = "";
	std::atomic_int64_t pointsTotal = 0;
	std::atomic_int64_t pointsProcessed = 0;
	std::atomic_int64_t bytesProcessed = 0;
	double duration = 0.0;

	std::map<std::string, std::string> values;

	int numPasses = 3;
	int currentPass = 0; // starts with index 1! interval: [1, numPasses]

	// Fraction of points processed, in [0, 1]. Returns 0.0 while pointsTotal
	// is still unknown (zero) instead of dividing by zero.
	double progress() {
		int64_t total = pointsTotal;
		if (total == 0) {
			return 0.0;
		}
		return double(pointsProcessed) / double(total);
	}
};
// Running aggregate over processed points. The bounding box starts inverted
// (min at +Infinity, max at -Infinity) so the first point initializes it.
struct Stats {
	Vector3 min = { Infinity , Infinity , Infinity };
	Vector3 max = { -Infinity , -Infinity , -Infinity };
	int64_t totalBytes = 0;
	int64_t totalPoints = 0;
};
// Accumulator for averaging colors: running sums of the RGB components plus
// a weight/count w to divide by.
struct CumulativeColor
{
	int64_t r = 0;
	int64_t g = 0;
	int64_t b = 0;
	int64_t w = 0;
};
// Parameter bundle for a sampler invocation.
struct SamplerState
{
	int bytesPerPoint;        // size of one point record in bytes
	double baseSpacing;       // minimum point spacing at the octree root
	Vector3 scale;            // position quantization scale
	Vector3 offset;           // position quantization offset
	std::function<void(Node*)> writeAndUnload; // callback — presumably persists a node and frees its points; confirm at call site
};
// Interface for octree subsampling strategies (poisson, random, ...).
struct Sampler
{
	// Subsample the local octree rooted at node, bottom up.
	// callbackNodeCompleted is invoked for nodes whose point set is final;
	// callbackNodeDiscarded for nodes removed from the tree.
	virtual void sample(Node* node, const Attributes& attributes, double baseSpacing,
		std::function<void(Node*)> callbackNodeCompleted,
		std::function<void(Node*)> callbackNodeDiscarded
	) = 0;

	// Polymorphic base: virtual destructor so deletion through a Sampler
	// pointer is well-defined.
	virtual ~Sampler() = default;
};
}

View File

@@ -1,8 +1,8 @@
#pragma once
#include "Vector3.h"
#include "Attributes.h"
#include "Potree/Vector3.h"
#include "Potree/Attributes.h"
#include <cstdint>
namespace Potree

View File

@@ -1,8 +1,8 @@
#include "laszip_api.h"
#include "LasLoader/LasLoader.h"
#include "structures.h"
#include "Potree/structures.h"
#include "PotreeDataProviderLas.h"
#include "PotreeAttributesHandler.h"
#include "Potree/PotreeAttributesHandler.h"
#include "unsuck/unsuck.hpp"
#include <cassert>

View File

@@ -1,7 +1,7 @@
#pragma once
#include "PotreeDataProviderBase.h"
#include "Potree/PotreeDataProviderBase.h"
struct laszip_point;

View File

@@ -1,6 +1,6 @@
#include "unsuck.hpp"
#include "converter_utils.h"
#include "Potree/converter_utils.h"
#include <thread>
#ifdef _WIN32

View File

@@ -1,12 +1,12 @@
#include "Chunker.h"
#include "logger.h"
#include "Potree/Chunker.h"
#include "Potree/logger.h"
#include "unsuck/unsuck.hpp"
#include "unsuck/TaskPool.hpp"
#include "json/json.hpp"
#include "Vector3.h"
#include "ConcurrentWriter.h"
#include "IPotreeDataProvider.h"
#include "Attributes.h"
#include "Potree/Vector3.h"
#include "Potree/ConcurrentWriter.h"
#include "Potree/IPotreeDataProvider.h"
#include "Potree/Attributes.h"
#include <cassert>
@@ -221,7 +221,7 @@ namespace Potree
ss << "Please try to repair the bounding box, e.g. using lasinfo with the -repair_bb argument." << endl;
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::ERROR);
g_logger->error(ss.str());
}
exit(123);
}
@@ -298,7 +298,7 @@ namespace Potree
{
if (g_logger)
{
g_logger->log("invalid malloc size: " + formatNumber(numBytes), LogLevel::ERROR);
g_logger->error("invalid malloc size: " + formatNumber(numBytes));
}
}
if (bufferSize < numBytes)
@@ -567,7 +567,7 @@ namespace Potree
ss << "1d grid index: " << index << endl;
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::ERROR);
g_logger->error(ss.str());
}
exit(123);
}

View File

@@ -1,5 +1,5 @@
#include "Node.h"
#include "converter_utils.h"
#include "Potree/Node.h"
#include "Potree/converter_utils.h"
#include <mutex>
namespace Potree

View File

@@ -1,13 +1,13 @@
#include "IPotreeDataProvider.h"
#include "PotreeConverter.h"
#include "converter_utils.h"
#include "Chunker.h"
#include "indexer.h"
#include "sampler_poisson.h"
#include "sampler_poisson_average.h"
#include "sampler_random.h"
#include "Monitor.h"
#include "logger.h"
#include "Potree/IPotreeDataProvider.h"
#include "Potree/PotreeConverter.h"
#include "Potree/converter_utils.h"
#include "Potree/Chunker.h"
#include "Potree/indexer.h"
#include "Potree/sampler_poisson.h"
#include "Potree/sampler_poisson_average.h"
#include "Potree/sampler_random.h"
#include "Potree/Monitor.h"
#include "Potree/logger.h"
namespace
{

View File

@@ -1,6 +1,6 @@
#include "PotreeDataProviderBase.h"
#include "PotreeAttributesHandler.h"
#include "converter_utils.h"
#include "Potree/PotreeDataProviderBase.h"
#include "Potree/PotreeAttributesHandler.h"
#include "Potree/converter_utils.h"
#include <cassert>
#include <cstring>

View File

@@ -1,11 +1,11 @@
#include "indexer.h"
#include "logger.h"
#include "Potree/indexer.h"
#include "Potree/logger.h"
#include "brotli/encode.h"
#include "HierarchyBuilder.h"
#include "Potree/HierarchyBuilder.h"
#include "json/json.hpp"
#include "unsuck/TaskPool.hpp"
#include "converter_utils.h"
#include "Potree/converter_utils.h"
#include <cerrno>
#include <algorithm>
@@ -287,7 +287,7 @@ namespace Potree
if (g_logger)
{
g_logger->log("start reloadChunkRoots", LogLevel::INFO);
g_logger->info("start reloadChunkRoots");
}
struct LoadTask {
@@ -324,7 +324,7 @@ namespace Potree
if (g_logger)
{
g_logger->log("end reloadChunkRoots", LogLevel::INFO);
g_logger->info("end reloadChunkRoots");
}
}
@@ -888,7 +888,7 @@ namespace Potree
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::ERROR);
g_logger->error(ss.str());
}
}
@@ -962,7 +962,7 @@ namespace Potree
ss << "max: " << node->max.toString() << "\n";
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::ERROR);
g_logger->error(ss.str());
}
}
@@ -996,7 +996,7 @@ namespace Potree
if (g_logger)
{
g_logger->log("failed to partition point cloud in indexer::buildHierarchy().", LogLevel::ERROR);
g_logger->error("failed to partition point cloud in indexer::buildHierarchy().");
}
}
@@ -1037,7 +1037,7 @@ namespace Potree
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::WARN);
g_logger->warn(ss.str());
}
}
else {
@@ -1084,7 +1084,7 @@ namespace Potree
if (g_logger)
{
g_logger->log(msg.str(), LogLevel::WARN);
g_logger->warn(msg.str());
}
shared_ptr<Buffer> distinctBuffer = make_shared<Buffer>(distinct.size() * bpp);
@@ -1367,7 +1367,7 @@ namespace Potree
if (g_logger)
{
g_logger->log("reserved encoded_buffer size was too small. Trying again with size " + formatNumber(encoded_size) + ".", LogLevel::WARN);
g_logger->warn("reserved encoded_buffer size was too small. Trying again with size " + formatNumber(encoded_size) + ".");
}
}
}
@@ -1377,7 +1377,7 @@ namespace Potree
ss << "failed to compress node " << node->name << ". aborting conversion.";
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::ERROR);
g_logger->error(ss.str());
}
exit(123);
}
@@ -1440,7 +1440,7 @@ namespace Potree
ss << "max: " << node->max.toString() << "\n";
if (g_logger)
{
g_logger->log(ss.str(), LogLevel::ERROR);
g_logger->error(ss.str());
}
}
};
@@ -1613,7 +1613,7 @@ namespace Potree
msg << "max: " << chunk->max.toString();
if (g_logger)
{
g_logger->log(msg.str(), LogLevel::INFO);
g_logger->info(msg.str());
}
indexer.bytesInMemory += filesize;
@@ -1659,7 +1659,7 @@ namespace Potree
if (g_logger)
{
g_logger->log("finished indexing chunk " + chunk->id, LogLevel::INFO);
g_logger->info("finished indexing chunk " + chunk->id);
}
activeThreads--;

View File

@@ -1,40 +0,0 @@
#include "logger.h"
#include <iostream>
#include <filesystem>
#include <sstream>
#include <array>
#include <string_view>
namespace
{
constexpr std::array<std::string_view, 3> logLevelStrings =
{
"INFO", "WARNING", "ERROR"
};
std::string_view LogLevelToString(Potree::LogLevel level)
{
return logLevelStrings[static_cast<int>(level)];
}
}
namespace Potree
{
Logger::Logger(const std::string& logFile)
{
m_fout.open(logFile);
}
Logger::~Logger()
{
m_fout.close();
}
void Logger::log(const std::string& msg, LogLevel level)
{
std::lock_guard<std::mutex> lock(m_mtx);
m_fout << LogLevelToString(level) << ": " << msg << '\n';
}
}

View File

@@ -1,8 +1,8 @@
#include "unsuck/unsuck.hpp"
#include "logger.h"
#include "Potree/logger.h"
#include "Arguments.hpp"
#include "PotreeConverter.h"
#include "Potree/PotreeConverter.h"
#if __has_include("PotreeDataProviderLas.h")
#include "PotreeDataProviderLas.h"
@@ -16,6 +16,40 @@ using namespace std;
namespace fs = std::filesystem;
using Potree::Vector3;
namespace
{
class Logger : public Potree::ILogger
{
public:
Logger(const std::string& logFile)
{
m_fout.open(logFile);
}
~Logger()
{
m_fout.close();
}
void warn(const std::string& msg) override
{
std::lock_guard<std::mutex> lock(m_mtx);
m_fout << "WARNING: " << msg << '\n';
}
void error(const std::string& msg) override
{
std::lock_guard<std::mutex> lock(m_mtx);
m_fout << "ERROR: " << msg << '\n';
}
void info(const std::string& msg) override
{
std::lock_guard<std::mutex> lock(m_mtx);
m_fout << "INFO: " << msg << '\n';
}
private:
std::ofstream m_fout;
std::mutex m_mtx;
};
}
Potree::Options parseArguments(int argc, char** argv) {
Potree::Arguments args(argc, argv);
@@ -143,7 +177,7 @@ int main(int argc, char** argv)
{
auto options = parseArguments(argc, argv);
#if __has_include("PotreeDataProviderLas.h")
Potree::Logger logger(options.outdir + "/log.txt");
::Logger logger(options.outdir + "/log.txt");
Potree::PotreeDataProviderLas lasProvider(options.sources[0].sourceFile);
const Potree::SourceDescription& sourceDesc = lasProvider.GetSourceDescription();
options.sources[0] = sourceDesc;