Update to latest Visual Studio
@@ -28,7 +28,7 @@ using std::fstream;
using std::cout;
using std::endl;

namespace fs = std::experimental::filesystem;
namespace fs = std::filesystem;

struct Chunk {

@@ -21,7 +21,7 @@ using json = nlohmann::json;
using std::shared_ptr;
using std::string;
using std::atomic_bool;
namespace fs = std::experimental::filesystem;
namespace fs = std::filesystem;

struct ChunkerCell {
    uint32_t count = 0;

PotreeConverter/include/Chunker2.h (290 lines, new file)
@@ -0,0 +1,290 @@
#pragma once

#include <string>
#include <assert.h>
#include <filesystem>
#include <functional>

#include "Points.h"
#include "Vector3.h"
#include "LASWriter.hpp"
#include "TaskPool.h"

using std::string;

struct ChunkPiece {
    int index = -1;
    string name = "";
    string path = "";
    shared_ptr<Points> points;

    ChunkPiece(int index, string name, string path, shared_ptr<Points> points) {
        this->index = index;
        this->name = name;
        this->path = path;
        this->points = points;
    }
};

mutex mtx_find_mutex;
//unordered_map<int, shared_ptr<mutex>> mutexes;
unordered_map<int, mutex> mutexes;

void flushProcessor(shared_ptr<ChunkPiece> piece) {

    auto points = piece->points;
    uint64_t numPoints = points->points.size();
    uint64_t bytesPerPoint = 28;
    uint64_t fileDataSize = numPoints * bytesPerPoint;

    void* fileData = malloc(fileDataSize);
    uint8_t* fileDataU8 = reinterpret_cast<uint8_t*>(fileData);

    uint8_t* attBuffer = points->attributeBuffer->dataU8;
    int attributesByteSize = 4;

    int i = 0;
    for (Point point : points->points) {

        int fileDataOffset = i * bytesPerPoint;

        memcpy(fileDataU8 + fileDataOffset, &point, 24);

        uint8_t* attSrc = attBuffer + (i * attributesByteSize);
        memcpy(fileDataU8 + fileDataOffset + 24, attSrc, attributesByteSize);

        i++;
    }

    string filepath = piece->path;

    // avoid writing to the same file by multiple threads, using one mutex per file
    mtx_find_mutex.lock();
    mutex& mtx_file = mutexes[piece->index];
    mtx_find_mutex.unlock();

    {
        double tLockStart = now();
        lock_guard<mutex> lock(mtx_file);
        double dLocked = now() - tLockStart;

        if (dLocked > 0.2) {
            string strDuration = to_string(dLocked);
            string msg = "long lock duration ( " + strDuration
                + "s) while waiting to write to " + piece->name + "\n";

            cout << msg;
        }

        fstream file;
        file.open(filepath, ios::out | ios::binary | ios::app);
        file.write(reinterpret_cast<const char*>(fileData), fileDataSize);
        file.close();
    }

    free(fileData);
}

TaskPool<ChunkPiece> flushPool(16, flushProcessor);

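Note (illustration, not part of the commit): flushProcessor above serializes each point as a fixed 28-byte record, i.e. the first 24 bytes of Point followed by 4 attribute bytes. A minimal decoding sketch, assuming Point begins with three double coordinates; the FlatRecord struct and decode() helper are hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct FlatRecord {               // hypothetical mirror of one serialized point
    double xyz[3];                // bytes  0..23, copied straight from Point
    uint8_t attributes[4];        // bytes 24..27, copied from the attribute buffer
};

// Decode a buffer produced by flushProcessor back into records.
std::vector<FlatRecord> decode(const uint8_t* data, std::size_t numPoints) {
    std::vector<FlatRecord> records(numPoints);
    for (std::size_t i = 0; i < numPoints; i++) {
        const uint8_t* src = data + i * 28;               // bytesPerPoint = 28
        std::memcpy(records[i].xyz, src, 24);             // x, y, z
        std::memcpy(records[i].attributes, src + 24, 4);  // attribute block
    }
    return records;
}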
class Chunker {
public:

    string path = "";
    Attributes attributes;

    Vector3<double> min;
    Vector3<double> max;
    Vector3<double> size;
    Vector3<double> cellsD;

    int gridSize = 0;

    Chunker(string path, Attributes attributes, Vector3<double> min, Vector3<double> max, int gridSize) {
        this->path = path;
        this->min = min;
        this->max = max;
        this->attributes = attributes;
        this->gridSize = gridSize;

        double gridSizeD = double(gridSize);
        cellsD = Vector3<double>(gridSizeD, gridSizeD, gridSizeD);
        size = max - min;

        //grid.resize(gridSize * gridSize * gridSize);
    }

    void close() {

        flushPool.close();

        //int numCells = 0;
        //int numPieces = 0;

        //for (auto cell : grid) {
        //
        //    if (cell == nullptr) {
        //        continue;
        //    }

        //    numCells++;
        //    numPieces += cell->pieces.size();

        //    //for(auto piece : cell->pieces){

        //    //    static int i = 0;

        //    //    string lasPath = path + "/" + to_string(i) + ".las";
        //    //    LASHeader header;
        //    //    header.min = min;
        //    //    header.max = max;
        //    //    header.numPoints = piece->points.size();
        //    //    header.scale = { 0.001, 0.001, 0.001 };

        //    //    writeLAS(lasPath, header, piece->points);
        //    //    i++;
        //    //}

        //}

        //cout << "#cells: " << numCells << endl;
        //cout << "#pieces: " << numPieces << endl;
    }

    string getName(int index) {

        int ix = index % gridSize;
        int iy = ((index - ix) / gridSize) % gridSize;
        int iz = (index - ix - iy * gridSize) / (gridSize * gridSize);

        string name = "r";
        int levels = std::log2(gridSize);

        int div = gridSize;
        for (int j = 0; j < levels; j++) {

            int lIndex = 0;

            if (ix >= (div / 2)) {
                lIndex = lIndex + 0b100;
                ix = ix - div / 2;
            }

            if (iy >= (div / 2)) {
                lIndex = lIndex + 0b010;
                iy = iy - div / 2;
            }

            if (iz >= (div / 2)) {
                lIndex = lIndex + 0b001;
                iz = iz - div / 2;
            }

            name += to_string(lIndex);
            div = div / 2;
        }

        return name;
    }

    void add(shared_ptr<Points> batch) {

        int64_t gridSizeM1 = gridSize - 1;

        double scaleX = cellsD.x / size.x;
        double scaleY = cellsD.y / size.y;
        double scaleZ = cellsD.z / size.z;

        int64_t gridSize = this->gridSize;
        int64_t gridSizeGridSize = gridSize * gridSize;

        // making toIndex a lambda with necessary captures here seems to be faster than
        // making it a member function of this class?!
        auto toIndex = [=](Point point) {
            int64_t ix = (point.x - min.x) * scaleX;
            int64_t iy = (point.y - min.y) * scaleY;
            int64_t iz = (point.z - min.z) * scaleZ;

            ix = std::min(ix, gridSizeM1);
            iy = std::min(iy, gridSizeM1);
            iz = std::min(iz, gridSizeM1);

            int64_t index = ix + gridSize * iy + gridSizeGridSize * iz;

            return index;
        };

        // compute number of points per bin
        vector<int> binCounts(gridSize * gridSize * gridSize, 0);
        for (Point point : batch->points) {

            int64_t index = toIndex(point);

            binCounts[index]++;
        }

        // create new bin-pieces and add them to bin-grid
        vector<shared_ptr<Points>> bins(gridSize * gridSize * gridSize, nullptr);
        for (int i = 0; i < binCounts.size(); i++) {

            int binCount = binCounts[i];

            if (binCount == 0) {
                continue;
            }

            shared_ptr<Points> bin = make_shared<Points>();
            bin->points.reserve(binCount);

            int attributeBufferSize = attributes.byteSize * binCount;
            bin->attributeBuffer = make_shared<Buffer>(attributeBufferSize);

            //if (grid[i] == nullptr) {
            //    grid[i] = make_shared<ChunkCell>();
            //}

            // add bin-piece to grid
            //grid[i]->pieces.push_back(bin);

            bins[i] = bin;
        }

        // fill bins
        for (Point point : batch->points) {

            int64_t index = toIndex(point);

            bins[index]->points.push_back(point);
        }

        // create flush tasks
        for (int i = 0; i < binCounts.size(); i++) {
            auto points = bins[i];

            if (points == nullptr) {
                continue;
            }

            int index = i;
            string name = getName(index);
            string filepath = this->path + name + ".bin";

            auto piece = make_shared<ChunkPiece>(index, name, filepath, points);

            flushPool.addTask(piece);
        }
    }

};

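Note (illustration, not part of the commit): Chunker::getName above converts a flat grid-cell index into a Potree-style node name by halving the grid once per level and appending one octant digit (x contributes bit 2, y bit 1, z bit 0). A self-contained sketch of the same mapping with a worked example; cellName() is a hypothetical helper:

#include <cassert>
#include <string>

std::string cellName(int index, int gridSize) {
    // recover the 3D cell coordinates from the flat index ix + iy*g + iz*g*g
    int ix = index % gridSize;
    int iy = (index / gridSize) % gridSize;
    int iz = index / (gridSize * gridSize);

    std::string name = "r";
    for (int div = gridSize; div > 1; div /= 2) {      // log2(gridSize) levels
        int digit = 0;
        if (ix >= div / 2) { digit += 0b100; ix -= div / 2; }
        if (iy >= div / 2) { digit += 0b010; iy -= div / 2; }
        if (iz >= div / 2) { digit += 0b001; iz -= div / 2; }
        name += std::to_string(digit);
    }
    return name;
}

int main() {
    // cell (ix, iy, iz) = (5, 2, 7) in an 8x8x8 grid, flat index 5 + 2*8 + 7*64
    assert(cellName(5 + 2 * 8 + 7 * 64, 8) == "r535");
    return 0;
}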
@@ -4,20 +4,15 @@
#include <string>
#include <assert.h>
#include <filesystem>
#include <functional>

#include "Points.h"
#include "Vector3.h"
#include "LASWriter.hpp"
#include "TaskPool.h"

using std::string;
namespace fs = std::experimental::filesystem;

struct ChunkerCell {

    uint32_t count = 0;
    vector<Points*> batches;

};
namespace fs = std::filesystem;

struct ChunkNode {

@@ -49,7 +44,7 @@ struct ChunkNode {

        childBinSize.resize(8, 0);

        points.reserve(1'000'000);
        //points.reserve(1'000'000);
    }

    void add(Point& point) {

@@ -57,6 +52,10 @@ struct ChunkNode {
        totalPoints++;

        if (totalPoints <= storeSize) {

            if (points.size() == 0) {
                points.reserve(1'000'000);
            }
            points.push_back(point);

            {

@@ -179,36 +178,22 @@ public:
    int32_t gridSize = 1;
    string path = "";

    //vector<ChunkerCell> cells;
    ChunkNode* root = nullptr;

    //Vector3<double> min = {0.0, 0.0, 0.0};
    //Vector3<double> max = {0.0, 0.0, 0.0};

    //Chunker(string targetDirectory, int gridSize) {
    Chunker(string path, Vector3<double> min, Vector3<double> max) {
    Chunker(string path, Attributes attributes, Vector3<double> min, Vector3<double> max) {
        this->path = path;
        this->gridSize = gridSize;

        //cells.resize(gridSize * gridSize * gridSize);

        root = new ChunkNode("r", min, max);
    }

    void close() {

        function<void(ChunkNode*)> traverse = [&traverse, this](ChunkNode* node) {
        vector<ChunkNode*> nodes;

            cout << node->name << ": " << node->totalPoints << ", " << node->points.size() << endl;
        function<void(ChunkNode*)> traverse = [&traverse, this, &nodes](ChunkNode* node) {

            string lasPath = this->path + "/" + node->name + ".las";
            LASHeader header;
            header.min = node->min;
            header.max = node->max;
            header.numPoints = node->points.size();
            header.scale = { 0.001, 0.001, 0.001 };

            writeLAS(lasPath, header, node->points);
            nodes.push_back(node);

            for (ChunkNode* child : node->children) {

@@ -218,14 +203,38 @@ public:

                traverse(child);
            }
        };

        traverse(root);

        return;

        auto path = this->path;
        auto flushProcessor = [path](shared_ptr<ChunkNode> node) {
            //cout << node->name << ": " << node->totalPoints << ", " << node->points.size() << endl;

            string lasPath = path + "/" + node->name + ".las";
            LASHeader header;
            header.min = node->min;
            header.max = node->max;
            header.numPoints = node->points.size();
            header.scale = { 0.001, 0.001, 0.001 };

            writeLAS(lasPath, header, node->points);
        };

        int numFlushThreads = 12;
        TaskPool<ChunkNode> pool(numFlushThreads, flushProcessor);

        for (ChunkNode* node : nodes) {
            pool.addTask(shared_ptr<ChunkNode>(node));
        }

        pool.close();

    }

    void add(Points* batch) {
    void add(shared_ptr<Points> batch) {

        for (Point& point : batch->points) {
            root->add(point);

@@ -169,7 +169,7 @@ public:

        this->numPoints = npoints;

        spawnLoadThread();
        //spawnLoadThread();

    }

@@ -215,7 +215,7 @@ public:
                return batch;
            } else {
                lock.unlock();
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
                std::this_thread::sleep_for(std::chrono::milliseconds(5));
            }
        }

@@ -229,6 +229,8 @@ public:

    void loadStuff() {

        auto tStart = now();

        uint64_t npoints = (header->number_of_point_records ? header->number_of_point_records : header->extended_number_of_point_records);

        laszip_get_point_pointer(laszip_reader, &point);

@@ -253,6 +255,7 @@ public:

                points = make_shared<Points>();
                uint64_t attributeBufferSize = currentBatchSize * attributes.byteSize;
                points->points.reserve(currentBatchSize);
                points->attributes = attributes;
                points->attributeBuffer = make_shared<Buffer>(attributeBufferSize);
            }

@@ -292,6 +295,7 @@ public:
        }

        cout << "#points: " << npoints << endl;
        printElapsedTime("loaded", tStart);

        cout << batches.size() << endl;
    }

@@ -9,6 +9,8 @@
using std::string;
using std::vector;
using std::shared_ptr;
using std::cout;
using std::endl;

struct Buffer {
    void* data = nullptr;

@@ -120,9 +122,13 @@ struct Points {
    Attributes attributes;
    shared_ptr<Buffer> attributeBuffer;

    //~Points() {
    //    delete attributeBuffer;
    //}
    Points() {
        //cout << "create points" << endl;
    }

    ~Points() {
        //cout << "delete points" << endl;
    }

};

@@ -9,6 +9,7 @@
#include <random>
#include <unordered_map>
#include <vector>
#include <algorithm>

#include "json.hpp"

@@ -16,7 +17,7 @@
using json = nlohmann::json;
using namespace std;

namespace fs = std::experimental::filesystem;
namespace fs = std::filesystem;

struct PWNode {

@@ -544,10 +545,121 @@ public:
        // for debugging/testing
        writeHierarchyJSON();

        writeHierarchyBinary();

    }

    void writeHierarchyBinary() {

        vector<PWNode*> nodes;
        function<void(PWNode*)> traverse = [&traverse, &nodes](PWNode* node){
            nodes.push_back(node);

            for (auto child : node->children) {
                if (child != nullptr) {
                    traverse(child);
                }
            }
        };
        traverse(root);

        // sizeof(NodeData) = 32 bytes
        struct NodeData {
            uint64_t byteOffset = 0;    // location of first byte in data store
            uint64_t byteLength = 0;    // byte size in data store
            uint64_t childPosition = 0; // location of first child in hierarchy
            uint8_t childBitset = 0;    // which of the eight children exist?
        };

        // sort in breadth-first order
        auto compare = [](PWNode* a, PWNode* b) -> bool {
            if (a->name.size() == b->name.size()) {
                bool result = lexicographical_compare(
                    a->name.begin(), a->name.end(),
                    b->name.begin(), b->name.end());

                return result;
            } else {
                return a->name.size() < b->name.size();
            }
        };

        sort(nodes.begin(), nodes.end(), compare);

        unordered_map<string, uint64_t> nodesMap;
        vector<NodeData> nodesData(nodes.size());

        for (uint64_t i = 0; i < nodes.size(); i++) {

            PWNode* node = nodes[i];
            NodeData& nodeData = nodesData[i];

            nodeData.byteOffset = node->byteOffset;
            nodeData.byteLength = node->byteSize;

            nodesMap[node->name] = i;

            if (node->name != "r") {
                string parentName = node->name.substr(0, node->name.size() - 1);
                uint64_t parentIndex = nodesMap[parentName];
                PWNode* parent = nodes[parentIndex];
                NodeData& parentData = nodesData[parentIndex];

                int index = node->name.at(node->name.size() - 1) - '0';
                int bitmask = 1 << index;
                parentData.childBitset = parentData.childBitset | bitmask;

                if (parentData.childPosition == 0) {
                    parentData.childPosition = i;
                }

            }
        }

        cout << "#nodes: " << nodes.size() << endl;

        {
            string jsPath = targetDirectory + "/hierarchy.bin";

            fstream file;
            file.open(jsPath, ios::out | ios::binary);

            char* data = reinterpret_cast<char*>(nodesData.data());
            file.write(data, nodesData.size() * sizeof(NodeData));

            cout << "sizeof(NodeData): " << sizeof(NodeData) << endl;

            //for (int i = 0; i < 109; i++) {
            //    NodeData& nodeData = nodesData[i];
            //    PWNode* node = nodes[i];

            //    file << "=================" << endl;
            //    file << "position; " << i << endl;
            //    file << "name: " << node->name << endl;
            //    file << "offset: " << nodeData.byteOffset << endl;
            //    file << "size: " << nodeData.byteLength << endl;
            //    file << "childPosition: " << nodeData.childPosition << endl;
            //    file << "children: ";

            //    for (int j = 0; j < 8; j++) {
            //        int value = nodeData.childBitset & (1 << j);

            //        file << (value > 0 ? 1 : 0) << ", ";
            //    }
            //    file << endl;

            //}

            file.close();
        }

    }

    void writeHierarchyJSON() {

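Note (illustration, not part of the commit): writeHierarchyBinary above stores the hierarchy as fixed-size records in breadth-first order; with struct padding, sizeof(NodeData) is 32 bytes on disk. A minimal reader sketch; the NodeRecord struct and readHierarchy() helper are hypothetical, assuming the records are written exactly as the padded in-memory NodeData structs:

#include <cstddef>
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

struct NodeRecord {               // mirrors NodeData, 32 bytes including padding
    uint64_t byteOffset;          // location of the node's point data
    uint64_t byteLength;          // size of the node's point data
    uint64_t childPosition;       // record index of the node's first child
    uint8_t  childBitset;         // bit i set means child i exists
    uint8_t  padding[7];          // padding bytes written along with the struct
};
static_assert(sizeof(NodeRecord) == 32, "must match sizeof(NodeData)");

std::vector<NodeRecord> readHierarchy(const std::string& path) {
    std::ifstream file(path, std::ios::binary | std::ios::ate);
    std::size_t fileSize = file.tellg();
    file.seekg(0);

    std::vector<NodeRecord> records(fileSize / sizeof(NodeRecord));
    file.read(reinterpret_cast<char*>(records.data()), records.size() * sizeof(NodeRecord));
    return records;
}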
@@ -31,3 +31,11 @@ void printThreadsafe(string str1, string str2, string str3);
void printThreadsafe(string str1, string str2, string str3, string str4);

struct MemoryUsage {
    uint64_t totalMemory = 0;
    uint64_t usedMemory = 0;
};

MemoryUsage getMemoryUsage();

@@ -3,6 +3,8 @@
#include <fstream>
#include <iomanip>
#include <iterator>
#include <algorithm>
#include <random>
#include <stack>

#include "ChunkProcessor.h"

@@ -128,7 +130,7 @@ shared_ptr<Points> loadChunk(shared_ptr<Chunk> chunk, Attributes attributes) {
    auto file = fstream(chunk->file, std::ios::in | std::ios::binary);

    ifstream inputFile("shorts.txt", std::ios::binary);
    //ifstream inputFile("shorts.txt", std::ios::binary);

    int bufferSize = numPoints * bytesPerPoint;
    void* buffer = malloc(bufferSize);

@@ -225,7 +227,10 @@ ProcessResult processChunk(
    root->setStorefreeLevels(levels);

    // reduces sampling patterns for sampling algorithms that are order-dependent
    random_shuffle(points->points.begin(), points->points.end());
    std::random_device rd;
    std::mt19937 g(rd());
    //random_shuffle(points->points.begin(), points->points.end());
    std::shuffle(points->points.begin(), points->points.end(), g);

    // TODO may have to disable Node::store for levels up to chunk
    for (Point& point : points->points) {

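Context note (not part of the commit): std::random_shuffle was deprecated in C++14 and removed in C++17, which newer Visual Studio toolsets enforce, so the hunk above switches to std::shuffle with an explicitly seeded engine. The same pattern in a minimal standalone form:

#include <algorithm>
#include <random>
#include <vector>

int main() {
    std::vector<int> values = {1, 2, 3, 4, 5};

    std::random_device rd;        // nondeterministic seed source
    std::mt19937 g(rd());         // Mersenne Twister engine

    // std::random_shuffle(values.begin(), values.end());  // removed in C++17
    std::shuffle(values.begin(), values.end(), g);          // replacement
    return 0;
}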
@@ -283,7 +288,8 @@ ProcessResult processChunk(
    result.upperLevelsData = upperLevelsData;
    result.chunkRoot = chunkRoot;

    printElapsedTime("processing", tStart);
    string label = "processed(" + chunk->id + ")";
    printElapsedTime(label, tStart);

    return result;
}

@@ -10,39 +10,20 @@

#include "Metadata.h"
#include "LASLoader.hpp"
#include "Chunker.h"
//#include "Chunker_Tree.h"
#include "Chunker2.h"
#include "Vector3.h"
#include "ChunkProcessor.h"
#include "PotreeWriter.h"
#include "ThreadPool/ThreadPool.h"

using namespace std::experimental;

namespace fs = std::experimental::filesystem;

int gridSizeFromPointCount(uint64_t pointCount) {
    if (pointCount < 10'000'000) {
        return 2;
    } if (pointCount < 100'000'000) {
        return 4;
    } else if (pointCount < 1'000'000'000) {
        return 8;
    } else if (pointCount < 10'000'000'000) {
        return 16;
    } else if (pointCount < 100'000'000'000) {
        return 32;
    } else {
        return 64;
    }
}

future<Chunker*> chunking(LASLoader* loader, Metadata metadata) {

    double tStart = now();

    string path = metadata.targetDirectory + "/chunks";
    for (const auto& entry : fs::directory_iterator(path)){
        fs::remove(entry);
    string path = metadata.targetDirectory + "/chunks/";
    for (const auto& entry : std::filesystem::directory_iterator(path)){
        std::filesystem::remove(entry);
    }

    Vector3<double> size = metadata.max - metadata.min;

@@ -51,8 +32,10 @@ future<Chunker*> chunking(LASLoader* loader, Metadata metadata) {
    Vector3<double> cubeMax = cubeMin + cubeSize;

    Attributes attributes = loader->getAttributes();
    Chunker* chunker = new Chunker(path, attributes, cubeMin, cubeMax, 8);
    int gridSize = metadata.chunkGridSize;
    Chunker* chunker = new Chunker(path, attributes, cubeMin, cubeMax, gridSize);

    double sum = 0.0;
    int batchNumber = 0;
    auto batch = co_await loader->nextBatch();
    while (batch != nullptr) {

@@ -60,21 +43,30 @@ future<Chunker*> chunking(LASLoader* loader, Metadata metadata) {
            cout << "batch loaded: " << batchNumber << endl;
        }

        auto tStart = now();
        chunker->add(batch);
        auto duration = now() - tStart;
        sum += duration;

        batch = co_await loader->nextBatch();

        batchNumber++;
    }

    chunker->close();
    cout << "raw batch add time: " << sum << endl;

    printElapsedTime("chunking duration", tStart);

    chunker->close();

    printElapsedTime("chunking duration + close", tStart);

    return chunker;
}

future<void> run() {

    //string path = "D:/dev/pointclouds/Riegl/Retz_Airborne_Terrestrial_Combined_1cm.las";

@@ -86,76 +78,84 @@ future<void> run() {
    //string path = "D:/dev/pointclouds/open_topography/ca13/morro_rock/merged.las";
    //string targetDirectory = "C:/temp/test";

    string targetDirectory = "C:/dev/workspaces/potree/develop/test/new_format1";
    string targetDirectory = "C:/dev/workspaces/Potree2/master/pointclouds/test";

    auto tStart = now();

    LASLoader* loader = new LASLoader(path);
    loader->spawnLoadThread();
    Attributes attributes = loader->getAttributes();

    //loader->estimateAttributes();
    //return;

    auto size = loader->max - loader->min;
    double octreeSize = size.max();

    fs::create_directories(targetDirectory);
    fs::create_directories(targetDirectory + "/chunks");

    Metadata metadata;
    metadata.targetDirectory = targetDirectory;
    metadata.min = loader->min;
    metadata.max = loader->min + octreeSize;
    metadata.numPoints = loader->numPoints;
    //metadata.chunkGridSize = gridSizeFromPointCount(metadata.numPoints);

    metadata.chunkGridSize = 8;
    int upperLevels = 3;
    metadata.chunkGridSize = pow(2, upperLevels);

    Chunker* chunker = co_await chunking(loader, metadata);

    {
        vector<shared_ptr<Chunk>> chunks = getListOfChunks(metadata);
        //chunks.resize(39);
        //chunks = {chunks[38]};

        double scale = 0.001;
        double spacing = loader->min.distanceTo(loader->max) / 100.0;
        PotreeWriter writer(targetDirectory,
            metadata.min,
            metadata.max,
            spacing,
            scale,
            upperLevels,
            chunks,
            attributes
        );

        auto min = metadata.min;
        auto max = metadata.max;

        // parallel
        ThreadPool* pool = new ThreadPool(16);
        for (int i = 0; i < chunks.size(); i++) {

            shared_ptr<Chunk> chunk = chunks[i];

            //auto usage1 = getMemoryUsage();

            //{
            //    auto points = loadChunk(chunk, attributes);
            //    //ProcessResult processResult = processChunk(chunk, points, min, max, spacing);
            //    //writer.writeChunk(chunk, points, processResult);
            //}
            //

            //auto usage2 = getMemoryUsage();

            //cout << usage1.usedMemory << endl;
            //cout << usage2.usedMemory << endl;

            pool->enqueue([chunk, attributes, &writer, min, max, spacing]() {
                auto points = loadChunk(chunk, attributes);

                ProcessResult processResult = processChunk(chunk, points, min, max, spacing);

                writer.writeChunk(chunk, points, processResult);
            });
        }
        delete pool;

        writer.close();

    vector<shared_ptr<Chunk>> chunks = getListOfChunks(metadata);
    //chunks.resize(39);
    //chunks = {chunks[38]};

    double scale = 0.001;
    double spacing = loader->min.distanceTo(loader->max) / 100.0;
    PotreeWriter writer(targetDirectory,
        metadata.min,
        metadata.max,
        spacing,
        scale,
        upperLevels,
        chunks,
        attributes
    );

    auto min = metadata.min;
    auto max = metadata.max;

    // parallel
    ThreadPool* pool = new ThreadPool(16);
    for(int i = 0; i < chunks.size(); i++){

        shared_ptr<Chunk> chunk = chunks[i];

        pool->enqueue([chunk, attributes, &writer, min, max, spacing](){
            auto points = loadChunk(chunk, attributes);

            ProcessResult processResult = processChunk(chunk, points, min, max, spacing);

            writer.writeChunk(chunk, points, processResult);
        });
    }
    delete pool;

    writer.close();

    auto tEnd = now();
    auto duration = tEnd - tStart;

@@ -164,50 +164,10 @@ future<void> run() {
    co_return;
}

//#include "TaskPool.h"

//void testTaskPool() {
//
//    struct Batch {
//        string path = "";
//        string text = "";
//
//        Batch(string path, string text) {
//            this->path = path;
//            this->text = text;
//        }
//    };
//
//    string someCapturedValue = "asoudh adpif sdgsrg";
//    auto processor = [someCapturedValue](shared_ptr<Batch> batch) {
//        fstream file;
//        file.open(batch->path, ios::out);
//        file << batch->text;
//        file << someCapturedValue;
//        file.close();
//    };
//
//    TaskPool<Batch> pool(5, processor);
//
//    shared_ptr<Batch> batch1 = make_shared<Batch>(
//        "C:/temp/test1.txt",
//        "content of file 1 ");
//    shared_ptr<Batch> batch2 = make_shared<Batch>(
//        "C:/temp/test2.txt",
//        "content of file 2 ");
//
//    pool.addTask(batch1);
//    pool.addTask(batch2);
//
//    pool.close();
//}

int main(int argc, char **argv){

    run().wait();

    //testTaskPool();

    return 0;
}

@@ -105,3 +105,33 @@ void printThreadsafe(string str1, string str2, string str3, string str4) {

    cout << ss.str();
}

#include <Windows.h>
#include "psapi.h"

MemoryUsage getMemoryUsage() {

    MemoryUsage usage;

    {
        MEMORYSTATUSEX memInfo;
        memInfo.dwLength = sizeof(MEMORYSTATUSEX);
        GlobalMemoryStatusEx(&memInfo);

        usage.totalMemory = memInfo.ullTotalPhys;
    }

    {
        PROCESS_MEMORY_COUNTERS_EX pmc;
        GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*)&pmc, sizeof(pmc));
        SIZE_T virtualMemUsedByMe = pmc.PrivateUsage;

        usage.usedMemory = pmc.WorkingSetSize;
    }

    return usage;
}