Compare commits

...

49 Commits

Author SHA1 Message Date
m-schuetz
2758400ca5 ... 2020-02-28 15:04:42 +01:00
m-schuetz
b7d9e402cc ... 2020-02-26 14:44:39 +01:00
m-schuetz
8a52ed8f69 ... 2020-02-20 10:50:27 +01:00
m-schuetz
9b00faf524 .... 2020-02-19 17:23:08 +01:00
m-schuetz
f5656b6219 faster and correcter but not so nice, not sure why 2020-02-19 14:47:05 +01:00
m-schuetz
fb1681f60a ... 2020-02-18 14:43:46 +01:00
m-schuetz
dac3eaaab2 ... 2020-02-18 14:33:00 +01:00
m-schuetz
3deaa3757f ... 2020-02-15 19:11:43 +01:00
m-schuetz
5be3e0b31d ... 2020-02-14 17:49:11 +01:00
m-schuetz
67d82f59c6 ... 2020-02-11 13:51:11 +01:00
m-schuetz
5779218832 ... 2020-02-11 12:05:17 +01:00
m-schuetz
288fa5beac update to latest visual studio 2020-02-05 12:06:31 +01:00
m-schuetz
fa5e4b152c ... 2020-01-19 15:34:14 +01:00
m-schuetz
e33463df59 experimenting 2020-01-12 15:42:55 +01:00
m-schuetz
b86fdc7556 experimenting 2020-01-12 14:00:52 +01:00
m-schuetz
40eaaffa6a memory management 2020-01-11 18:41:20 +01:00
m-schuetz
c5913bd323 commit with error for stackoverflow question 2020-01-11 18:24:33 +01:00
m-schuetz
cc55012180 memory management 2020-01-11 17:50:53 +01:00
m-schuetz
87c73d7980 memory management 2020-01-11 17:10:46 +01:00
m-schuetz
1f97fa8455 experimenting 2020-01-10 16:15:14 +01:00
m-schuetz
d3cda623f2 experimenting 2020-01-09 17:57:19 +01:00
m-schuetz
865f80ab31 more experimenting 2020-01-08 16:22:25 +01:00
m-schuetz
a1503e244c experimenting with binning 2020-01-08 15:36:53 +01:00
m-schuetz
c52f098d5e experimenting 2020-01-07 17:11:10 +01:00
m-schuetz
67c38711b4 more experimenting 2020-01-05 20:42:37 +01:00
m-schuetz
b659e4314f more playing around 2020-01-04 14:26:45 +01:00
m-schuetz
7182c682f6 more testing 2020-01-03 17:22:51 +01:00
m-schuetz
b582abf8ea more testing 2020-01-03 15:34:47 +01:00
m-schuetz
5f518f1da1 more playing around 2020-01-02 18:57:15 +01:00
m-schuetz
6740fc57d8 ... 2020-01-02 14:59:55 +01:00
m-schuetz
0c9406ff20 testing 2019-12-30 20:40:25 +01:00
m-schuetz
8446425437 some more testing of stuff 2019-12-29 19:49:31 +01:00
m-schuetz
20981e4579 trying stuff 2019-12-29 17:59:51 +01:00
m-schuetz
9937f9c142 trying stuff 2019-12-29 16:23:15 +01:00
m-schuetz
cdd98386b8 ... 2019-12-19 18:25:58 +01:00
m-schuetz
88d487a35c ... 2019-12-17 15:36:19 +01:00
m-schuetz
3d290761dd ... 2019-12-16 17:16:28 +01:00
m-schuetz
3e5703eb3d ... 2019-12-16 17:10:36 +01:00
mschuetz
09d5d10d16 ... 2015-08-06 09:47:22 +02:00
mschuetz
4ae4695888 first version with repeated flushing 2015-08-05 14:19:17 +02:00
mschuetz
1460ec5088 ... 2015-08-04 17:15:33 +02:00
mschuetz
3462079d9c cleanup 2015-08-04 14:19:35 +02:00
mschuetz
6b8475f7c4 ... 2015-08-04 13:41:59 +02:00
mschuetz
034a255702 refactoring / performance 2015-08-04 13:22:53 +02:00
mschuetz
a4adee4d0f performance improvements 2015-08-03 20:53:54 +02:00
mschuetz
3e77799c32 refactoring and performance improvements 2015-08-03 20:06:12 +02:00
mschuetz
3405bc89d2 performance improvements 2015-07-16 14:29:48 +02:00
mschuetz
7748467a27 basic writing possible 2015-07-15 10:31:46 +02:00
mschuetz
df993bae7d cleanup to necessary base for rewrite 2015-07-15 09:53:40 +02:00
95 changed files with 28126 additions and 16257 deletions

20
.vscode/keybindings.json vendored Normal file
View File

@@ -0,0 +1,20 @@
// Place your key bindings in this file to overwrite the defaults
[
{
"key": "ctrl+l",
"command": "editor.action.deleteLines",
"when": "editorTextFocus && !editorReadonly"
},{
"key": "ctrl+shift+i",
"command": "editor.action.toggleRenderWhitespace"
},{
"key": "ctrl+d",
"command": "editor.action.copyLinesDownAction"
},{
"key": "alt+2",
"command": "type",
"args": {
"text": "`"
}
}
]

90
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,90 @@
{
"workbench.editor.enablePreview": false,
"files.associations": {
"*.vs": "cpp",
"*.fs": "cpp",
"memory": "cpp",
"algorithm": "cpp",
"array": "cpp",
"atomic": "cpp",
"cctype": "cpp",
"chrono": "cpp",
"clocale": "cpp",
"cmath": "cpp",
"concepts": "cpp",
"condition_variable": "cpp",
"cstdarg": "cpp",
"cstddef": "cpp",
"cstdint": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"deque": "cpp",
"exception": "cpp",
"filesystem": "cpp",
"forward_list": "cpp",
"fstream": "cpp",
"functional": "cpp",
"future": "cpp",
"initializer_list": "cpp",
"iomanip": "cpp",
"ios": "cpp",
"iosfwd": "cpp",
"iostream": "cpp",
"istream": "cpp",
"iterator": "cpp",
"limits": "cpp",
"list": "cpp",
"locale": "cpp",
"map": "cpp",
"mutex": "cpp",
"new": "cpp",
"numeric": "cpp",
"ostream": "cpp",
"queue": "cpp",
"ratio": "cpp",
"sstream": "cpp",
"stdexcept": "cpp",
"streambuf": "cpp",
"string": "cpp",
"system_error": "cpp",
"thread": "cpp",
"tuple": "cpp",
"type_traits": "cpp",
"typeinfo": "cpp",
"unordered_map": "cpp",
"utility": "cpp",
"valarray": "cpp",
"vector": "cpp",
"xfacet": "cpp",
"xhash": "cpp",
"xiosbase": "cpp",
"xlocale": "cpp",
"xlocbuf": "cpp",
"xlocinfo": "cpp",
"xlocmes": "cpp",
"xlocmon": "cpp",
"xlocnum": "cpp",
"xloctime": "cpp",
"xmemory": "cpp",
"xstddef": "cpp",
"xstring": "cpp",
"xtr1common": "cpp",
"xtree": "cpp",
"xutility": "cpp"
},
"files.trimTrailingWhitespace": false,
"editor.fontSize": 28,
"editor.autoIndent": false,
"editor.detectIndentation": false,
"editor.insertSpaces": false,
"editor.minimap.enabled": false,
"editor.autoClosingBrackets": false,
"editor.formatOnType": false,
"editor.acceptSuggestionOnEnter": "off",
"editor.acceptSuggestionOnCommitCharacter": false,
"editor.mouseWheelZoom": true,
"editor.renderWhitespace": "all",
}

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 2.8)
cmake_minimum_required(VERSION 3.3)
project(Potree)

View File

@@ -1,82 +0,0 @@
#ifndef AABB_H
#define AABB_H
#include <math.h>
#include <algorithm>
#include "Vector3.h"
using std::min;
using std::max;
using std::endl;
namespace Potree{
class AABB{
public:
Vector3<double> min;
Vector3<double> max;
Vector3<double> size;
AABB(){
min = Vector3<double>(std::numeric_limits<float>::max());
max = Vector3<double>(-std::numeric_limits<float>::max());
size = Vector3<double>(std::numeric_limits<float>::max());
}
AABB(Vector3<double> min, Vector3<double> max){
this->min = min;
this->max = max;
size = max-min;
}
bool isInside(const Vector3<double> &p){
if(min.x <= p.x && p.x <= max.x){
if(min.y <= p.y && p.y <= max.y){
if(min.z <= p.z && p.z <= max.z){
return true;
}
}
}
return false;
}
void update(const Vector3<double> &point){
min.x = std::min(min.x, point.x);
min.y = std::min(min.y, point.y);
min.z = std::min(min.z, point.z);
max.x = std::max(max.x, point.x);
max.y = std::max(max.y, point.y);
max.z = std::max(max.z, point.z);
size = max - min;
}
void update(const AABB &aabb){
update(aabb.min);
update(aabb.max);
}
void makeCubic(){
max = min + size.maxValue();
size = max - min;
}
friend ostream &operator<<( ostream &output, const AABB &value ){
output << "min: " << value.min << endl;
output << "max: " << value.max << endl;
output << "size: " << value.size << endl;
return output;
}
};
}
#endif

View File

@@ -1,55 +0,0 @@
#ifndef BINPOINTREADER_H
#define BINPOINTREADER_H
#include <string>
#include <iostream>
#include <vector>
#include "Point.h"
#include "PointReader.h"
#include "PointAttributes.hpp"
using std::string;
using std::ifstream;
using std::cout;
using std::endl;
using std::vector;
namespace Potree{
// Reads points from potree BIN data, decoding each record according to
// `attributes`. Counterpart to BINPointWriter.
class BINPointReader : public PointReader{
private:
AABB aabb;
double scale;
string path;
// input file(s) belonging to this source; currentFile tracks the one
// presumably being consumed at the moment — behavior lives in the .cpp
vector<string> files;
vector<string>::iterator currentFile;
ifstream *reader;
PointAttributes attributes;
// most recently decoded point, handed out by getPoint()
Point point;
public:
BINPointReader(string path, AABB aabb, double scale, PointAttributes pointAttributes);
~BINPointReader();
// advances to the next point; implementation in the .cpp
bool readNextPoint();
Point getPoint();
AABB getAABB();
long long numPoints();
void close();
Vector3<double> getScale();
};
}
#endif

View File

@@ -1,151 +0,0 @@
#ifndef BINPOINTWRITER_H
#define BINPOINTWRITER_H
#include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include "AABB.h"
#include "PointAttributes.hpp"
#include "PointWriter.hpp"
#include "stuff.h"
using std::string;
using std::vector;
using std::ofstream;
using std::ios;
namespace Potree{
class BINPointWriter : public PointWriter{
public:
PointAttributes attributes;
ofstream *writer;
AABB aabb;
double scale;
BINPointWriter(string file, AABB aabb, double scale, PointAttributes pointAttributes) {
this->file = file;
this->aabb = aabb;
this->scale = scale;
numPoints = 0;
attributes = pointAttributes;
writer = new ofstream(file, ios::out | ios::binary);
}
BINPointWriter(string file, PointAttributes attributes) {
this->file = file;
numPoints = 0;
this->attributes = attributes;
writer = new ofstream(file, ios::out | ios::binary);
}
~BINPointWriter(){
close();
}
void write(Point &point){
for(int i = 0; i < attributes.size(); i++){
PointAttribute attribute = attributes[i];
if(attribute == PointAttribute::POSITION_CARTESIAN){
//float pos[3] = {(float) point.x,(float) point.y,(float) point.z};
int x = (int)((point.position.x - aabb.min.x) / scale);
int y = (int)((point.position.y - aabb.min.y) / scale);
int z = (int)((point.position.z - aabb.min.z) / scale);
int pos[3] = {x, y, z};
writer->write((const char*)pos, 3*sizeof(int));
}else if(attribute == PointAttribute::COLOR_PACKED){
unsigned char rgba[4] = {point.color.x, point.color.y, point.color.z, 255};
writer->write((const char*)rgba, 4*sizeof(unsigned char));
}else if(attribute == PointAttribute::INTENSITY){
writer->write((const char*)&point.intensity, sizeof(unsigned short));
}else if(attribute == PointAttribute::CLASSIFICATION){
writer->write((const char*)&point.classification, sizeof(unsigned char));
} else if (attribute == PointAttribute::RETURN_NUMBER) {
writer->write((const char*)&point.returnNumber, sizeof(unsigned char));
} else if (attribute == PointAttribute::NUMBER_OF_RETURNS) {
writer->write((const char*)&point.numberOfReturns, sizeof(unsigned char));
} else if (attribute == PointAttribute::SOURCE_ID) {
writer->write((const char*)&point.pointSourceID, sizeof(unsigned short));
} else if (attribute == PointAttribute::GPS_TIME) {
writer->write((const char*)&point.gpsTime, sizeof(double));
} else if(attribute == PointAttribute::NORMAL_SPHEREMAPPED){
// see http://aras-p.info/texts/CompactNormalStorage.html
float nx = point.normal.x;
float ny = point.normal.y;
float nz = point.normal.z;
float lengthxy = sqrt(nx * nx + ny * ny);
float ex = 0.5f * (nx / lengthxy) * sqrt(-nz * 0.5f + 0.5f) + 0.5f;
float ey = 0.5f * (ny / lengthxy) * sqrt(-nz * 0.5f + 0.5f) + 0.5f;
unsigned char bx = (unsigned char)(ex * 255);
unsigned char by = (unsigned char)(ey * 255);
writer->write((const char*)&bx, 1);
writer->write((const char*)&by, 1);
}else if(attribute == PointAttribute::NORMAL_OCT16){
// see http://lgdv.cs.fau.de/get/1602
float nx = point.normal.x;
float ny = point.normal.y;
float nz = point.normal.z;
float norm1 = abs(nx) + abs(ny) + abs(nz);
nx = nx / norm1;
ny = ny / norm1;
nz = nz / norm1;
float u = 0;
float v = 0;
if(nz >= 0){
u = nx;
v = ny;
}else{
u = psign(nx)*(1-psign(ny)*ny);
v = psign(ny)*(1-psign(nx)*nx);
}
unsigned char bx = (unsigned char)(min((u + 1) * 128, 255.0f));
unsigned char by = (unsigned char)(min((v + 1) * 128, 255.0f));
writer->write((const char*)&bx, 1);
writer->write((const char*)&by, 1);
}else if(attribute == PointAttribute::NORMAL){
writer->write((const char*)&point.normal.x, sizeof(float));
writer->write((const char*)&point.normal.y, sizeof(float));
writer->write((const char*)&point.normal.z, sizeof(float));
}
}
writer->write(reinterpret_cast<const char*>(point.extraBytes.data()), point.extraBytes.size());
numPoints++;
}
void close(){
if(writer != NULL){
writer->close();
delete writer;
writer = NULL;
}
}
};
}
#endif

View File

@@ -0,0 +1,53 @@
#pragma once
#include "Points.h"
#include "math.h"
#include "stuff.h"
// Bundles the grid index, chunk name, target file path and the in-memory
// point batch that belongs to one chunk.
struct ChunkPiece {
	int index = -1;
	string name = "";
	string path = "";
	shared_ptr<Points> points;

	ChunkPiece(int index, string name, string path, shared_ptr<Points> points)
		: index(index), name(name), path(path), points(points) {
	}
};
// Distributes incoming point batches into a regular grid of chunk files
// under `path`. Implementation lives in the matching .cpp.
class Chunker {
public:
string path = "";
Attributes attributes;
// bounding box of the data set
Vector3<double> min;
Vector3<double> max;
Vector3<double> size;
// NOTE(review): presumably cells per axis kept as doubles for index math —
// confirm against the .cpp
Vector3<double> cellsD;
int gridSize = 0;
Chunker(string path, Attributes attributes, Vector3<double> min, Vector3<double> max, int gridSize);
void close();
// name for the chunk with the given linear cell index
string getName(int index);
// routes one batch of points into the chunk grid
void add(shared_ptr<Points> batch);
};
// Entry point: chunks the input cloud at pathIn into files under pathOut.
void doChunking(string pathIn, string pathOut);

View File

@@ -1,196 +0,0 @@
#ifndef CLOUDJS_H
#define CLOUDJS_H
#include <string>
#include <vector>
#include <sstream>
#include <list>
#include "rapidjson/document.h"
#include "rapidjson/prettywriter.h"
#include "rapidjson/stringbuffer.h"
#include "AABB.h"
#include "definitions.hpp"
#include "PointAttributes.hpp"
using std::string;
using std::vector;
using std::stringstream;
using std::list;
using rapidjson::Document;
using rapidjson::StringBuffer;
using rapidjson::Writer;
using rapidjson::PrettyWriter;
using rapidjson::Value;
namespace Potree{
// In-memory representation of potree's cloud.js metadata. Parses an existing
// cloud.js (string constructor) and serializes itself back to pretty-printed
// JSON (getString()) via rapidjson.
class CloudJS{
public:
// One entry of the hierarchy list: node name plus its point count.
class Node{
public:
string name;
int pointCount;
Node(string name, int pointCount){
this->name = name;
this->pointCount = pointCount;
}
};
string version;
string octreeDir = "data";
AABB boundingBox;
AABB tightBoundingBox;
OutputFormat outputFormat;
PointAttributes pointAttributes;
double spacing;
vector<Node> hierarchy;
double scale;
int hierarchyStepSize = -1;
long long numAccepted = 0;
string projection = "";
CloudJS() = default;
// Parses the given cloud.js JSON content.
// NOTE(review): the required members below are accessed via operator[]
// without existence checks — malformed input fails inside rapidjson.
CloudJS(string content){
Document d;
d.Parse(content.c_str());
Value &vVersion = d["version"];
Value &vOctreeDir = d["octreeDir"];
Value &vPoints = d["points"];
Value &vBoundingBox = d["boundingBox"];
Value &vTightBoundingBox = d["tightBoundingBox"];
Value &vPointAttributes = d["pointAttributes"];
Value &vSpacing = d["spacing"];
Value &vScale = d["scale"];
Value &vHierarchyStepSize = d["hierarchyStepSize"];
version = vVersion.GetString();
octreeDir = vOctreeDir.GetString();
// "projection" is the only field treated as optional
if(d.HasMember("projection")){
Value &vProjection = d["projection"];
projection = vProjection.GetString();
}
numAccepted = vPoints.GetInt64();
boundingBox = AABB(
Vector3<double>(vBoundingBox["lx"].GetDouble(), vBoundingBox["ly"].GetDouble(), vBoundingBox["lz"].GetDouble()),
Vector3<double>(vBoundingBox["ux"].GetDouble(), vBoundingBox["uy"].GetDouble(), vBoundingBox["uz"].GetDouble())
);
tightBoundingBox = AABB(
Vector3<double>(vTightBoundingBox["lx"].GetDouble(), vTightBoundingBox["ly"].GetDouble(), vTightBoundingBox["lz"].GetDouble()),
Vector3<double>(vTightBoundingBox["ux"].GetDouble(), vTightBoundingBox["uy"].GetDouble(), vTightBoundingBox["uz"].GetDouble())
);
// pointAttributes is either an array of attribute names (BINARY output)
// or the single string "LAS"/"LAZ"
if(vPointAttributes.IsArray()){
outputFormat = OutputFormat::BINARY;
pointAttributes = PointAttributes();
for (Value::ConstValueIterator itr = vPointAttributes.Begin(); itr != vPointAttributes.End(); ++itr){
string strpa = itr->GetString();
PointAttribute pa = PointAttribute::fromString(strpa);
pointAttributes.add(pa);
}
}else{
string pa = vPointAttributes.GetString();
if(pa == "LAS"){
outputFormat = OutputFormat::LAS;
}else if(pa == "LAZ"){
outputFormat = OutputFormat::LAZ;
}
}
spacing = vSpacing.GetDouble();
scale = vScale.GetDouble();
hierarchyStepSize = vHierarchyStepSize.GetInt();
}
// Serializes this object to pretty-printed cloud.js JSON.
// NOTE(review): the `hierarchy` member is not written out here.
string getString(){
Document d(rapidjson::kObjectType);
Value version(this->version.c_str(), (rapidjson::SizeType)this->version.size());
Value octreeDir("data");
Value projection(this->projection.c_str(), (rapidjson::SizeType)this->projection.size());
Value boundingBox(rapidjson::kObjectType);
{
boundingBox.AddMember("lx", this->boundingBox.min.x, d.GetAllocator());
boundingBox.AddMember("ly", this->boundingBox.min.y, d.GetAllocator());
boundingBox.AddMember("lz", this->boundingBox.min.z, d.GetAllocator());
boundingBox.AddMember("ux", this->boundingBox.max.x, d.GetAllocator());
boundingBox.AddMember("uy", this->boundingBox.max.y, d.GetAllocator());
boundingBox.AddMember("uz", this->boundingBox.max.z, d.GetAllocator());
}
Value tightBoundingBox(rapidjson::kObjectType);
{
tightBoundingBox.AddMember("lx", this->tightBoundingBox.min.x, d.GetAllocator());
tightBoundingBox.AddMember("ly", this->tightBoundingBox.min.y, d.GetAllocator());
tightBoundingBox.AddMember("lz", this->tightBoundingBox.min.z, d.GetAllocator());
tightBoundingBox.AddMember("ux", this->tightBoundingBox.max.x, d.GetAllocator());
tightBoundingBox.AddMember("uy", this->tightBoundingBox.max.y, d.GetAllocator());
tightBoundingBox.AddMember("uz", this->tightBoundingBox.max.z, d.GetAllocator());
}
Value pointAttributes;
if(outputFormat == OutputFormat::BINARY){
// BINARY: emit attribute descriptors as an array of objects
pointAttributes.SetArray();
for(int i = 0; i < this->pointAttributes.size(); i++){
PointAttribute attribute = this->pointAttributes[i];
Value vAttribute(rapidjson::kObjectType);
vAttribute.AddMember("name", Value(attribute.name.c_str(), d.GetAllocator()), d.GetAllocator());
vAttribute.AddMember("size", attribute.byteSize, d.GetAllocator());
vAttribute.AddMember("elements", attribute.numElements, d.GetAllocator());
vAttribute.AddMember("elementSize", attribute.byteSize / attribute.numElements, d.GetAllocator());
vAttribute.AddMember("type", Value(attribute.type.c_str(), d.GetAllocator()), d.GetAllocator());
vAttribute.AddMember("description", Value(attribute.description.c_str(), d.GetAllocator()), d.GetAllocator());
pointAttributes.PushBack(vAttribute, d.GetAllocator());
}
}else if(outputFormat == OutputFormat::LAS){
pointAttributes = "LAS";
}else if(outputFormat == OutputFormat::LAZ){
pointAttributes = "LAZ";
}
Value spacing(this->spacing);
Value scale(this->scale);
Value hierarchyStepSize(this->hierarchyStepSize);
d.AddMember("version", version, d.GetAllocator());
d.AddMember("octreeDir", octreeDir, d.GetAllocator());
d.AddMember("projection", projection, d.GetAllocator());
d.AddMember("points", (uint64_t)numAccepted, d.GetAllocator());
d.AddMember("boundingBox", boundingBox, d.GetAllocator());
d.AddMember("tightBoundingBox", tightBoundingBox, d.GetAllocator());
d.AddMember("pointAttributes", pointAttributes, d.GetAllocator());
d.AddMember("spacing", spacing, d.GetAllocator());
d.AddMember("scale", scale, d.GetAllocator());
d.AddMember("hierarchyStepSize", hierarchyStepSize, d.GetAllocator());
StringBuffer buffer;
PrettyWriter<StringBuffer> writer(buffer);
d.Accept(writer);
return buffer.GetString();
}
};
}
#endif

View File

@@ -1,76 +0,0 @@
#pragma once
#include <unordered_map>
using std::unordered_map;
// see LAS spec 1.4
// https://www.asprs.org/wp-content/uploads/2010/12/LAS_1_4_r13.pdf
// total of 192 bytes
// Binary layout of one "extra bytes" VLR descriptor (LAS 1.4 spec);
// the field layout must match the spec byte for byte.
struct ExtraBytesRecord {
unsigned char reserved[2];
// id looked up in typeToExtraType below
unsigned char data_type;
unsigned char options;
char name[32];
unsigned char unused[4];
int64_t no_data[3]; // 24 = 3*8 bytes // hack: not really int, can be double too
int64_t min[3]; // 24 = 3*8 bytes // hack: not really int, can be double too
int64_t max[3]; // 24 = 3*8 bytes // hack: not really int, can be double too
double scale[3];
double offset[3];
char description[32];
};
// Decoded form of an extra-bytes data_type id: type name, byte size,
// number of elements.
struct ExtraType {
string type = "";
int size = 0;
int numElements = 0;
};
// superseded by the typeToExtraType lookup table below
//ExtraType extraTypeFromID(int id) {
// if (id == 0) {
// return ExtraType{ "undefined", 0, 1 };
// }else if (id == 1) {
// return ExtraType{ "uint8", 1, 1 };
// }else if (id == 2) {
// return ExtraType{ "int8", 1, 1 };
// }else if (id == 3) {
// return ExtraType{ "uint16", 2, 1 };
// }else if (id == 4) {
// return ExtraType{ "int16", 2, 1 };
// }else if (id == 5) {
// return ExtraType{ "uint32", 4, 1 };
// }else if (id == 6) {
// return ExtraType{ "int32", 4, 1 };
// }else if (id == 7) {
// return ExtraType{ "uint64", 8, 1 };
// }else if (id == 8) {
// return ExtraType{ "int64", 8, 1 };
// }else if (id == 9) {
// return ExtraType{ "float", 4, 1 };
// }else if (id == 10) {
// return ExtraType{ "double", 8, 1 };
// }
//
// cout << "ERROR: unsupported extra type: " << id << endl;
// exit(123);
//}
// Maps the LAS extra-bytes data_type id (0..10) to its type descriptor.
const unordered_map<unsigned char, ExtraType> typeToExtraType = {
{0, ExtraType{"undefined", 0, 1}},
{1, ExtraType{"uint8", 1, 1}},
{2, ExtraType{"int8", 1, 1}},
{3, ExtraType{"uint16", 2, 1}},
{4, ExtraType{"int16", 2, 1}},
{5, ExtraType{"uint32", 4, 1}},
{6, ExtraType{"int32", 4, 1}},
{7, ExtraType{"uint64", 8, 1}},
{8, ExtraType{"int64", 8, 1}},
{9, ExtraType{"float", 4, 1}},
{10, ExtraType{"double", 8, 1}},
};

View File

@@ -1,35 +0,0 @@
#ifndef GRID_CELL_H
#define GRID_CELL_H
#include "Point.h"
#include "GridIndex.h"
#include <math.h>
#include <vector>
using std::vector;
namespace Potree{
class SparseGrid;
// One occupied cell of a SparseGrid: stores point positions plus pointers
// to neighbouring cells.
class GridCell{
public:
vector<Vector3<double> > points;
// NOTE(review): presumably used for spacing checks across cell borders —
// confirm in SparseGrid
vector<GridCell*> neighbours;
// non-owning back pointer to the grid this cell belongs to
SparseGrid *grid;
GridCell();
GridCell(SparseGrid *grid, GridIndex &index);
void add(Vector3<double> p);
// distance test against squaredSpacing; implementation in the .cpp
bool isDistant(const Vector3<double> &p, const double &squaredSpacing) const;
};
}
#endif

View File

@@ -1,45 +0,0 @@
#ifndef GRID_INDEX_H
#define GRID_INDEX_H
namespace Potree{
// Integer (i, j, k) cell coordinate within a grid, ordered lexicographically
// so it can serve as a key in ordered containers.
class GridIndex{
public:
	int i,j,k;

	GridIndex() : i(0), j(0), k(0){
	}

	GridIndex(int i, int j, int k) : i(i), j(j), k(k){
	}

	// strict lexicographic ordering on (i, j, k)
	bool operator<(const GridIndex& b) const{
		if(i != b.i){
			return i < b.i;
		}
		if(j != b.j){
			return j < b.j;
		}
		return k < b.k;
	}

	friend ostream &operator<<( ostream &output, const GridIndex &value ){
		output << "[" << value.i << ", " << value.j << ", " << value.k << "]" ;
		return output;
	}
};
}
#endif

View File

@@ -0,0 +1,14 @@
#pragma once
#include <string>
// Entry point of the "centered" indexing experiment.
// NOTE(review): behavior is defined in the matching .cpp, not visible here.
namespace centered{
using std::string;
void doIndexing(string path);
}

View File

@@ -0,0 +1,13 @@
#pragma once
#include <string>
// Entry point of the "countsort" indexing experiment.
// NOTE(review): behavior is defined in the matching .cpp, not visible here.
namespace countsort{
using std::string;
void doIndexing(string path);
}

View File

@@ -0,0 +1,14 @@
#pragma once
#include <string>
// Entry point of the "centered, no chunking" indexing experiment; reads from
// pathIn and writes to pathOut.
// NOTE(review): behavior is defined in the matching .cpp, not visible here.
namespace centered_nochunks{
using std::string;
void doIndexing(string pathIn, string pathOut);
}

View File

@@ -0,0 +1,376 @@
#pragma once
#include <string>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <thread>
#include <mutex>
#include <future>
#include <experimental/coroutine>
#include <algorithm>
#include <cmath>
#include "laszip_api.h"
#include "LASWriter.hpp"
#include "Points.h"
#include "stuff.h"
#include "Vector3.h"
using namespace std;
// see LAS spec 1.4
// https://www.asprs.org/wp-content/uploads/2010/12/LAS_1_4_r13.pdf
// total of 192 bytes
// Binary layout of one "extra bytes" VLR descriptor (LAS 1.4, record id 4).
// Read via reinterpret_cast in estimateAttributes() below, so the field
// layout must match the spec byte for byte.
struct ExtraBytesRecord {
unsigned char reserved[2];
// id looked up in typeToExtraType below
unsigned char data_type;
unsigned char options;
char name[32];
unsigned char unused[4];
int64_t no_data[3]; // 24 = 3*8 bytes // hack: not really int, can be double too
int64_t min[3]; // 24 = 3*8 bytes // hack: not really int, can be double too
int64_t max[3]; // 24 = 3*8 bytes // hack: not really int, can be double too
double scale[3];
double offset[3];
char description[32];
};
// Decoded form of an extra-bytes data_type id.
struct ExtraType {
AttributeType type;
int size = 0;
int numElements = 0;
};
// Maps the LAS extra-bytes data_type id (0..10) to attribute type/byte size.
const unordered_map<unsigned char, ExtraType> typeToExtraType = {
{0, ExtraType{AttributeTypes::undefined, 0, 1}},
{1, ExtraType{AttributeTypes::uint8, 1, 1}},
{2, ExtraType{AttributeTypes::int8, 1, 1}},
{3, ExtraType{AttributeTypes::uint16, 2, 1}},
{4, ExtraType{AttributeTypes::int16, 2, 1}},
{5, ExtraType{AttributeTypes::uint32, 4, 1}},
{6, ExtraType{AttributeTypes::int32, 4, 1}},
{7, ExtraType{AttributeTypes::uint64, 8, 1}},
{8, ExtraType{AttributeTypes::int64, 8, 1}},
{9, ExtraType{AttributeTypes::float32, 4, 1}},
{10, ExtraType{AttributeTypes::float64, 8, 1}},
};
// Reads the LAS/LAZ header of `path` and derives the extra-byte attributes
// declared in its VLRs (record id 4, LAS 1.4 spec). Only extra attributes
// are reported; standard attributes are not part of the result.
// BUGFIX: the laszip reader is now closed and destroyed before returning;
// it was previously leaked.
inline Attributes estimateAttributes(string path) {
	laszip_POINTER laszip_reader = nullptr;
	laszip_create(&laszip_reader);

	laszip_BOOL request_reader = 1;
	laszip_request_compatibility_mode(laszip_reader, request_reader);

	laszip_BOOL is_compressed = iEndsWith(path, ".laz") ? 1 : 0;
	laszip_open_reader(laszip_reader, path.c_str(), &is_compressed);

	laszip_header* header = nullptr;
	laszip_get_header_pointer(laszip_reader, &header);

	// LAS 1.4 stores large counts in the extended field
	int64_t npoints = (header->number_of_point_records ? header->number_of_point_records : header->extended_number_of_point_records);

	//int64_t nPointsChecking = std::min(npoints, 1'000'000ll);
	//for (int64_t i = 0; i < nPointsChecking; i++) {
	//}

	Attributes attributes;
	{ // read extra bytes
		for (uint64_t i = 0; i < header->number_of_variable_length_records; i++) {
			laszip_vlr_struct vlr = header->vlrs[i];

			// the extra-bytes VLR has record id 4
			if (vlr.record_id != 4) {
				continue;
			}

			cout << "record id: " << vlr.record_id << endl;
			cout << "record_length_after_header: " << vlr.record_length_after_header << endl;

			int numExtraBytes = vlr.record_length_after_header / sizeof(ExtraBytesRecord);
			ExtraBytesRecord* extraBytes = reinterpret_cast<ExtraBytesRecord*>(vlr.data);

			for (int j = 0; j < numExtraBytes; j++) {
				ExtraBytesRecord extraAttribute = extraBytes[j];
				string name = string(extraAttribute.name);
				cout << "name: " << name << endl;

				ExtraType et = typeToExtraType.at(extraAttribute.data_type);

				Attribute attribute(name, et.type);
				attribute.bytes = et.size;
				attribute.numElements = et.numElements;

				attributes.add(attribute);
			}
		}
	}

	// release the reader (previously leaked)
	laszip_close_reader(laszip_reader);
	laszip_destroy(laszip_reader);

	return attributes;
}
// Work item for one loader thread: a contiguous range of points to read.
struct LasLoadTask {
uint64_t start = 0;
uint64_t numPoints = 0;
// true once all points have been claimed; tells the worker to stop
bool done = false;
};
// Multi-threaded LAS/LAZ loader. The constructor reads the header (bounds,
// point count) and spawns `numThreads` detached worker threads. Each worker
// opens its own laszip reader, claims ranges of up to `batchSize` points via
// getLoadTask(), decodes them into Points, and pushes finished batches onto
// `batches`. Consumers pull results through nextBatch().
class LASLoader {
public:
//laszip_POINTER laszip_reader = nullptr;
//laszip_header* header = nullptr;
//laszip_point* point;
uint64_t batchSize = 1'000'000;
// finished batches; produced by workers, consumed by nextBatch()
vector<shared_ptr<Points>> batches;
// set by the last worker to finish; guarded by mtx_finishedLoading
bool finishedLoading = false;
uint64_t numPoints = 0;
Vector3<double> min = { 0.0, 0.0, 0.0 };
Vector3<double> max = { 0.0, 0.0, 0.0 };
mutex mtx_batches;
mutex mtx_finishedLoading;
mutex mtx_loadTask;
string path;
int numThreads = 0;
// NOTE(review): int, but compared/advanced against uint64_t numPoints —
// overflows for files with more than ~2^31 points; should be uint64_t
int nextBatchStart = 0;
int activeThreads = 0;
// Reads header metadata (bounds, count), then starts the workers.
LASLoader(string path, int numThreads) {
this->path = path;
this->numThreads = numThreads;
laszip_POINTER laszip_reader = nullptr;
laszip_header* header = nullptr;
laszip_create(&laszip_reader);
laszip_BOOL request_reader = 1;
laszip_request_compatibility_mode(laszip_reader, request_reader);
laszip_BOOL is_compressed = iEndsWith(path, ".laz") ? 1 : 0;
laszip_open_reader(laszip_reader, path.c_str(), &is_compressed);
laszip_get_header_pointer(laszip_reader, &header);
this->min = {header->min_x, header->min_y, header->min_z};
this->max = {header->max_x, header->max_y, header->max_z};
// LAS 1.4 stores large counts in the extended field
uint64_t npoints = (header->number_of_point_records ? header->number_of_point_records : header->extended_number_of_point_records);
//uint64_t npoints = 10'000;
this->numPoints = npoints;
// NOTE(review): reader is closed but never laszip_destroy()ed — small leak
laszip_close_reader(laszip_reader);
spawnThreads();
}
// Claims the next contiguous range of points. Thread-safe.
// Returns a task with done == true once everything has been claimed.
LasLoadTask getLoadTask() {
lock_guard<mutex> lock(mtx_loadTask);
LasLoadTask task;
task.start = nextBatchStart;
task.numPoints = std::min(task.start + batchSize, numPoints) - task.start;
if (nextBatchStart >= numPoints) {
task.done = true;
}
nextBatchStart += task.numPoints;
return task;
}
// Fixed attribute layout produced by the workers: 24-byte float64 position
// triple followed by 4 uint8 color channels (28 bytes per point).
Attributes getAttributes() {
Attribute aPosition("position", AttributeTypes::float64, 0, 24, 3);
Attribute aColor("color", AttributeTypes::uint8, 0, 4, 4);
vector<Attribute> list = {
aPosition,
aColor
};
Attributes attributes(list);
return attributes;
}
// Asynchronously returns the next finished batch, or nullptr once all
// workers are done and the queue is drained. Polls every 5 ms.
future<shared_ptr<Points>> nextBatch() {
auto fut = std::async(std::launch::async, [=]() -> shared_ptr<Points> {
bool done = false;
while (!done) {
{
// both locks needed: "finished" is only meaningful together
// with an empty queue
lock_guard<mutex> guard1(mtx_finishedLoading);
lock_guard<mutex> guard2(mtx_batches);
bool nothingLeftTodo = finishedLoading && batches.size() == 0;
if (nothingLeftTodo) {
return nullptr;
}
}
//unique<mutex> guard(mtx_batches);
unique_lock<mutex> lock(mtx_batches, std::defer_lock);
lock.lock();
if (batches.size() > 0) {
// LIFO: hands out the most recently produced batch first
auto batch = batches.back();
batches.pop_back();
lock.unlock();
return batch;
} else {
lock.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}
// NOTE(review): unreachable — `done` is never set; loop exits via return
cout << "damn" << endl;
return nullptr;
});
return fut;
}
// Starts all worker threads and initializes the active-thread counter.
void spawnThreads() {
this->activeThreads = numThreads;
for (int i = 0; i < numThreads; i++) {
spawnLoadThread();
}
}
// Worker loop: claim a task, decode its points, publish the batch; repeat.
void spawnLoadThread() {
thread t([this]() {
// each worker owns its own laszip reader, so threads share no state
laszip_POINTER laszip_reader;
laszip_create(&laszip_reader);
laszip_BOOL request_reader = 1;
laszip_request_compatibility_mode(laszip_reader, request_reader);
laszip_BOOL is_compressed = iEndsWith(this->path, ".laz") ? 1 : 0;
laszip_open_reader(laszip_reader, path.c_str(), &is_compressed);
laszip_header* header = nullptr;
laszip_get_header_pointer(laszip_reader, &header);
laszip_point* point;
laszip_get_point_pointer(laszip_reader, &point);
LasLoadTask task = getLoadTask();
while (!task.done) {
uint64_t end = task.start + task.numPoints;
laszip_seek_point(laszip_reader, task.start);
double coordinates[3];
Attributes attributes = getAttributes();
shared_ptr<Points> points = make_shared<Points>();
points->points.reserve(task.numPoints);
points->attributes = attributes;
uint64_t attributeBufferSize = task.numPoints * attributes.byteSize;
points->attributeBuffer = make_shared<Buffer>(attributeBufferSize);
int relIndex = 0;
for (uint64_t i = task.start; i < end; i++) {
laszip_read_point(laszip_reader);
// assumes 16-bit color channels; /256 scales to 8 bit — TODO confirm
uint8_t r = point->rgb[0] / 256;
uint8_t g = point->rgb[1] / 256;
uint8_t b = point->rgb[2] / 256;
laszip_get_coordinates(laszip_reader, coordinates);
// NOTE(review): this Point shadows the laszip_point* `point` above;
// works because rgb was read first, but easy to misread
Point point = {
coordinates[0],
coordinates[1],
coordinates[2],
relIndex
};
// stride 28 / offset 24 hard-code the layout from getAttributes()
// (24-byte position + 4-byte RGBA); keep the two in sync
uint8_t* rgbBuffer = points->attributeBuffer->dataU8 + (28 * relIndex + 24);
//uint8_t* rgbBuffer = points->attributeBuffer->dataU8 + (4 * relIndex);
rgbBuffer[0] = r;
rgbBuffer[1] = g;
rgbBuffer[2] = b;
rgbBuffer[3] = 255;
points->points.push_back(point);
relIndex++;
}
//writeLAS("D:/temp/test/batches/loader_finalizing.las", points);
{
lock_guard<mutex> guard(mtx_batches);
batches.push_back(points);
}
task = getLoadTask();
}
// NOTE(review): laszip_reader is neither closed nor destroyed here — leak
{
lock_guard<mutex> guard2(mtx_finishedLoading);
activeThreads--;
// last worker to finish flips the global flag
if (activeThreads == 0) {
finishedLoading = true;
}
}
});
t.detach();
//thread t([&](){
// loadStuff();
//});
//t.detach();
}
};

View File

@@ -1,170 +0,0 @@
#ifndef LASPOINTREADER_H
#define LASPOINTREADER_H
#include <string>
#include <iostream>
#include <vector>
#include "laszip_api.h"
#include "Point.h"
#include "PointReader.h"
#include "stuff.h"
#include "ExtraBytes.hpp"
using std::string;
using std::ifstream;
using std::cout;
using std::endl;
using std::vector;
namespace Potree{
class LIBLASReader{
private:
double tr[16];
bool hasTransform = false;
Point transform(double x, double y, double z) const {
Point p;
if (hasTransform) {
p.position.x = tr[0] * x + tr[4] * y + tr[8] * z + tr[12];
p.position.y = tr[1] * x + tr[5] * y + tr[9] * z + tr[13];
p.position.z = tr[2] * x + tr[6] * y + tr[10] * z + tr[14];
} else {
p.position = Vector3<double>{x,y,z};
}
return p;
}
public:
laszip_POINTER laszip_reader;
laszip_header* header;
laszip_point* point;
int colorScale;
double coordinates[3];
long long pointsRead = 0;
LIBLASReader(string path) {
laszip_create(&laszip_reader);
laszip_BOOL request_reader = 1;
laszip_request_compatibility_mode(laszip_reader, request_reader);
{// read first x points to find if color is 1 or 2 bytes
laszip_BOOL is_compressed = iEndsWith(path, ".laz") ? 1 : 0;
laszip_open_reader(laszip_reader, path.c_str(), &is_compressed);
laszip_get_header_pointer(laszip_reader, &header);
long long npoints = (header->number_of_point_records ? header->number_of_point_records : header->extended_number_of_point_records);
laszip_get_point_pointer(laszip_reader, &point);
colorScale = 1;
for(int i = 0; i < 100'000 && i < npoints; i++){
laszip_read_point(laszip_reader);
auto r = point->rgb[0];
auto g = point->rgb[1];
auto b = point->rgb[2];
if(r > 255 || g > 255 || b > 255){
colorScale = 256;
break;
};
}
}
laszip_seek_point(laszip_reader, 0);
}
long long numPoints() {
if (header->version_major >= 1 && header->version_minor >= 4) {
return header->extended_number_of_point_records;
} else {
return header->number_of_point_records;
}
}
~LIBLASReader(){
laszip_close_reader(laszip_reader);
laszip_destroy(laszip_reader);
}
bool readPoint(){
if(pointsRead < numPoints()){
laszip_read_point(laszip_reader);
pointsRead++;
return true;
}else{
return false;
}
}
Point GetPoint() {
laszip_get_coordinates(laszip_reader, coordinates);
Point p = transform(coordinates[0], coordinates[1], coordinates[2]);
p.intensity = point->intensity;
p.classification = point->classification;
p.color.x = point->rgb[0] / colorScale;
p.color.y = point->rgb[1] / colorScale;
p.color.z = point->rgb[2] / colorScale;
p.returnNumber = point->return_number;
p.numberOfReturns = point->number_of_returns;
p.pointSourceID = point->point_source_ID;
p.gpsTime = point->gps_time;
if (point->num_extra_bytes > 0) {
p.extraBytes = vector<uint8_t>(point->extra_bytes, point->extra_bytes + point->num_extra_bytes);
}
return p;
}
// Intentionally empty: the laszip reader is closed in the destructor.
void close(){
}
AABB getAABB();
};
// PointReader implementation for LAS/LAZ input, backed by LIBLASReader.
class LASPointReader : public PointReader{
private:
	AABB aabb;
	string path;
	LIBLASReader *reader;                  // reader for the currently open file
	vector<string> files;                  // input files derived from <path>; presumably supports multi-file input — confirm in the .cpp
	vector<string>::iterator currentFile;

public:
	LASPointReader(string path);

	~LASPointReader();

	bool readNextPoint();

	Point getPoint();

	AABB getAABB();

	long long numPoints();

	void close();

	Vector3<double> getScale();
};
}
#endif

View File

@@ -1,104 +0,0 @@
#ifndef LASPOINTWRITER_H
#define LASPOINTWRITER_H
#include <string>
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <string.h>
#include "laszip_api.h"
#include "AABB.h"
#include "PointWriter.hpp"
#include "Point.h"
#include "stuff.h"
using std::string;
using std::fstream;
using std::ios;
namespace Potree{
// Writes points to a LAS 1.2 file (point data format 2). If <file> ends
// with ".laz", the output is laszip-compressed.
class LASPointWriter : public PointWriter{

public:
	AABB aabb;
	laszip_POINTER writer = NULL;
	laszip_header header;
	laszip_point* point;
	double coordinates[3];

	// Prepares the LAS header from the given bounding box and coordinate
	// scale, then opens the laszip writer for <file>.
	LASPointWriter(string file, AABB aabb, double scale) {
		this->file = file;
		this->aabb = aabb;
		numPoints = 0;

		memset(&header, 0, sizeof(laszip_header));

		strcpy(header.generating_software, "potree");
		header.version_major = 1;
		header.version_minor = 2;
		header.header_size = 227;             // fixed size of a LAS 1.2 header
		header.offset_to_point_data = 227;
		header.point_data_format = 2;
		header.min_x = aabb.min.x;
		header.min_y = aabb.min.y;
		header.min_z = aabb.min.z;
		header.max_x = aabb.max.x;
		header.max_y = aabb.max.y;
		header.max_z = aabb.max.z;

		// offset at the bounding box minimum keeps the quantized
		// integer coordinates small
		header.x_offset = aabb.min.x;
		header.y_offset = aabb.min.y;
		header.z_offset = aabb.min.z;
		header.x_scale_factor = scale;
		header.y_scale_factor = scale;
		header.z_scale_factor = scale;

		header.point_data_record_length = 26; // record size of point format 2

		// placeholder; the actual count is patched into the file in close()
		header.number_of_point_records = 111;

		laszip_create(&writer);

		laszip_BOOL compress = iEndsWith(file, ".laz") ? 1 : 0;

		if(compress){
			laszip_BOOL request_writer = 1;
			laszip_request_compatibility_mode(writer, request_writer);
		}

		laszip_set_header(writer, &header);
		laszip_open_writer(writer, file.c_str(), compress);
		laszip_get_point_pointer(writer, &point);
	}

	~LASPointWriter(){
		close();
	}

	void write(Point &point);

	// Finishes the laszip stream and patches the real point count into the
	// already-written header. Safe to call more than once (writer is nulled).
	void close(){
		if(writer != NULL){
			laszip_close_writer(writer);
			laszip_destroy(writer);
			writer = NULL;

			// byte offset 107 of a LAS 1.2 header holds the 32 bit
			// number_of_point_records field; overwrite the placeholder.
			// stack-allocated stream (RAII) instead of the previous
			// new/delete pair
			fstream stream(file, ios::out | ios::binary | ios::in);
			stream.seekp(107);
			stream.write(reinterpret_cast<const char*>(&numPoints), 4);
			stream.close();
		}
	}
};
}
#endif

View File

@@ -0,0 +1,20 @@
#pragma once
#include "Points.h"
#include "Vector3.h"
// Parameters used when writing a LAS file.
struct LASHeader {
	// header size in bytes; 375 presumably corresponds to a LAS 1.4
	// header — confirm against the writeLAS implementation
	int headerSize = 375;
	uint64_t numPoints = 0;
	Vector3<double> min;    // bounding box of all points
	Vector3<double> max;
	// coordinate quantization step (1 mm by default)
	Vector3<double> scale = {0.001, 0.001, 0.001};
};
void writeLAS(string path, LASHeader header, vector<Point> points);
void writeLAS(string path, LASHeader header, vector<Point> sample, Points* points);
void writeLAS(string path, shared_ptr<Points> points);

View File

@@ -0,0 +1,26 @@
#pragma once
#include <cstdint>
#include <string>
#include "Vector3.h"
using std::string;
// Summary information about the point cloud being converted.
struct Metadata {
	string targetDirectory = "";
	Vector3<double> min;           // bounding box over all input points
	Vector3<double> max;
	uint64_t numPoints = 0;
	uint32_t chunkGridSize = 0;    // resolution of the chunking grid — confirm units in the chunker

	Metadata() {
	}
};

View File

@@ -1,87 +0,0 @@
#ifndef PTXPOINTREADER_H
#define PTXPOINTREADER_H
#include <map>
#include "PointReader.h"
using std::string;
using std::fstream;
using std::vector;
namespace Potree{
/**
This reader imports PTX files. We assume that PTX files are a concatenation
of multiple PTX "chunks", all of them having the same structure. Every point
has exactly 4 double precision fields: X, Y, Z, Intensity (from 0.0 to 1.0).
*/
class PTXPointReader : public PointReader {
private:
	double tr[16];    // current 4x4 transformation matrix (translation in tr[12..14])
	Point p;          // point most recently produced by readNextPoint()
	long currentChunk;

	// cached per-path scan results so repeated readers for the same file
	// do not rescan it
	static std::map<string, AABB> aabbs;
	static std::map<string, long> counts;

	// applies the 4x4 matrix <tr> to (x, y, z); the indexing matches a
	// column-major layout with the translation in elements 12-14
	inline Point transform(double tr[16], double x, double y, double z) const {
		Point p(tr[0] * x + tr[4] * y + tr[8] * z + tr[12],
		tr[1] * x + tr[5] * y + tr[9] * z + tr[13],
		tr[2] * x + tr[6] * y + tr[10] * z + tr[14]);
		return p;
	}

	fstream *stream;
	string path;
	vector<string> files;
	vector<string>::iterator currentFile;
	Vector3<double> origin;

	/**
	 * Returns false if there is no next chunk.
	 */
	bool loadChunk(fstream *stream, long currentChunk, double tr[16]);

	// scans the whole file to fill the aabbs/counts caches
	void scanForAABB();

	bool doReadNextPoint();

public:
	PTXPointReader(string path);

	~PTXPointReader() {
		close();
	}

	bool readNextPoint();

	inline Point getPoint() {
		return p;
	}

	inline Vector3<double> getOrigin() {
		return origin;
	}

	// lazily computes and caches the bounding box by scanning the file
	inline AABB getAABB() {
		if (PTXPointReader::aabbs.find(path) == aabbs.end()) {
			scanForAABB();
		}

		return PTXPointReader::aabbs[path];
	}

	// lazily computes and caches the point count by scanning the file
	inline long long numPoints() {
		if (PTXPointReader::counts.find(path) == counts.end()) {
			scanForAABB();
		}

		return PTXPointReader::counts[path];
	}

	inline void close() {
		stream->close();
	}
};
}
#endif

View File

@@ -1,300 +0,0 @@
#ifndef PLYPOINTREADER_H
#define PLYPOINTREADER_H
#include <string>
#include <fstream>
#include <iostream>
#include <regex>
#include "Point.h"
#include "PointReader.h"
using std::ifstream;
using std::string;
using std::vector;
using std::map;
using std::cout;
using std::endl;
namespace Potree{
const int PLY_FILE_FORMAT_ASCII = 0;
const int PLY_FILE_FORMAT_BINARY_LITTLE_ENDIAN = 1;
// Canonical name and size (in bytes) of a PLY property type.
struct PlyPropertyType{
	string name;
	int size;    // size of one value in bytes

	PlyPropertyType(){}

	PlyPropertyType(string name, int size)
	:name(name)
	,size(size)
	{
	}
};
// One property of a PLY element, e.g. "x" of type float.
struct PlyProperty{
	string name;
	PlyPropertyType type;

	PlyProperty(string name, PlyPropertyType type)
	:name(name)
	,type(type)
	{
	}
};
// A PLY element declaration ("vertex", "face", ...) and its properties.
struct PlyElement{
	string name;
	vector<PlyProperty> properties;
	int size;    // NOTE(review): never assigned in this header — confirm whether it is used

	PlyElement(string name)
	:name(name)
	{
	}
};
// Maps PLY type names (including aliases such as "int8" for "char") to the
// canonical type name and its size in bytes.
// NOTE(review): non-inline definitions at namespace scope in a header risk
// ODR violations if this header is included from more than one translation
// unit — confirm it is only included once.
unordered_map<string, PlyPropertyType> plyPropertyTypes = {
	{ "char", PlyPropertyType("char", 1) },
	{ "int8", PlyPropertyType("char", 1) },
	{ "uchar", PlyPropertyType("uchar", 1) },
	{ "uint8", PlyPropertyType("uchar", 1) },
	{ "short", PlyPropertyType("short", 2) },
	{ "int16", PlyPropertyType("short", 2) },
	{ "ushort", PlyPropertyType("ushort", 2) },
	{ "uint16", PlyPropertyType("ushort", 2) },
	{ "int", PlyPropertyType("int", 4) },
	{ "int32", PlyPropertyType("int", 4) },
	{ "uint", PlyPropertyType("uint", 4) },
	{ "uint32", PlyPropertyType("uint", 4) },
	{ "float", PlyPropertyType("float", 4) },
	{ "float32", PlyPropertyType("float", 4) },
	{ "double", PlyPropertyType("double", 8) },
	{ "float64", PlyPropertyType("double", 8) }
};

// accepted PLY property names for the red/green/blue color channels
vector<string> plyRedNames = { "r", "red", "diffuse_red" };
vector<string> plyGreenNames = { "g", "green", "diffuse_green" };
vector<string> plyBlueNames = { "b", "blue", "diffuse_blue" };
class PlyPointReader : public PointReader{
private:
AABB *aabb;
ifstream stream;
int format;
long pointCount;
long pointsRead;
PlyElement vertexElement;
char *buffer;
int pointByteSize;
Point point;
string file;
public:
PlyPointReader(string file)
: stream(file, std::ios::in | std::ios::binary)
,vertexElement("vertexElement"){
format = -1;
pointCount = 0;
pointsRead = 0;
pointByteSize = 0;
buffer = new char[100];
aabb = NULL;
this->file = file;
std::regex rEndHeader("^end_header.*");
std::regex rFormat("^format (ascii|binary_little_endian).*");
std::regex rElement("^element (\\w*) (\\d*)");
std::regex rProperty("^property (char|int8|uchar|uint8|short|int16|ushort|uint16|int|int32|uint|uint32|float|float32|double|float64) (\\w*)");
string line;
while(std::getline(stream, line)){
line = trim(line);
std::cmatch sm;
if(std::regex_match(line, rEndHeader)){
// stop line parsing when end_header is encountered
break;
}else if(std::regex_match(line.c_str(), sm, rFormat)){
// parse format
string f = sm[1];
if(f == "ascii"){
format = PLY_FILE_FORMAT_ASCII;
}else if(f == "binary_little_endian"){
format = PLY_FILE_FORMAT_BINARY_LITTLE_ENDIAN;
}
}else if(std::regex_match(line.c_str(), sm, rElement)){
// parse vertex element declaration
string name = sm[1];
long count = atol(string(sm[2]).c_str());
if(name != "vertex"){
continue;
}
pointCount = count;
while(true){
std::streamoff len = stream.tellg();
getline(stream, line);
line = trim(line);
if(std::regex_match(line.c_str(), sm, rProperty)){
string name = sm[2];
PlyPropertyType type = plyPropertyTypes[sm[1]];
PlyProperty property(name, type);
vertexElement.properties.push_back(property);
pointByteSize += type.size;
}else{
// abort if line was not a property definition
stream.seekg(len ,std::ios_base::beg);
break;
}
}
}
}
}
bool readNextPoint(){
if(pointsRead == pointCount){
return false;
}
double x = 0;
double y = 0;
double z = 0;
float dummy;
float nx = 0;
float ny = 0;
float nz = 0;
unsigned char r = 0;
unsigned char g = 0;
unsigned char b = 0;
if(format == PLY_FILE_FORMAT_ASCII){
string line;
getline(stream, line);
line = trim(line);
//vector<string> tokens;
//split(tokens, line, is_any_of("\t "));
vector<string> tokens = split(line, {'\t', ' '});
int i = 0;
for(const auto &prop : vertexElement.properties){
string token = tokens[i++];
if(prop.name == "x" && prop.type.name == plyPropertyTypes["float"].name){
x = stof(token);
}else if(prop.name == "y" && prop.type.name == plyPropertyTypes["float"].name){
y = stof(token);
}else if(prop.name == "z" && prop.type.name == plyPropertyTypes["float"].name){
z = stof(token);
}else if(prop.name == "x" && prop.type.name == plyPropertyTypes["double"].name){
x = stod(token);
}else if(prop.name == "y" && prop.type.name == plyPropertyTypes["double"].name){
y = stod(token);
}else if(prop.name == "z" && prop.type.name == plyPropertyTypes["double"].name){
z = stod(token);
}else if(std::find(plyRedNames.begin(), plyRedNames.end(), prop.name) != plyRedNames.end() && prop.type.name == plyPropertyTypes["uchar"].name){
r = (unsigned char)stof(token);
}else if(std::find(plyGreenNames.begin(), plyGreenNames.end(), prop.name) != plyGreenNames.end() && prop.type.name == plyPropertyTypes["uchar"].name){
g = (unsigned char)stof(token);
}else if(std::find(plyBlueNames.begin(), plyBlueNames.end(), prop.name) != plyBlueNames.end() && prop.type.name == plyPropertyTypes["uchar"].name){
b = (unsigned char)stof(token);
}else if(prop.name == "nx" && prop.type.name == plyPropertyTypes["float"].name){
nx = stof(token);
}else if(prop.name == "ny" && prop.type.name == plyPropertyTypes["float"].name){
ny = stof(token);
}else if(prop.name == "nz" && prop.type.name == plyPropertyTypes["float"].name){
nz = stof(token);
}
}
}else if(format == PLY_FILE_FORMAT_BINARY_LITTLE_ENDIAN){
stream.read(buffer, pointByteSize);
int offset = 0;
for(const auto &prop : vertexElement.properties){
if(prop.name == "x" && prop.type.name == plyPropertyTypes["float"].name){
memcpy(&dummy, (buffer+offset), prop.type.size);
x=dummy;
}else if(prop.name == "y" && prop.type.name == plyPropertyTypes["float"].name){
memcpy(&dummy, (buffer+offset), prop.type.size);
y=dummy;
}else if(prop.name == "z" && prop.type.name == plyPropertyTypes["float"].name){
memcpy(&dummy, (buffer+offset), prop.type.size);
z=dummy;
}else if(prop.name == "x" && prop.type.name == plyPropertyTypes["double"].name){
memcpy(&x, (buffer+offset), prop.type.size);
}else if(prop.name == "y" && prop.type.name == plyPropertyTypes["double"].name){
memcpy(&y, (buffer+offset), prop.type.size);
}else if(prop.name == "z" && prop.type.name == plyPropertyTypes["double"].name){
memcpy(&z, (buffer+offset), prop.type.size);
}else if(std::find(plyRedNames.begin(), plyRedNames.end(), prop.name) != plyRedNames.end() && prop.type.name == plyPropertyTypes["uchar"].name){
memcpy(&r, (buffer+offset), prop.type.size);
}else if(std::find(plyGreenNames.begin(), plyGreenNames.end(), prop.name) != plyGreenNames.end() && prop.type.name == plyPropertyTypes["uchar"].name){
memcpy(&g, (buffer+offset), prop.type.size);
}else if(std::find(plyBlueNames.begin(), plyBlueNames.end(), prop.name) != plyBlueNames.end() && prop.type.name == plyPropertyTypes["uchar"].name){
memcpy(&b, (buffer+offset), prop.type.size);
}else if(prop.name == "nx" && prop.type.name == plyPropertyTypes["float"].name){
memcpy(&nx, (buffer+offset), prop.type.size);
}else if(prop.name == "ny" && prop.type.name == plyPropertyTypes["float"].name){
memcpy(&ny, (buffer+offset), prop.type.size);
}else if(prop.name == "nz" && prop.type.name == plyPropertyTypes["float"].name){
memcpy(&nz, (buffer+offset), prop.type.size);
}
offset += prop.type.size;
}
}
point = Point(x,y,z,r,g,b);
point.normal.x = nx;
point.normal.y = ny;
point.normal.z = nz;
pointsRead++;
return true;
}
Point getPoint(){
return point;
}
AABB getAABB(){
if(aabb == NULL){
aabb = new AABB();
PlyPointReader *reader = new PlyPointReader(file);
while(reader->readNextPoint()){
Point p = reader->getPoint();
aabb->update(p.position);
}
reader->close();
delete reader;
}
return *aabb;
}
long long numPoints(){
return pointCount;
}
void close(){
stream.close();
}
};
}
#endif

View File

@@ -1,56 +0,0 @@
#ifndef POINT_H
#define POINT_H
#include "Vector3.h"
#include <iostream>
#include <vector>
using std::ostream;
using std::vector;
namespace Potree{
// A single point with position, color, normal and the optional LAS
// attributes (intensity, classification, returns, GPS time, extra bytes).
class Point{
public:
	Vector3<double> position{0};
	Vector3<unsigned char> color{255};    // RGB, 8 bit per channel
	Vector3<float> normal{0};

	unsigned short intensity = 0;
	unsigned char classification = 0;
	unsigned char returnNumber = 0;
	unsigned char numberOfReturns = 0;
	unsigned short pointSourceID = 0;
	double gpsTime = 0.0;

	// raw per-point "extra bytes" payload from LAS input; empty if none
	vector<uint8_t> extraBytes;

	Point() = default;

	Point(double x, double y, double z) :
		position(x, y, z)
	{
	}

	Point(double x, double y, double z, unsigned char r, unsigned char g, unsigned char b) :
		position(x, y, z), color(r, g, b)
	{
	}

	Point(const Point &other) = default;

	~Point() = default;

	// streams only the position
	friend ostream &operator<<( ostream &output, const Point &value ){
		output << value.position ;
		return output;
	}
};
}
#endif

View File

@@ -1,107 +0,0 @@
#ifndef POINT_ATTRIBUTES_H
#define POINT_ATTRIBUTES_H
#include <string>
#include <vector>
#include <unordered_map>
using std::string;
using std::vector;
using std::unordered_map;
namespace Potree{
#define ATTRIBUTE_TYPE_INT8 "int8"
#define ATTRIBUTE_TYPE_INT16 "int16"
#define ATTRIBUTE_TYPE_INT32 "int32"
#define ATTRIBUTE_TYPE_INT64 "int64"
#define ATTRIBUTE_TYPE_UINT8 "uint8"
#define ATTRIBUTE_TYPE_UINT16 "uint16"
#define ATTRIBUTE_TYPE_UINT32 "uint32"
#define ATTRIBUTE_TYPE_UINT64 "uint64"
#define ATTRIBUTE_TYPE_FLOAT "float"
#define ATTRIBUTE_TYPE_DOUBLE "double"
// Size in bytes of one element of each ATTRIBUTE_TYPE_* type.
const unordered_map<string, int> attributeTypeSize = {
	{ATTRIBUTE_TYPE_INT8, 1},
	{ATTRIBUTE_TYPE_INT16, 2},
	{ATTRIBUTE_TYPE_INT32, 4},
	{ATTRIBUTE_TYPE_INT64, 8},
	{ATTRIBUTE_TYPE_UINT8, 1},
	{ATTRIBUTE_TYPE_UINT16, 2},
	{ATTRIBUTE_TYPE_UINT32, 4},
	{ATTRIBUTE_TYPE_UINT64, 8},
	{ATTRIBUTE_TYPE_FLOAT, 4},
	{ATTRIBUTE_TYPE_DOUBLE, 8}
};
// Describes one per-point attribute: its name, element type, element count
// and total size in bytes. The static constants enumerate the attributes
// the converter knows about.
class PointAttribute{
public:
	static const PointAttribute POSITION_CARTESIAN;
	static const PointAttribute COLOR_PACKED;
	static const PointAttribute INTENSITY;
	static const PointAttribute CLASSIFICATION;
	static const PointAttribute RETURN_NUMBER;
	static const PointAttribute NUMBER_OF_RETURNS;
	static const PointAttribute SOURCE_ID;
	static const PointAttribute GPS_TIME;
	static const PointAttribute NORMAL_SPHEREMAPPED;
	static const PointAttribute NORMAL_OCT16;
	static const PointAttribute NORMAL;

	int ordinal;        // numeric id of this attribute
	string name;
	string description;
	string type;        // one of the ATTRIBUTE_TYPE_* names
	int numElements;    // elements of <type> per point
	int byteSize;       // total size in bytes; presumably numElements * element size — confirm

	PointAttribute(int ordinal, string name, string type, int numElements, int byteSize){
		this->ordinal = ordinal;
		this->name = name;
		this->type = type;
		this->numElements = numElements;
		this->byteSize = byteSize;
	}

	// Looks up one of the predefined attributes by name (defined in .cpp).
	static PointAttribute fromString(string name);
};
bool operator==(const PointAttribute& lhs, const PointAttribute& rhs);
// Ordered collection of point attributes; tracks the accumulated size of
// one full per-point record in bytes.
class PointAttributes{
public:
	vector<PointAttribute> attributes;
	int byteSize = 0;

	PointAttributes() = default;

	// appends <attribute> and grows the per-point byte size accordingly
	void add(PointAttribute attribute){
		attributes.push_back(attribute);
		byteSize += attribute.byteSize;
	}

	int size(){
		return static_cast<int>(attributes.size());
	}

	PointAttribute& operator[](int i) {
		return attributes[i];
	}
};
}
#endif

View File

@@ -1,33 +0,0 @@
#ifndef POINTREADER_H
#define POINTREADER_H
#include <experimental/filesystem>
#include "Point.h"
#include "AABB.h"
namespace fs = std::experimental::filesystem;
namespace Potree{
// Abstract interface implemented by all input format readers
// (LAS/LAZ, PLY, PTX, ...).
class PointReader{
public:
	virtual ~PointReader(){};

	// advances to the next point; false once the input is exhausted
	virtual bool readNextPoint() = 0;

	// the point most recently loaded by readNextPoint()
	virtual Point getPoint() = 0;

	virtual AABB getAABB() = 0;

	virtual long long numPoints() = 0;

	// releases file handles
	virtual void close() = 0;
};
}
#endif

View File

@@ -1,32 +0,0 @@
#ifndef POINTWRITER_H
#define POINTWRITER_H
#include <string>
#include <iostream>
#include "Point.h"
using std::string;
namespace Potree{
// Abstract interface for output format writers.
class PointWriter{
public:
	string file;        // target file path
	int numPoints = 0;  // points written so far; presumably updated by write() — confirm in implementations

	virtual ~PointWriter(){};

	virtual void write(Point &point) = 0;

	virtual void close() = 0;
};
}
#endif

View File

@@ -0,0 +1,223 @@
#pragma once
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include "Vector3.h"
using std::string;
using std::unordered_map;
using std::vector;
using std::shared_ptr;
using std::cout;
using std::endl;
// Owning, fixed-size block of raw memory with pre-cast views for every
// element type the converter reads and writes. The memory is pattern-filled
// with <defaultvalue> so reads of never-written bytes are recognizable
// while debugging.
struct Buffer {

	void* data = nullptr;
	uint8_t* dataU8 = nullptr;
	uint16_t* dataU16 = nullptr;
	uint32_t* dataU32 = nullptr;
	int8_t* dataI8 = nullptr;
	int16_t* dataI16 = nullptr;
	int32_t* dataI32 = nullptr;
	float* dataF = nullptr;
	double* dataD = nullptr;
	char* dataChar = nullptr;
	uint64_t size = 0;

	// DEBUG
	// 0x70 = 112
	inline static uint8_t defaultvalue = 0x70;

	// Allocates <size> bytes and fills them with the debug pattern.
	Buffer(uint64_t size) {
		this->data = malloc(size);

		this->dataU8 = reinterpret_cast<uint8_t*>(this->data);
		this->dataU16 = reinterpret_cast<uint16_t*>(this->data);
		this->dataU32 = reinterpret_cast<uint32_t*>(this->data);
		this->dataI8 = reinterpret_cast<int8_t*>(this->data);
		this->dataI16 = reinterpret_cast<int16_t*>(this->data);
		this->dataI32 = reinterpret_cast<int32_t*>(this->data);
		this->dataF = reinterpret_cast<float*>(this->data);
		this->dataD = reinterpret_cast<double*>(this->data);
		this->dataChar = reinterpret_cast<char*>(this->data);

		memset(this->dataU8, defaultvalue, size);

		this->size = size;
	}

	// Buffer owns raw malloc'd memory: a default-generated copy would lead
	// to a double free in the destructor, so copying is disabled.
	// (the converter shares buffers via shared_ptr<Buffer>)
	Buffer(const Buffer&) = delete;
	Buffer& operator=(const Buffer&) = delete;

	~Buffer() {
		free(this->data);
	}

	// copies the buffer content into a vector, for inspection in a debugger
	std::vector<uint8_t> debug_toVector() {
		return std::vector<uint8_t>(this->dataU8, this->dataU8 + this->size);
	}
};
// Identifies one of the primitive types a point attribute can have.
struct AttributeType {
	int id;
	std::string name;
	int bytes;      // size of one element of this type in bytes

	// Equality is defined by id alone; name and bytes are derived data.
	// const-qualified so const instances (e.g. the constants below) compare.
	bool operator==(const AttributeType& type) const {
		return this->id == type.id;
	}
};

namespace AttributeTypes {

	static const AttributeType undefined = { 0, "undefined", 0};
	static const AttributeType int8      = { 1, "int8", 1};
	static const AttributeType int16     = { 2, "int16", 2};
	static const AttributeType int32     = { 3, "int32", 4};
	static const AttributeType int64     = { 4, "int64", 8};
	static const AttributeType uint8     = { 5, "uint8", 1};
	static const AttributeType uint16    = { 6, "uint16", 2};
	static const AttributeType uint32    = { 7, "uint32", 4};
	static const AttributeType uint64    = { 8, "uint64", 8};
	static const AttributeType float32   = { 9, "float32", 4};
	static const AttributeType float64   = {10, "float64", 8};

	// lookup table from type name to the constant above
	static const std::unordered_map<std::string, AttributeType> map = {
		{"undefined", undefined},
		{"int8", int8},
		{"int16", int16},
		{"int32", int32},
		{"int64", int64},
		{"uint8", uint8},
		{"uint16", uint16},
		{"uint32", uint32},
		{"uint64", uint64},
		{"float32", float32},
		{"float64", float64}
	};

	// Looks up an attribute type by name; terminates the process with an
	// error message if <name> is not a known type.
	inline AttributeType fromName(std::string name) {
		// single lookup instead of the previous find() + at() pair
		auto it = map.find(name);

		if (it == map.end()) {
			std::cout << "ERROR: attribute type with this name does not exist: " << name << std::endl;
			std::cout << __FILE__ << "(" << __LINE__ << ")" << std::endl;

			exit(123);
		}

		return it->second;
	}
}
// Describes one per-point attribute of a Points batch.
struct Attribute {
	string name = "undefined";
	string description = "";
	AttributeType type;
	int numElements = 1;    // elements of <type> per point
	int bytes = 0;          // total size of this attribute per point, in bytes

	Attribute(){

	}

	// NOTE(review): <bytes> stays 0 here — presumably set separately by the
	// caller; confirm this is intended
	Attribute(string name, AttributeType type) {
		this->name = name;
		this->type = type;
	}

	// NOTE(review): the <offset> parameter is accepted but never stored
	Attribute(string name, AttributeType type, int offset, int bytes, int numElements) {
		this->name = name;
		this->type = type;
		this->bytes = bytes;
		this->numElements = numElements;
	}
};
// Ordered list of the attributes attached to each point, plus the
// accumulated size of one full attribute record in bytes.
struct Attributes {
	vector<Attribute> list;
	int byteSize = 0;

	Attributes() {

	}

	Attributes(vector<Attribute> list) {
		// iterate by const reference: Attribute holds strings, so the
		// previous by-value loop copied every element once more than needed
		for (const auto& attribute : list) {
			add(attribute);
		}
	}

	// appends <attribute> and grows the per-record byte size accordingly
	void add(Attribute attribute) {
		list.push_back(attribute);
		byteSize += attribute.bytes;
	}
};
struct Point {
double x = 0.0;
double y = 0.0;
double z = 0.0;
uint64_t index = 0;
Point() {
}
Point(double x, double y, double z, int index) {
this->x = x;
this->y = y;
this->z = z;
this->index = index;
}
double squaredDistanceTo(Point& b) {
double dx = b.x - this->x;
double dy = b.y - this->y;
double dz = b.z - this->z;
double dd = dx * dx + dy * dy + dz * dz;
return dd;
};
double squaredDistanceTo(Vector3<double> b) {
double dx = b.x - this->x;
double dy = b.y - this->y;
double dz = b.z - this->z;
double dd = dx * dx + dy * dy + dz * dz;
return dd;
};
};
// A batch of points together with their non-positional attributes.
// The attribute record of points[i] lives at byte offset
// points[i].index * attributes.byteSize inside attributeBuffer.
struct Points {
	vector<Point> points;
	Attributes attributes;
	shared_ptr<Buffer> attributeBuffer;    // raw storage for all attribute records

	Points() {
		//cout << "create points" << endl;
	}

	~Points() {
		//cout << "delete points" << endl;
	}
};

View File

@@ -1,75 +0,0 @@
#ifndef POTREE_CONVERTER_H
#define POTREE_CONVERTER_H
#include "AABB.h"
#include "CloudJS.hpp"
#include "definitions.hpp"
#include "PointReader.h"
#include <string>
#include <vector>
#include <cstdint>
using std::vector;
using std::string;
namespace Potree{
class SparseGrid;
// Aggregated information about the input files (see computeInfos()).
struct FileInfos {
	AABB aabb;              // bounding box of the contained points
	uint64_t numPoints = 0;
};
// Conversion driver: reads the configured <sources> and produces the
// potree output in <workDir>. Most public members are conversion options
// filled in from the command line — see the .cpp for their use.
class PotreeConverter{

private:
	AABB aabb;
	vector<string> sources;    // input point cloud files
	string workDir;            // output directory
	CloudJS cloudjs;
	PointAttributes pointAttributes;

	// creates the reader for <source>; presumably chosen by file
	// extension — confirm in the .cpp
	PointReader *createPointReader(string source, PointAttributes pointAttributes);

	void prepare();

	// scans the sources for bounding box and total point count
	FileInfos computeInfos();

	void generatePage(string name);

public:
	float spacing;
	int maxDepth;
	string format;
	OutputFormat outputFormat;
	vector<string> outputAttributes;
	vector<double> colorRange;
	vector<double> intensityRange;
	double scale = 0.01;              // coordinate quantization step
	int diagonalFraction = 250;
	vector<double> aabbValues;
	string pageName = "";
	string pageTemplatePath = "";
	StoreOption storeOption = StoreOption::ABORT_IF_EXISTS;
	string projection = "";
	bool sourceListingOnly = false;
	ConversionQuality quality = ConversionQuality::DEFAULT;

	// settings for the generated viewer page
	string title = "PotreeViewer";
	string description = "";
	bool edlEnabled = false;
	bool showSkybox = false;
	string material = "RGB";

	string executablePath;
	int storeSize = 20'000;
	int flushLimit = 10'000'000;

	PotreeConverter(string executablePath, string workDir, vector<string> sources);

	// runs the whole conversion
	void convert();
};
}
#endif

View File

@@ -1,34 +0,0 @@
#ifndef POTREEEXCEPTION_H
#define POTREEEXCEPTION_H
// using standard exceptions
#include <iostream>
#include <exception>
#include <string>
using std::exception;
using std::string;
namespace Potree{
// Exception type carrying a human readable error message.
class PotreeException: public std::exception{

private:
	// stored in the exception so the pointer returned by what()
	// stays valid for the object's lifetime
	std::string message;

public:
	PotreeException(std::string message){
		this->message = message;
	}

	// dynamic exception specifications (throw()) were removed in C++17;
	// noexcept is the equivalent, still-valid spelling
	virtual ~PotreeException() noexcept {
	}

	virtual const char* what() const noexcept override {
		return message.c_str();
	}
};
}
#endif

View File

@@ -1,155 +1,714 @@
#ifndef POTREEWRITER_H
#define POTREEWRITER_H
#include <string>
#include <thread>
#include <vector>
#include <functional>
#include "AABB.h"
#include "SparseGrid.h"
#include "CloudJS.hpp"
#include "PointAttributes.hpp"
using std::string;
using std::thread;
using std::vector;
namespace Potree{
class PotreeWriter;
class PointReader;
class PointWriter;
// One node of the octree being written by PotreeWriter.
class PWNode{
public:
	int index = -1;     // child slot (0-7) within the parent; -1 presumably marks the root — confirm
	AABB aabb;          // region of space covered by this node
	AABB acceptedAABB;  // tight box around the accepted points
	int level = 0;
	SparseGrid *grid;   // spacing grid used to accept/reject points
	unsigned int numAccepted = 0;
	PWNode *parent = NULL;
	vector<PWNode*> children;
	bool addedSinceLastFlush = true;
	bool addCalledSinceLastFlush = false;
	PotreeWriter *potreeWriter;    // owning writer (back reference)
	vector<Point> cache;
	//int storeLimit = 20'000;
	vector<Point> store;
	bool isInMemory = true;        // false after the node was flushed and unloaded

	PWNode(PotreeWriter* potreeWriter, AABB aabb);

	PWNode(PotreeWriter* potreeWriter, int index, AABB aabb, int level);

	~PWNode();

	string name() const;

	float spacing();

	bool isLeafNode(){
		return children.size() == 0;
	}

	bool isInnerNode(){
		return children.size() > 0;
	}

	// restores a previously flushed node's points from disk
	void loadFromDisk();

	PWNode *add(Point &point);

	PWNode *createChild(int childIndex);

	void split();

	string workDir();

	string hierarchyPath();

	string path();

	// writes pending points of this node to disk
	void flush();

	void traverse(std::function<void(PWNode*)> callback);

	void traverseBreadthFirst(std::function<void(PWNode*)> callback);

	vector<PWNode*> getHierarchy(int levels);

	PWNode* findNode(string name);

private:
	PointReader *createReader(string path);
	PointWriter *createWriter(string path);
};
// Builds and persists the potree octree: points are added via add(),
// buffered in <store> and processed asynchronously by <storeThread>.
class PotreeWriter{

public:
	AABB aabb;
	AABB tightAABB;       // actual extent of the added points
	string workDir;
	float spacing;
	double scale = 0;
	int maxDepth = -1;    // -1 presumably means unlimited depth — confirm
	PWNode *root;
	long long numAdded = 0;
	long long numAccepted = 0;
	CloudJS cloudjs;
	OutputFormat outputFormat;
	PointAttributes pointAttributes;
	int hierarchyStepSize = 5;
	vector<Point> store;          // points buffered until processStore() runs
	thread storeThread;           // processes <store> in the background
	int pointsInMemory = 0;
	string projection = "";
	ConversionQuality quality = ConversionQuality::DEFAULT;
	int storeSize = 20'000;

	// resumes writing into an existing output directory
	PotreeWriter(string workDir, ConversionQuality quality);

	PotreeWriter(string workDir, AABB aabb, float spacing, int maxDepth, double scale, OutputFormat outputFormat, PointAttributes pointAttributes, ConversionQuality quality);

	~PotreeWriter(){
		close();

		delete root;
	}

	string getExtension();

	void processStore();

	// blocks until the background store thread has finished
	void waitUntilProcessed();

	void add(Point &p);

	void flush();

	void close(){
		flush();
	}

	void setProjection(string projection);

	void loadStateFromDisk();

private:

};
}
#endif
#pragma once
//
//#include <string>
//#include <thread>
//#include <mutex>
//#include <filesystem>
//#include <fstream>
//#include <random>
//#include <unordered_map>
//#include <vector>
//#include <algorithm>
//
//
//#include "Node.h"
//#include "json.hpp"
//
//using json = nlohmann::json;
//using namespace std;
//
//namespace fs = std::filesystem;
//
//struct PWNode {
//
// string name = "";
// int64_t numPoints = 0;
// vector<PWNode*> children;
//
// int64_t byteOffset = 0;
// int64_t byteSize = 0;
//
// PWNode(string name) {
// this->name = name;
// this->children.resize(8, nullptr);
// }
//
//};
//
//class PotreeWriter {
//public:
//
// string targetDirectory = "";
// string pathData = "";
// string pathCloudJs = "";
// string pathHierarchy = "";
//
// Vector3<double> min;
// Vector3<double> max;
// double scale = 1.0;
// double spacing = 1.0;
// int upperLevels = 1;
//
// PWNode* root = nullptr;
// unordered_map<Node*, PWNode*> pwNodes;
//
// struct UpperLevelStuff {
// shared_ptr<Node> node;
// shared_ptr<Points> data;
// };
//
// // partial sampling results for upper levels
// // will contain multiple entries for the same node
// // that need to be merged.
// vector<UpperLevelStuff> upperLevelsResults;
//
// mutex mtx_writeChunk;
// mutex* mtx_test = new mutex();
//
// int currentByteOffset = 0;
// mutex mtx_byteOffset;
//
// Attributes attributes;
// fstream* fsFile = nullptr;
//
// PotreeWriter(string targetDirectory,
// Vector3<double> min, Vector3<double> max,
// double spacing, double scale, int upperLevels,
// vector<shared_ptr<Chunk>> chunks, Attributes attributes): attributes(attributes) {
//
//
// this->targetDirectory = targetDirectory;
// this->min = min;
// this->max = max;
// this->spacing = spacing;
// this->scale = scale;
// this->upperLevels = upperLevels;
//
// fs::create_directories(targetDirectory);
//
// pathData = targetDirectory + "/octree.data";
// pathCloudJs = targetDirectory + "/cloud.json";
// pathHierarchy = targetDirectory + "/hierarchy.json";
//
// fs::remove(pathData);
//
// root = new PWNode("r");
//
// vector<string> nodeIDs;
// for (auto chunk : chunks) {
// nodeIDs.push_back(chunk->id);
// }
// createNodes(nodeIDs);
// }
//
// struct ChildParams {
// Vector3<double> min;
// Vector3<double> max;
// Vector3<double> size;
// int id;
// };
//
// uint64_t increaseByteOffset(uint64_t amount) {
//
// lock_guard<mutex> lock(mtx_byteOffset);
//
// uint64_t old = currentByteOffset;
//
// currentByteOffset += amount;
//
// return old;
// }
//
// ChildParams computeChildParameters(Vector3<double>& min, Vector3<double>& max, Vector3<double> point){
//
// auto size = max - min;
//
// double nx = (point.x - min.x) / size.x;
// double ny = (point.y - min.y) / size.y;
// double nz = (point.z - min.z) / size.z;
//
// Vector3<double> childMin;
// Vector3<double> childMax;
// Vector3<double> center = min + size / 2.0;
//
// int childIndex = 0;
//
// if (nx > 0.5) {
// childIndex = childIndex | 0b100;
// childMin.x = center.x;
// childMax.x = max.x;
// } else {
// childMin.x = min.x;
// childMax.x = center.x;
// }
//
// if (ny > 0.5) {
// childIndex = childIndex | 0b010;
// childMin.y = center.y;
// childMax.y = max.y;
// } else {
// childMin.y = min.y;
// childMax.y = center.y;
// }
//
// if (nz > 0.5) {
// childIndex = childIndex | 0b001;
// childMin.z = center.z;
// childMax.z = max.z;
// } else {
// childMin.z = min.z;
// childMax.z = center.z;
// }
//
// ChildParams params;
// params.min = childMin;
// params.max = childMax;
// params.size = childMax - childMin;
// params.id = childIndex;
//
// return params;
// }
//
// vector<int> toVectorID(string stringID) {
// vector<int> id;
//
// for (int i = 1; i < stringID.size(); i++) {
//
// int index = stringID[i] - '0'; // ... ouch
//
// id.push_back(index);
// }
//
// return id;
// }
//
// vector<int> computeNodeID(Node* node) {
//
// auto min = this->min;
// auto max = this->max;
// auto target = (node->min + node->max) / 2.0;
//
// vector<int> id;
//
// for (int i = 0; i < upperLevels; i++) {
// auto childParams = computeChildParameters(min, max, target);
//
// id.push_back(childParams.id);
//
// min = childParams.min;
// max = childParams.max;
// }
//
// return id;
// }
//
// void createNodes(vector<string> nodeIDs) {
//
// for (string nodeID : nodeIDs) {
// PWNode* node = root;
//
// vector<int> id = toVectorID(nodeID);
//
// for (int childIndex : id) {
//
// if (node->children[childIndex] == nullptr) {
// string childName = node->name + to_string(childIndex);
// PWNode* child = new PWNode(childName);
//
// node->children[childIndex] = child;
// }
//
// node = node->children[childIndex];
// }
// }
//
// }
//
// PWNode* findPWNode(vector<int> id) {
//
// PWNode* node = root;
//
// for (int childIndex : id) {
// node = node->children[childIndex];
//
// if (node == nullptr) {
// return nullptr;
// }
// }
//
// return node;
// }
//
//
//
// void writeChunk(shared_ptr<Chunk> chunk, shared_ptr<Points> points, ProcessResult processResult) {
//
// double tStart = now();
//
// {
// lock_guard<mutex> lock(mtx_writeChunk);
//
// UpperLevelStuff stuff = { processResult.upperLevels , processResult.upperLevelsData };
// upperLevelsResults.push_back(stuff);
// }
//
// auto chunkRoot = processResult.chunkRoot;
//
// if (chunkRoot == nullptr) {
// return;
// }
//
// struct NodePairing {
// shared_ptr<Node> node = nullptr;
// PWNode* pwNode = nullptr;
//
// NodePairing(shared_ptr<Node> node, PWNode* pwNode) {
// this->node = node;
// this->pwNode = pwNode;
// }
// };
//
// function<void(shared_ptr<Node>, PWNode*, vector<NodePairing> & nodes)> flatten = [&flatten](shared_ptr<Node> node, PWNode* pwNode, vector<NodePairing>& nodes) {
// nodes.emplace_back(node, pwNode);
//
// //for (int i = 0; i < node->children.size(); i++) {
//
// //auto child = node->children[i];
// for(auto child : node->children){
//
// if (child == nullptr) {
// continue;
// }
//
// PWNode* pwChild = new PWNode(child->name);
// pwNode->children.push_back(pwChild);
//
// flatten(child, pwChild, nodes);
// }
//
// return nodes;
// };
//
// PWNode* pwChunkRoot = new PWNode(chunkRoot->name);
// vector<NodePairing> nodes;
// flatten(chunkRoot, pwChunkRoot, nodes);
//
// Attributes attributes = points->attributes;
// auto attributeBuffer = points->attributeBuffer;
// const char* ccAttributeBuffer = attributeBuffer->dataChar;
//
// auto min = this->min;
// auto scale = this->scale;
//
// uint64_t bufferSize = 0;
// int bytesPerPoint = 12 + attributes.byteSize;
//
// for (NodePairing pair : nodes) {
// int numPoints = pair.node->grid->accepted.size() + pair.node->store.size();
// int nodeBufferSize = numPoints * bytesPerPoint;
//
// bufferSize += nodeBufferSize;
// }
//
// vector<uint8_t> buffer(bufferSize, 0);
// uint64_t bufferOffset = 0;
//
// auto writePoint = [&bufferOffset, &bytesPerPoint , &buffer, &min, &scale, &attributes, &attributeBuffer](Point& point) {
// int32_t ix = int32_t((point.x - min.x) / scale);
// int32_t iy = int32_t((point.y - min.y) / scale);
// int32_t iz = int32_t((point.z - min.z) / scale);
//
// memcpy(buffer.data() + bufferOffset + 0, reinterpret_cast<void*>(&ix), sizeof(int32_t));
// memcpy(buffer.data() + bufferOffset + 4, reinterpret_cast<void*>(&iy), sizeof(int32_t));
// memcpy(buffer.data() + bufferOffset + 8, reinterpret_cast<void*>(&iz), sizeof(int32_t));
//
// int64_t attributeOffset = point.index * attributes.byteSize;
//
// auto attributeTarget = buffer.data() + bufferOffset + 12;
// auto attributeSource = attributeBuffer->dataU8 + attributeOffset;
// memcpy(attributeTarget, attributeSource, attributes.byteSize);
//
// bufferOffset += bytesPerPoint;
// };
//
//
// for (NodePairing& pair: nodes) {
// for (Point& point : pair.node->grid->accepted) {
// writePoint(point);
// }
//
// for (Point& point : pair.node->store) {
// writePoint(point);
// }
// }
//
//
// // ==============================================================================
// // FROM HERE ON, ONLY ONE THREAD UPDATES THE HIERARCHY DATA AND WRITES TO FILE
// // ==============================================================================
//
// double tLockStart = now();
// lock_guard<mutex> lock(mtx_writeChunk);
//
// double lockDuration = now() - tLockStart;
// if (lockDuration > 0.1) {
// cout << "long lock duration: " << lockDuration << " s" << endl;
// }
//
// for (NodePairing& pair : nodes) {
// int numPoints = pair.node->grid->accepted.size() + pair.node->store.size();
// int nodeBufferSize = numPoints * bytesPerPoint;
//
// pair.pwNode->byteOffset = currentByteOffset;
// pair.pwNode->byteSize = nodeBufferSize;
// pair.pwNode->numPoints = numPoints;
//
// currentByteOffset += nodeBufferSize;
// }
//
// // attach local chunk-root to global hierarchy, by replacing previously created dummy
// vector<int> pwid = toVectorID(pwChunkRoot->name);
// PWNode* pwMain = findPWNode(pwid);
// PWNode* pwLocal = pwChunkRoot;
//
// pwMain->byteOffset = pwLocal->byteOffset;
// pwMain->byteSize = pwLocal->byteSize;
// pwMain->children = pwLocal->children;
// pwMain->name = pwLocal->name;
// pwMain->numPoints = pwLocal->numPoints;
//
// // now write everything to file
// if (fsFile == nullptr) {
// fsFile = new fstream();
// fsFile->open(pathData, ios::out | ios::binary | ios::app);
// }
//
// fsFile->write(reinterpret_cast<const char*>(buffer.data()), buffer.size());
//
// // fsFile is closed in PotreeWriter::close()
// }
//
// void processUpperLevelResults() {
//
// auto results = upperLevelsResults;
//
// struct NodeAndData {
// Node* node;
// shared_ptr<Points> data;
// };
//
// unordered_map<string, vector<NodeAndData>> nodes;
//
// for (auto stuff : results) {
// stuff.node->traverse([&nodes, &stuff](Node* node){
//
// NodeAndData nad;
// nad.node = node;
// nad.data = stuff.data;
//
// nodes[node->name].push_back(nad);
// });
// }
//
// for (auto it : nodes) {
//
// string name = it.first;
// auto id = toVectorID(name);
// PWNode* pwNode = findPWNode(id);
//
// if (pwNode == nullptr) {
// cout << "ERROR: points attempted to be added in an unintented chunk. " << endl;
// continue;
// }
//
// vector<NodeAndData>& nodeParts = it.second;
//
// int numPoints = 0;
// for (NodeAndData& part : nodeParts) {
// numPoints += part.node->grid->accepted.size(); // shouldn't have any stores
// }
//
// int bytesPerPoint = 12 + attributes.byteSize;
// uint64_t bufferSize = numPoints * bytesPerPoint;
//
// vector<uint8_t> buffer(bufferSize, 0);
// uint64_t bufferOffset = 0;
//
// auto min = this->min;
// auto max = this->max;
// auto scale = this->scale;
// auto attributes = this->attributes;
//
// for (auto nad : nodeParts) {
//
// auto node = nad.node;
// auto srcBuffer = nad.data->attributeBuffer;
//
// auto writePoint = [&bufferOffset, &bytesPerPoint, &buffer, &min, &scale, &attributes, srcBuffer](Point& point) {
// int32_t ix = int32_t((point.x - min.x) / scale);
// int32_t iy = int32_t((point.y - min.y) / scale);
// int32_t iz = int32_t((point.z - min.z) / scale);
//
// memcpy(buffer.data() + bufferOffset + 0, reinterpret_cast<void*>(&ix), sizeof(int32_t));
// memcpy(buffer.data() + bufferOffset + 4, reinterpret_cast<void*>(&iy), sizeof(int32_t));
// memcpy(buffer.data() + bufferOffset + 8, reinterpret_cast<void*>(&iz), sizeof(int32_t));
//
// int64_t attributeOffset = point.index * attributes.byteSize;
//
// auto attributeTarget = buffer.data() + bufferOffset + 12;
// auto attributeSource = srcBuffer->dataU8 + attributeOffset;
// memcpy(attributeTarget, attributeSource, attributes.byteSize);
//
// bufferOffset += bytesPerPoint;
// };
//
// for (Point& point : node->grid->accepted) {
// writePoint(point);
// }
// }
//
//
// pwNode->byteOffset = currentByteOffset;
// pwNode->byteSize = bufferSize;
// pwNode->numPoints = numPoints;
//
// currentByteOffset += bufferSize;
//
// fsFile->write(reinterpret_cast<const char*>(buffer.data()), buffer.size());
// }
//
// }
//
// void close() {
//
// processUpperLevelResults();
//
// fsFile->close();
//
// writeHierarchy();
// writeCloudJson();
//
// }
//
// void writeCloudJson() {
//
// auto min = this->min;
// auto max = this->max;
//
// json box = {
// {"min", {min.x, min.y, min.z}},
// {"max", {max.x, max.y, max.z}},
// };
//
// json aPosition = {
// {"name", "position"},
// {"elements", 3},
// {"elementSize", 4},
// {"type", "int32"},
// };
//
// json aRGBA = {
// {"name", "rgba"},
// {"elements", 4},
// {"elementSize", 1},
// {"type", "uint8"},
// };
//
// json attributes = {
// {"bla", "blubb"}
// };
//
// json js = {
// {"version", "1.9"},
// {"projection", ""},
// {"boundingBox", box},
// {"spacing", spacing},
// {"scale", scale},
// {"attributes", {aPosition, aRGBA}},
// };
//
//
// {
// string str = js.dump(4);
//
// fstream file;
// file.open(pathCloudJs, ios::out);
//
// file << str;
//
// file.close();
// }
// }
//
// void writeHierarchy() {
//
// // for debugging/testing
// writeHierarchyJSON();
//
// writeHierarchyBinary();
//
//
//
//
// }
//
// void writeHierarchyBinary() {
//
// vector<PWNode*> nodes;
// function<void(PWNode*)> traverse = [&traverse, &nodes](PWNode* node){
// nodes.push_back(node);
//
// for (auto child : node->children) {
// if (child != nullptr) {
// traverse(child);
// }
// }
// };
// traverse(root);
//
// // sizeof(NodeData) = 32 bytes
// struct NodeData {
// uint64_t byteOffset = 0; // location of first byte in data store
// uint64_t byteLength = 0; // byte size in data store
// uint64_t childPosition = 0; // location of first child in hierarchy
// uint8_t childBitset = 0; // which of the eight children exist?
// };
//
// // sort in breadth-first order
// auto compare = [](PWNode* a, PWNode* b) -> bool {
// if (a->name.size() == b->name.size()) {
// bool result = lexicographical_compare(
// a->name.begin(), a->name.end(),
// b->name.begin(), b->name.end());
//
// return result;
// } else {
// return a->name.size() < b->name.size();
// }
// };
//
// sort(nodes.begin(), nodes.end(), compare);
//
// unordered_map<string, uint64_t> nodesMap;
// vector<NodeData> nodesData(nodes.size());
//
// for (uint64_t i = 0; i < nodes.size(); i++) {
//
// PWNode* node = nodes[i];
// NodeData& nodeData = nodesData[i];
//
// nodeData.byteOffset = node->byteOffset;
// nodeData.byteLength = node->byteSize;
//
// nodesMap[node->name] = i;
//
// if (node->name != "r") {
// string parentName = node->name.substr(0, node->name.size() - 1);
// uint64_t parentIndex = nodesMap[parentName];
// PWNode* parent = nodes[parentIndex];
// NodeData& parentData = nodesData[parentIndex];
//
//
// int index = node->name.at(node->name.size() - 1) - '0';
// int bitmask = 1 << index;
// parentData.childBitset = parentData.childBitset | bitmask;
//
// if (parentData.childPosition == 0) {
// parentData.childPosition = i;
// }
//
// }
// }
//
// cout << "#nodes: " << nodes.size() << endl;
//
// {
// string jsPath = targetDirectory + "/hierarchy.bin";
//
// fstream file;
// file.open(jsPath, ios::out | ios::binary);
//
// char* data = reinterpret_cast<char*>(nodesData.data());
// file.write(data, nodesData.size() * sizeof(NodeData));
//
// cout << "sizeof(NodeData): " << sizeof(NodeData) << endl;
//
// //for (int i = 0; i < 109; i++) {
// // NodeData& nodeData = nodesData[i];
// // PWNode* node = nodes[i];
//
// // file << "=================" << endl;
// // file << "position; " << i << endl;
// // file << "name: " << node->name << endl;
// // file << "offset: " << nodeData.byteOffset << endl;
// // file << "size: " << nodeData.byteLength << endl;
// // file << "childPosition: " << nodeData.childPosition << endl;
// // file << "children: ";
//
// // for (int j = 0; j < 8; j++) {
// // int value = nodeData.childBitset & (1 << j);
//
// // file << (value > 0 ? 1 : 0)<< ", ";
// // }
// // file << endl;
//
//
// //}
//
//
// file.close();
// }
//
//
// }
//
// void writeHierarchyJSON() {
//
// function<json(PWNode*)> traverse = [&traverse](PWNode* node) -> json {
//
// vector<json> jsChildren;
// for (PWNode* child : node->children) {
// if (child == nullptr) {
// continue;
// }
//
// json jsChild = traverse(child);
// jsChildren.push_back(jsChild);
// }
//
// uint64_t numPoints = node->numPoints;
// int64_t byteOffset = node->byteOffset;
// int64_t byteSize = node->byteSize;
//
// json jsNode = {
// {"name", node->name},
// {"numPoints", numPoints},
// {"byteOffset", byteOffset},
// {"byteSize", byteSize},
// {"children", jsChildren}
// };
//
// return jsNode;
// };
//
// json js;
// js["hierarchy"] = traverse(root);
//
// { // write to file
// string str = js.dump(4);
//
// string jsPath = pathHierarchy;
//
// fstream file;
// file.open(jsPath, ios::out);
//
// file << str;
//
// file.close();
// }
// }
//
//
//
//};

View File

@@ -1,59 +0,0 @@
#ifndef SPARSE_GRID_H
#define SPARSE_GRID_H
#include "AABB.h"
#include "Point.h"
#include "GridCell.h"
#include <map>
#include <unordered_map>
#include <vector>
#include <math.h>
using std::vector;
using std::map;
using std::unordered_map;
using std::min;
using std::max;
namespace Potree{
#define MAX_FLOAT std::numeric_limits<float>::max()
// Sparse grid over an AABB whose occupied cells map a flattened cell index to
// a GridCell. Inherits from unordered_map, so the grid itself IS the container
// of cells. Used for minimum-distance ("spacing") checks when accepting points.
class SparseGrid : public unordered_map<long long, GridCell*>{
public:

// grid resolution along x, y and z
int width;
int height;
int depth;

// the spatial extent covered by the grid
AABB aabb;

// squared spacing value derived from the minGap constructor argument
float squaredSpacing;

// number of points accepted into the grid so far
unsigned int numAccepted = 0;

SparseGrid(AABB aabb, float minGap);

// NOTE(review): copies only the grid metadata, not the inherited map of
// cells — a copy starts out without any cell content. Confirm this is
// intentional before relying on copies.
SparseGrid(const SparseGrid &other)
: width(other.width), height(other.height), depth(other.depth), aabb(other.aabb), squaredSpacing(other.squaredSpacing), numAccepted(other.numAccepted)
{
}

~SparseGrid();

// distance checks of p against the content of <cell>; the overload takes an
// explicit squared spacing instead of the member value
bool isDistant(const Vector3<double> &p, GridCell *cell);
bool isDistant(const Vector3<double> &p, GridCell *cell, float &squaredSpacing);

// true if p would pass the distance check, without inserting it
bool willBeAccepted(const Vector3<double> &p);
bool willBeAccepted(const Vector3<double> &p, float &squaredSpacing);

// inserts p if it passes the distance check; returns whether it was accepted
bool add(Vector3<double> &p);

// inserts p unconditionally
void addWithoutCheck(Vector3<double> &p);
};
}
#endif

View File

@@ -0,0 +1,123 @@
#pragma once

#include <atomic>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

//using namespace std;
using std::thread;
using std::atomic;
using std::mutex;
using std::vector;
using std::deque;
using std::function;
using std::lock_guard;
using std::shared_ptr;
// might be better off using https://github.com/progschj/ThreadPool
// Pool of worker threads that consume tasks from a shared queue and hand each
// one to a user-supplied processor callback. Tasks queued before close() is
// called are guaranteed to be processed before the workers exit.
template<class Task>
class TaskPool {
public:

	int numThreads = 0;                              // number of worker threads
	std::deque<std::shared_ptr<Task>> tasks;         // pending tasks; guarded by mtx_task

	using TaskProcessorType = std::function<void(std::shared_ptr<Task>)>;

	TaskProcessorType processor;                     // invoked once per task
	std::vector<std::thread> threads;                // the worker threads
	std::atomic<bool> isClosed = false;              // set once close() was requested
	std::mutex mtx_task;                             // protects `tasks`

	// Spawns <numThreads> workers that repeatedly pop tasks from the queue and
	// pass them to <processor>. Workers exit once close() has been called AND
	// the queue has been drained.
	TaskPool(int numThreads, TaskProcessorType processor) {
		this->numThreads = numThreads;
		this->processor = processor;

		for (int i = 0; i < numThreads; i++) {
			threads.emplace_back([this]() {
				while (true) {
					std::shared_ptr<Task> task = nullptr;

					{ // retrieve task or leave thread if done
						std::lock_guard<std::mutex> lock(mtx_task);

						bool allDone = tasks.empty() && isClosed;

						if (allDone) {
							break;
						} else if (!tasks.empty()) {
							task = tasks.front();
							tasks.pop_front();
						}
					}

					if (task != nullptr) {
						this->processor(task);
					} else {
						// only sleep while idle; sleeping after every task
						// (as before) needlessly throttled throughput
						std::this_thread::sleep_for(std::chrono::milliseconds(10));
					}
				}
			});
		}
	}

	~TaskPool() {
		this->close();
	}

	// Enqueues a task. Thread-safe.
	void addTask(std::shared_ptr<Task> t) {
		std::lock_guard<std::mutex> lock(mtx_task);
		tasks.push_back(t);
	}

	// Signals shutdown and joins all workers. Remaining queued tasks are still
	// processed before the workers exit. Safe to call repeatedly.
	void close() {
		// atomically claim the shutdown so concurrent or repeated calls
		// cannot both attempt to join the threads
		if (isClosed.exchange(true)) {
			return;
		}

		for (std::thread& t : threads) {
			t.join();
		}
	}

	// Blocks until the queue is empty. Note that a task that was already
	// popped but is still being processed does not count as queued.
	void waitTillEmpty() {
		while (true) {
			size_t size = 0;
			{
				std::lock_guard<std::mutex> lock(mtx_task);
				size = tasks.size();
			}

			if (size == 0) {
				return;
			} else {
				std::this_thread::sleep_for(std::chrono::milliseconds(10));
			}
		}
	}
};

View File

@@ -1,96 +1,84 @@
#ifndef VECTOR3_H
#define VECTOR3_H
#include <math.h>
#include <iostream>
#include <iomanip>
#include <sstream>
using std::ostream;
#ifndef _MSC_VER
using std::max;
#endif
namespace Potree{
// Simple 3-component vector with the arithmetic needed by the converter.
template<class T>
class Vector3{
public:
	T x = 0;
	T y = 0;
	T z = 0;

	Vector3() = default;

	// component-wise constructor
	Vector3(T x, T y, T z){
		this->x = x;
		this->y = y;
		this->z = z;
	}

	// splat constructor: all three components get the same value
	Vector3(T value){
		this->x = value;
		this->y = value;
		this->z = value;
	}

	Vector3(const Vector3<T> &other)
		:x(other.x), y(other.y), z(other.z){
	}

	~Vector3() = default;

	// euclidean length
	T length(){
		return std::sqrt(squaredLength());
	}

	// squared length; cheaper than length() since it avoids the sqrt
	T squaredLength(){
		return x * x + y * y + z * z;
	}

	T distanceTo(Vector3<T> p) const{
		Vector3<T> diff = (*this) - p;
		return diff.length();
	}

	T squaredDistanceTo(const Vector3<T> &p) const{
		Vector3<T> diff = (*this) - p;
		return diff.squaredLength();
	}

	// largest of the three components
	T maxValue(){
		return std::max(std::max(x, y), z);
	}

	Vector3<T> operator-(const Vector3<T>& right) const {
		Vector3<T> result(x - right.x, y - right.y, z - right.z);
		return result;
	}

	Vector3<T> operator+(const Vector3<T>& right) const {
		Vector3<T> result(x + right.x, y + right.y, z + right.z);
		return result;
	}

	Vector3<T> operator+(const T right) const {
		Vector3<T> result(x + right, y + right, z + right);
		return result;
	}

	Vector3<T> operator/(const T &a) const{
		Vector3<T> result(x / a, y / a, z / a);
		return result;
	}

	// prints as "[x, y, z]" with 6 fixed decimals
	friend std::ostream &operator<<( std::ostream &output, const Vector3<T> &value ){
		std::stringstream ss;
		ss << std::setprecision(6) << std::fixed;
		ss << "[" << value.x << ", " << value.y << ", " << value.z << "]";
		output << ss.str();
		return output;
	}
};
}
#endif
#pragma once
#include <string>
#include <cmath>
#include <limits>
//using std::string;
static double Infinity = std::numeric_limits<double>::infinity();

// Minimal 3-component vector. Distance computations are carried out in double
// regardless of T.
template<typename T>
struct Vector3{

	T x = T(0.0);
	T y = T(0.0);
	T z = T(0.0);

	Vector3<T>() {
	}

	Vector3<T>(T x, T y, T z) {
		this->x = x;
		this->y = y;
		this->z = z;
	}

	// squared euclidean distance to <right>, computed in double precision
	T squaredDistanceTo(const Vector3<T>& right) {
		double diffX = right.x - x;
		double diffY = right.y - y;
		double diffZ = right.z - z;

		return diffX * diffX + diffY * diffY + diffZ * diffZ;
	}

	// euclidean distance to <right>, computed in double precision
	T distanceTo(const Vector3<T>& right) {
		double diffX = right.x - x;
		double diffY = right.y - y;
		double diffZ = right.z - z;
		double squared = diffX * diffX + diffY * diffY + diffZ * diffZ;

		return std::sqrt(squared);
	}

	// largest of the three components
	T max() {
		return std::max({ x, y, z });
	}

	Vector3<T> operator-(const Vector3<T>& right) const {
		Vector3<T> result(x - right.x, y - right.y, z - right.z);
		return result;
	}

	Vector3<T> operator+(const Vector3<T>& right) const {
		Vector3<T> result(x + right.x, y + right.y, z + right.z);
		return result;
	}

	Vector3<T> operator+(const double& scalar) const {
		Vector3<T> result(x + scalar, y + scalar, z + scalar);
		return result;
	}

	Vector3<T> operator/(const double& scalar) const {
		Vector3<T> result(x / scalar, y / scalar, z / scalar);
		return result;
	}

	// component-wise product
	Vector3<T> operator*(const Vector3<T>& right) const {
		Vector3<T> result(x * right.x, y * right.y, z * right.z);
		return result;
	}

	Vector3<T> operator*(const double& scalar) const {
		Vector3<T> result(x * scalar, y * scalar, z * scalar);
		return result;
	}
};

View File

@@ -1,226 +0,0 @@
#ifndef XYZPOINTREADER_H
#define XYZPOINTREADER_H
#include "Point.h"
#include "PointReader.h"
#include "PotreeException.h"
#include <string>
#include <fstream>
#include <iostream>
#include <regex>
#include <vector>
#include <sstream>
#include <algorithm>
using std::getline;
using std::ifstream;
using std::string;
using std::vector;
using std::cout;
using std::endl;
using std::stringstream;
namespace Potree{
// Reads points from ASCII "XYZ" files: one point per line, fields separated by
// tabs, spaces or commas. The format string describes the fields of one line,
// one character per field: x/y/z = position, r/g/b = color, i = intensity,
// X/Y/Z = normal, s = skip.
class XYZPointReader : public PointReader{
private:
AABB aabb;
ifstream stream;
// number of points returned via readNextPoint() so far
long pointsRead;
// total number of points in the file (counted once in the constructor)
long pointCount;
char *buffer;
int pointByteSize;
// most recently parsed point; returned by getPoint()
Point point;
// per-character field description of one input line
string format;
// colors are normalized as 255 * (value - colorOffset) / colorScale
float colorOffset;
float colorScale;
// intensities are normalized as 65535 * (value - intensityOffset) / intensityScale
float intensityOffset;
float intensityScale;
// number of input lines skipped because they did not match the format
int linesSkipped;
public:
// Opens <file> and derives the color/intensity normalization from the given
// ranges. If colorRange is empty, the color scale (and, if needed, the
// format) is estimated from the first ~1000 lines. The file is then read
// once in full to compute the AABB and the point count, and rewound.
XYZPointReader(string file, string format, vector<double> colorRange, vector<double> intensityRange)
: stream(file, std::ios::in | std::ios::binary)
{
this->format = format;
pointsRead = 0;
linesSkipped = 0;
pointCount = 0;
colorScale = -1;
// intensity normalization: [offset, max], a single scale, or identity
if(intensityRange.size() == 2){
intensityOffset = (float)intensityRange[0];
intensityScale = (float)intensityRange[1]-(float)intensityRange[0];
}else if(intensityRange.size() == 1){
intensityOffset = 0.0f;
intensityScale = (float)intensityRange[0];
}else{
intensityOffset = 0.0f;
intensityScale = 1.0f;
}
// NOTE(review): for a two-element colorRange the scale is set to range[1]
// rather than range[1] - range[0], unlike the intensity branch above —
// confirm this asymmetry is intended.
if(colorRange.size() == 2){
colorOffset = (float)colorRange[0];
colorScale = (float)colorRange[1];
}else if(colorRange.size() == 1){
colorOffset = 0.0f;
colorScale = (float)colorRange[0];
}else if(colorRange.size() == 0){
colorOffset = 0.0f;
// try to find color range by evaluating the first x points.
float max = 0;
int j = 0;
string line;
while(getline(stream, line) && j < 1000){
// NOTE(review): trim() returns the trimmed copy; the result is discarded
// here, so <line> is not actually trimmed — verify intended.
trim(line);
vector<string> tokens = split(line, { '\t', ' ', ',' });
// no format given: assume "xyz" followed by skips, with the last three
// fields as "rgb" when there are at least six fields
if(this->format == "" && tokens.size() >= 3){
string f(tokens.size(), 's');
f.replace(0, 3, "xyz");
if(tokens.size() >= 6){
f.replace(tokens.size() - 3, 3, "rgb");
}
this->format = f;
cout << "using format: '" << this->format << "'" << endl;
}
if(tokens.size() < this->format.size()){
continue;
}
// track the largest color component seen
int i = 0;
for(const auto &f : format) {
string token = tokens[i++];
if(f == 'r'){
max = std::max(max, stof(token));
}else if(f == 'g'){
max = std::max(max, stof(token));
}else if(f == 'b'){
max = std::max(max, stof(token));
}
}
j++;
}
// pick the smallest conventional color range that contains the maximum:
// unit range, 8 bit, 16 bit, or the observed maximum itself
if(max <= 1.0f){
colorScale = 1.0f;
} else if(max <= 255){
colorScale = 255.0f;
}else if(max <= pow(2, 16) - 1){
colorScale =(float)pow(2, 16) - 1;
}else{
colorScale = (float)max;
}
// rewind for the counting pass below
stream.clear();
stream.seekg(0, stream.beg);
}
// read through once to calculate aabb and number of points
while(readNextPoint()){
Point p = getPoint();
aabb.update(p.position);
pointCount++;
}
stream.clear();
stream.seekg(0, stream.beg);
}
// Parses the next matching line into <point>. Returns false at end of file.
// Lines that do not have exactly format.size() fields are skipped.
bool readNextPoint(){
double x = 0;
double y = 0;
double z = 0;
float nx = 0;
float ny = 0;
float nz = 0;
unsigned char r = 255;
unsigned char g = 255;
unsigned char b = 255;
// unsigned char a = 255; // unused variable
unsigned short intensity = 0;
string line;
while(getline(stream, line)){
// NOTE(review): trim() result discarded here as well — see constructor.
trim(line);
vector<string> tokens = split(line, {'\t', ' ', ','});
if(tokens.size() != format.size()){
//throw PotreeException("Not enough tokens for the given format");
// warn only once, then silently count skipped lines
if(linesSkipped == 0){
cout << "some lines may be skipped because they do not match the given format: '" << format << "'" << endl;
}
linesSkipped++;
continue;
}
// dispatch each token according to its format character
int i = 0;
for(const auto &f : format) {
string token = tokens[i++];
if(f == 'x'){
x = stod(token);
}else if(f == 'y'){
y = stod(token);
}else if(f == 'z'){
z = stod(token);
}else if(f == 'r'){
r = (unsigned char)(255.0f * (stof(token) - colorOffset) / colorScale);
}else if(f == 'g'){
g = (unsigned char)(255.0f * (stof(token) - colorOffset) / colorScale);
}else if(f == 'b'){
b = (unsigned char)(255.0f * (stof(token) - colorOffset) / colorScale);
}else if(f == 'i'){
intensity = (unsigned short)( 65535 * (stof(token) - intensityOffset) / intensityScale);
}else if(f == 's'){
// skip
}else if(f == 'X'){
nx = stof(token);
}else if(f == 'Y'){
ny = stof(token);
}else if(f == 'Z'){
nz = stof(token);
}
}
point = Point(x,y,z,r,g,b);
point.normal.x = nx;
point.normal.y = ny;
point.normal.z = nz;
point.intensity = intensity;
pointsRead++;
return true;
}
return false;
}
// the point parsed by the most recent successful readNextPoint()
Point getPoint(){
return point;
}
AABB getAABB(){
return aabb;
}
long long numPoints(){
return pointCount;
}
void close(){
stream.close();
}
};
}
#endif

View File

@@ -0,0 +1,164 @@
#pragma once
#include <memory>
using std::make_shared;
#include "convmath.h"
#include "Points.h"
#include "stuff.h"
// Returns the octant index (0..7) of <point> within the box [min, max].
// Bit 2 is set for the upper x-half, bit 1 for y, bit 0 for z.
inline int childIndexOf(Vector3<double>& min, Vector3<double>& max, Point& point) {

	int childIndex = 0;

	// normalize each coordinate to [0, 1] within the box.
	// FIX: y and z were previously divided by the x-extent (max.x - min.x),
	// which is only correct for cubic boxes; each axis now uses its own extent.
	double nx = (point.x - min.x) / (max.x - min.x);
	double ny = (point.y - min.y) / (max.y - min.y);
	double nz = (point.z - min.z) / (max.z - min.z);

	if (nx > 0.5) {
		childIndex = childIndex | 0b100;
	}

	if (ny > 0.5) {
		childIndex = childIndex | 0b010;
	}

	if (nz > 0.5) {
		childIndex = childIndex | 0b001;
	}

	return childIndex;
}
// Convenience overload: octant index of <point> within <box>.
inline int computeChildIndex(BoundingBox box, Point point) {
	auto& lo = box.min;
	auto& hi = box.max;

	return childIndexOf(lo, hi, point);
}
// Computes the bounding box of child octant <index> within the box spanned by
// <min> and <max>. Bits 2/1/0 of <index> select the upper half along x/y/z.
inline BoundingBox childBoundingBoxOf(Vector3<double> min, Vector3<double> max, int index) {

	BoundingBox box;

	auto size = max - min;
	Vector3<double> center = min + (size * 0.5);

	// per axis: the lower half keeps [min, center], the upper half [center, max]
	bool upperX = (index & 0b100) != 0;
	bool upperY = (index & 0b010) != 0;
	bool upperZ = (index & 0b001) != 0;

	box.min.x = upperX ? center.x : min.x;
	box.max.x = upperX ? max.x : center.x;

	box.min.y = upperY ? center.y : min.y;
	box.max.y = upperY ? max.y : center.y;

	box.min.z = upperZ ? center.z : min.z;
	box.max.z = upperZ ? max.z : center.z;

	return box;
}
// Overload of childBoundingBoxOf for a BoundingBox argument. Delegates to the
// (min, max) overload, which performs the identical computation; previously
// the whole body was duplicated here.
inline BoundingBox childBoundingBoxOf(BoundingBox in, int index) {
	return childBoundingBoxOf(in.min, in.max, index);
}
// Loads points from a binary file with a fixed stride of 28 bytes per point,
// where the first 24 bytes are the x/y/z coordinates as doubles.
// point.index is set to the point's position in the file.
inline vector<Point> loadPoints(string file) {

	// 3 x 8 byte coordinates plus 4 further bytes per point
	constexpr int64_t bytesPerPoint = 28;

	auto buffer = readBinaryFile(file);
	int64_t numPoints = buffer.size() / bytesPerPoint;

	vector<Point> points;
	points.reserve(numPoints);

	// int64_t index to avoid overflow beyond 2^31 points
	for (int64_t i = 0; i < numPoints; i++) {

		// a 28-byte stride leaves the doubles unaligned, so memcpy is used
		// instead of reinterpret_cast to avoid unaligned reads (UB)
		double xyz[3];
		memcpy(xyz, buffer.data() + bytesPerPoint * i, sizeof(xyz));

		Point point;
		point.x = xyz[0];
		point.y = xyz[1];
		point.z = xyz[2];
		point.index = i;

		points.push_back(point);
	}

	return points;
}
// Loads points from a binary file whose per-point layout is described by
// <attributes>; the first 24 bytes of each point are the x/y/z coordinates as
// doubles. The raw per-point data is also retained in points->attributeBuffer.
inline shared_ptr<Points> loadPoints(string file, Attributes attributes) {

	auto buffer = readBinaryFile(file);

	int64_t bytesPerPoint = attributes.byteSize;
	int64_t numPoints = buffer.size() / bytesPerPoint;

	auto points = make_shared<Points>();
	points->points.reserve(numPoints);
	points->attributes = attributes;
	points->attributeBuffer = make_shared<Buffer>(numPoints * bytesPerPoint);

	// int64_t index to avoid overflow beyond 2^31 points
	for (int64_t i = 0; i < numPoints; i++) {

		// an arbitrary stride may leave the doubles unaligned, so memcpy is
		// used instead of reinterpret_cast to avoid unaligned reads (UB)
		double xyz[3];
		memcpy(xyz, buffer.data() + bytesPerPoint * i, sizeof(xyz));

		Point point;
		point.x = xyz[0];
		point.y = xyz[1];
		point.z = xyz[2];
		point.index = i;

		points->points.push_back(point);
	}

	// keep a copy of the raw attribute data alongside the parsed coordinates
	memcpy(points->attributeBuffer->data, buffer.data(), buffer.size());

	return points;
}

View File

@@ -0,0 +1,26 @@
#pragma once
#include "Vector3.h"
// Axis-aligned bounding box. A default-constructed box is "empty": min is at
// +Infinity and max at -Infinity along every axis.
class BoundingBox {
public:

	Vector3<double> min;
	Vector3<double> max;

	BoundingBox() {
		double inf = Infinity;

		this->min = { inf, inf, inf };
		this->max = { -inf, -inf, -inf };
	}

	BoundingBox(Vector3<double> min, Vector3<double> max) {
		this->min = min;
		this->max = max;
	}

	// extent along each axis
	Vector3<double> size() {
		return max - min;
	}
};

View File

@@ -1,27 +0,0 @@
#ifndef DEFINITIONS_H
#define DEFINITIONS_H
namespace Potree{
// Output file format of the converted point cloud.
enum class OutputFormat{
BINARY,
LAS,
LAZ
};

// Behavior when the target location already contains data.
enum class StoreOption{
ABORT_IF_EXISTS,
OVERWRITE,
INCREMENTAL
};

// Trade-off between conversion speed and output quality.
enum class ConversionQuality{
FAST,
DEFAULT,
NICE
};
}
#endif

View File

@@ -1,129 +1,48 @@
#pragma once
#include <string>
#include <chrono>
#include <cstdarg>
#include <sstream>
#include <vector>
using std::string;
using std::vector;
using std::stringstream;
string repeat(string str, int count);
string stringReplace(string str, string search, string replacement);
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
bool icompare_pred(unsigned char a, unsigned char b);
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
bool icompare(std::string const& a, std::string const& b);
bool endsWith(const string& str, const string& suffix);
bool iEndsWith(const std::string& str, const std::string& suffix);
double now();
void printElapsedTime(string label, double startTime);
void printThreadsafe(string str);
void printThreadsafe(string str1, string str2);
void printThreadsafe(string str1, string str2, string str3);
void printThreadsafe(string str1, string str2, string str3, string str4);
void writeFile(string path, string text);
string readTextFile(string path);
vector<char> readBinaryFile(string path);
struct MemoryUsage {
uint64_t totalMemory = 0;
uint64_t usedMemory = 0;
#ifndef STUFF_H
#define STUFF_H
#include <vector>
#include <map>
#include <iostream>
#include <math.h>
#include <string>
#include <fstream>
#include <cctype>
//#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <experimental/filesystem>
#include "Vector3.h"
#include "AABB.h"
#include "Point.h"
#include "SparseGrid.h"
#include "GridCell.h"
using std::ifstream;
using std::ofstream;
using std::ios;
using std::string;
using std::min;
using std::max;
using std::ostream;
using std::cout;
using std::cin;
using std::endl;
using std::vector;
using std::binary_function;
using std::map;
namespace fs = std::experimental::filesystem;
namespace Potree {
AABB readAABB(string fIn, int numPoints);
AABB readAABB(string fIn);
/**
* y
* |-z
* |/
* O----x
*
* 3----7
* /| /|
* 2----6 |
* | 1--|-5
* |/ |/
* 0----4
*
*/
AABB childAABB(const AABB &aabb, const int &index);
/**
* y
* |-z
* |/
* O----x
*
* 3----7
* /| /|
* 2----6 |
* | 1--|-5
* |/ |/
* 0----4
*
*/
int nodeIndex(const AABB &aabb, const Point &point);
/**
* from http://stackoverflow.com/questions/5840148/how-can-i-get-a-files-size-in-c
*/
long filesize(string filename);
/**
* from http://stackoverflow.com/questions/874134/find-if-string-endswith-another-string-in-c
*/
bool endsWith(std::string const &fullString, std::string const &ending);
/**
* see http://stackoverflow.com/questions/735204/convert-a-string-in-c-to-upper-case
*/
string toUpper(string str);
bool copyDir(fs::path source, fs::path destination);
float psign(float value);
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
bool icompare_pred(unsigned char a, unsigned char b);
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
bool icompare(string const& a, string const& b);
bool endsWith(const string &str, const string &suffix);
bool iEndsWith(const string &str, const string &suffix);
vector<string> split(string str, vector<char> delimiters);
vector<string> split(string str, char delimiter);
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
string ltrim(string s);
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
string rtrim(string s);
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
string trim(string s);
}
#endif
};
MemoryUsage getMemoryUsage();

View File

@@ -0,0 +1,98 @@
// from https://github.com/progschj/ThreadPool/blob/master/ThreadPool.h
#pragma once
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
// Fixed-size pool of worker threads consuming std::function tasks from a
// shared queue. enqueue() returns a std::future for the task's result.
class ThreadPool {
public:
ThreadPool(size_t);
// schedules f(args...) for execution and returns a future for its result
template<class F, class... Args>
auto enqueue(F&& f, Args&& ... args)
->std::future<typename std::result_of<F(Args...)>::type>;
~ThreadPool();
private:
// need to keep track of threads so we can join them
std::vector< std::thread > workers;
// the task queue
std::queue< std::function<void()> > tasks;
// synchronization
std::mutex queue_mutex;
std::condition_variable condition;
// set to true by the destructor; workers drain the queue and then exit
bool stop;
};
// the constructor just launches some amount of workers
inline ThreadPool::ThreadPool(size_t threads)
: stop(false)
{
for (size_t i = 0; i < threads; ++i)
workers.emplace_back(
[this]
{
for (;;)
{
std::function<void()> task;
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
// the predicate also guards against spurious wakeups
this->condition.wait(lock,
[this] { return this->stop || !this->tasks.empty(); });
// only exit once stopped AND all queued work is done
if (this->stop && this->tasks.empty())
return;
task = std::move(this->tasks.front());
this->tasks.pop();
}
// run the task outside the lock so other workers can dequeue
task();
}
}
);
}
// add new work item to the pool
template<class F, class... Args>
auto ThreadPool::enqueue(F&& f, Args&& ... args)
-> std::future<typename std::result_of<F(Args...)>::type>
{
using return_type = typename std::result_of<F(Args...)>::type;
// package the bound call so its result (or exception) is delivered through
// the returned future
auto task = std::make_shared< std::packaged_task<return_type()> >(
std::bind(std::forward<F>(f), std::forward<Args>(args)...)
);
std::future<return_type> res = task->get_future();
{
std::unique_lock<std::mutex> lock(queue_mutex);
// don't allow enqueueing after stopping the pool
if (stop)
throw std::runtime_error("enqueue on stopped ThreadPool");
tasks.emplace([task]() { (*task)(); });
}
condition.notify_one();
return res;
}
// the destructor joins all threads
inline ThreadPool::~ThreadPool()
{
{
// stop is written under the lock so a worker between its empty-check and
// its wait cannot miss the notification
std::unique_lock<std::mutex> lock(queue_mutex);
stop = true;
}
condition.notify_all();
for (std::thread& worker : workers)
worker.join();
}

View File

@@ -1,300 +0,0 @@
#include <string>
#include <vector>
#include <unordered_map>
#include <iostream>
#include <algorithm>
using std::unordered_map;
using std::vector;
using std::string;
using std::cout;
using std::cerr;
using std::endl;
// Holds the raw string values parsed for one command line argument and
// converts them to a requested type on demand.
class AValue{
public:

	vector<string> values;

	AValue(vector<string> values) {
		this->values = values;
	}

	// Converts the first value to T; yields <alternative> if no value exists.
	template<typename T>
	T as(T alternative) {
		if (values.empty()) {
			return alternative;
		}

		return T(values[0]);
	}

	// Converts the first value to T; yields a default-constructed T if no
	// value exists.
	template<typename T>
	T as() {
		if (values.empty()) {
			return T();
		}

		return T(values[0]);
	}
};
// All values as strings; <alternative> if none were given.
template<> vector<string> AValue::as<vector<string>>(vector<string> alternative) {
	if (values.empty()) {
		return alternative;
	}

	return values;
}

// All values as strings; an empty vector if none were given.
template<> vector<string> AValue::as<vector<string>>() {
	if (values.empty()) {
		return vector<string>{};
	}

	return values;
}

// All values parsed as doubles; <alternative> if none were given.
// Throws (via stod) if a value is not numeric.
template<> vector<double> AValue::as<vector<double>>(vector<double> alternative) {
	vector<double> res;

	for (auto &v : values) {
		res.push_back(std::stod(v));
	}

	if (res.empty()) {
		return alternative;
	}

	return res;
}

// All values parsed as doubles; an empty vector if none were given.
template<> vector<double> AValue::as<vector<double>>() {
	return as<vector<double>>({});
}

// First value parsed as double; <alternative> if none were given.
template<> double AValue::as<double>(double alternative) {
	if (values.empty()) {
		return alternative;
	}

	return std::stod(values[0]);
}

// First value parsed as double; 0.0 if none were given.
template<> double AValue::as<double>() {
	if (values.empty()) {
		return 0.0;
	}

	return std::stod(values[0]);
}

// First value parsed as int; <alternative> if none were given.
template<> int AValue::as<int>(int alternative) {
	if (values.empty()) {
		return alternative;
	}

	return std::stoi(values[0]);
}

// First value parsed as int; 0 if none were given.
template<> int AValue::as<int>() {
	if (values.empty()) {
		return 0;
	}

	return std::stoi(values[0]);
}
// Describes one accepted command line argument. The id may contain multiple
// comma-separated aliases, e.g. "o,output" for "-o" and "--output".
class Argument {
private:

	// Splits <str> at any of the given delimiter characters. Consecutive
	// delimiters yield empty tokens; a trailing delimiter yields a trailing
	// empty token.
	vector<string> split(string str, vector<char> delimiters) {

		vector<string> tokens;

		auto isDelimiter = [&delimiters](char ch) {
			for (auto &delimiter : delimiters) {
				if (ch == delimiter) {
					return true;
				}
			}

			return false;
		};

		// guard: without it, the trailing-delimiter check below would index
		// str[str.size() - 1] on an empty string, which is undefined behavior
		if (str.empty()) {
			return tokens;
		}

		// size_t indices avoid the previous signed/unsigned comparison
		size_t start = 0;
		for (size_t i = 0; i < str.size(); i++) {
			if (isDelimiter(str[i])) {
				if (start < i) {
					auto token = str.substr(start, i - start);
					tokens.push_back(token);
				} else {
					tokens.push_back("");
				}

				start = i + 1;
			}
		}

		if (start < str.size()) {
			tokens.push_back(str.substr(start));
		} else if (isDelimiter(str[str.size() - 1])) {
			tokens.push_back("");
		}

		return tokens;
	}

public:

	string id = "";
	string description = "";

	Argument(string id, string description) {
		this->id = id;
		this->description = description;
	}

	// true if <name> matches any of the comma-separated aliases in id
	bool is(string name) {
		auto tokens = split(id, { ',' });

		for (auto token : tokens) {
			if (token == name) {
				return true;
			}
		}

		return false;
	}

	// the first alias longer than one character, e.g. "output"; "" if none
	string fullname() {
		auto tokens = split(id, { ',' });

		for (auto token : tokens) {
			if (token.size() > 1) {
				return token;
			}
		}

		return "";
	}

	// the first single-character alias, e.g. "o"; "" if none
	string shortname() {
		auto tokens = split(id, { ',' });

		for (auto token : tokens) {
			if (token.size() == 1) {
				return token;
			}
		}

		return "";
	}
};
class Arguments {
private:
bool startsWith(const string &str, const string &prefix) {
if (str.size() < prefix.size()) {
return false;
}
return str.substr(0, prefix.size()).compare(prefix) == 0;
}
public:
int argc = 0;
char **argv = nullptr;
bool ignoreFirst = true;
vector<string> tokens;
vector<Argument> argdefs;
unordered_map<string, vector<string>> map;
Arguments(int argc, char **argv, bool ignoreFirst = true) {
this->argc = argc;
this->argv = argv;
this->ignoreFirst = ignoreFirst;
for (int i = ignoreFirst ? 1 : 0; i < argc; i++) {
string token = string(argv[i]);
tokens.push_back(token);
}
string currentKey = "";
map.insert({ currentKey, {} });
for (string token : tokens) {
if(startsWith(token, "---")) {
cerr << "Invalid argument: " << token << endl;
exit(1);
} else if (startsWith(token, "--")) {
currentKey = token.substr(2);
map.insert({ currentKey,{} });
} else if (startsWith(token, "-")) {
currentKey = token.substr(1);
map.insert({ currentKey,{} });
} else {
map[currentKey].push_back(token);
}
}
}
void addArgument(string id, string description) {
Argument arg(id, description);
argdefs.push_back(arg);
}
Argument *getArgument(string name) {
for (Argument &arg : argdefs) {
if (arg.is(name)) {
return &arg;
}
}
return nullptr;
}
vector<string> keys() {
vector<string> keys;
for (auto entry : map) {
keys.push_back(entry.first);
}
return keys;
}
string usage() {
std::stringstream ss;
vector<string> keys;
for (auto argdef : argdefs) {
stringstream ssKey;
if (!argdef.shortname().empty()) {
ssKey << " -" << argdef.shortname();
if (!argdef.fullname().empty()) {
ssKey << " [ --" << argdef.fullname() << " ]";
}
} else if(!argdef.fullname().empty()) {
ssKey << " --" << argdef.fullname();
}
keys.push_back(ssKey.str());
}
int keyColumnLength = 0;
for (auto key : keys) {
keyColumnLength = std::max(int(key.size()), keyColumnLength);
}
keyColumnLength = keyColumnLength + 2;
for (int i = 0; i < argdefs.size(); i++) {
keys[i].resize(keyColumnLength, ' ');
ss << keys[i] << argdefs[i].description << endl;
}
return ss.str();
}
bool has(string name) {
Argument *arg = getArgument(name);
if (arg == nullptr) {
return false;
}
for (auto entry : map) {
if (arg->is(entry.first)) {
return true;
}
}
return false;
}
AValue get(string name) {
Argument *arg = getArgument(name);
for (auto entry : map) {
if (arg->is(entry.first)) {
return AValue(entry.second);
}
}
return AValue({});
}
};

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,24 @@
from: https://github.com/nlohmann/json/releases
LICENSE:
MIT License
Copyright (c) 2013-2019 Niels Lohmann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,261 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ALLOCATORS_H_
#define RAPIDJSON_ALLOCATORS_H_
#include "rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
// Allocator
/*! \class rapidjson::Allocator
\brief Concept for allocating, resizing and freeing memory block.
Note that Malloc() and Realloc() are non-static but Free() is static.
So if an allocator need to support Free(), it needs to put its pointer in
the header of memory block.
\code
concept Allocator {
static const bool kNeedFree; //!< Whether this allocator needs to call Free().
// Allocate a memory block.
// \param size of the memory block in bytes.
// \returns pointer to the memory block.
void* Malloc(size_t size);
// Resize a memory block.
// \param originalPtr The pointer to current memory block. Null pointer is permitted.
// \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
// \param newSize the new size in bytes.
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);
// Free a memory block.
// \param pointer to the memory block. Null pointer is permitted.
static void Free(void *ptr);
};
\endcode
*/
///////////////////////////////////////////////////////////////////////////////
// CrtAllocator
//! C-runtime library allocator.
/*! Thin wrapper around the standard C memory routines malloc/realloc/free.
    \note implements Allocator concept
*/
class CrtAllocator {
public:
    static const bool kNeedFree = true;

    //! Allocates \c size bytes; malloc(0) is implementation defined, so a
    //! zero-byte request is normalized to NULL.
    void* Malloc(size_t size) {
        return size ? std::malloc(size) : NULL;
    }

    //! Resizes a block; a new size of zero frees the block and returns NULL.
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        (void)originalSize;  // std::realloc tracks sizes itself
        if (newSize == 0) {
            std::free(originalPtr);
            return NULL;
        }
        return std::realloc(originalPtr, newSize);
    }

    //! Releases a block previously returned by Malloc/Realloc.
    static void Free(void* ptr) { std::free(ptr); }
};
///////////////////////////////////////////////////////////////////////////////
// MemoryPoolAllocator
//! Default memory allocator used by the parser and DOM.
/*! This allocator allocate memory blocks from pre-allocated memory chunks.
    It does not free memory blocks. And Realloc() only allocate new memory.
    The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.
    User may also supply a buffer as the first chunk.
    If the user-buffer is full then additional chunks are allocated by BaseAllocator.
    The user-buffer is not deallocated by this allocator.
    \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
    \note implements Allocator concept
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
public:
    static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)

    //! Constructor with chunkSize.
    /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
    {
    }

    //! Constructor with user-supplied buffer.
    /*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size.
        The user buffer will not be deallocated when this allocator is destructed.
        \param buffer User supplied buffer.
        \param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader).
        \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
    {
        RAPIDJSON_ASSERT(buffer != 0);
        RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
        // The ChunkHeader is placed at the front of the user buffer itself,
        // so the usable capacity is the buffer size minus the header.
        chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
        chunkHead_->capacity = size - sizeof(ChunkHeader);
        chunkHead_->size = 0;
        chunkHead_->next = 0;
    }

    //! Destructor.
    /*! This deallocates all memory chunks, excluding the user-supplied buffer.
    */
    ~MemoryPoolAllocator() {
        Clear();
        RAPIDJSON_DELETE(ownBaseAllocator_);
    }

    //! Deallocates all memory chunks, excluding the user-supplied buffer.
    void Clear() {
        // Walk the chunk list, freeing every chunk that this allocator owns.
        // The user buffer (if any) is always the tail of the list.
        while (chunkHead_ && chunkHead_ != userBuffer_) {
            ChunkHeader* next = chunkHead_->next;
            baseAllocator_->Free(chunkHead_);
            chunkHead_ = next;
        }
        if (chunkHead_ && chunkHead_ == userBuffer_)
            chunkHead_->size = 0; // Clear user buffer
    }

    //! Computes the total capacity of allocated memory chunks.
    /*! \return total capacity in bytes.
    */
    size_t Capacity() const {
        size_t capacity = 0;
        for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
            capacity += c->capacity;
        return capacity;
    }

    //! Computes the memory blocks allocated.
    /*! \return total used bytes.
    */
    size_t Size() const {
        size_t size = 0;
        for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
            size += c->size;
        return size;
    }

    //! Allocates a memory block. (concept Allocator)
    void* Malloc(size_t size) {
        if (!size)
            return NULL;
        size = RAPIDJSON_ALIGN(size);
        // Only the head chunk serves allocations; start a new chunk when the
        // head cannot hold the (aligned) request. A request larger than the
        // configured chunk capacity gets a dedicated chunk of its own size.
        if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
            AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size);
        // Bump allocation: payload starts after the aligned header plus all
        // bytes already handed out from this chunk.
        void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
        chunkHead_->size += size;
        return buffer;
    }

    //! Resizes a memory block (concept Allocator)
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        if (originalPtr == 0)
            return Malloc(newSize);
        if (newSize == 0)
            return NULL;
        // Do not shrink if new size is smaller than original
        if (originalSize >= newSize)
            return originalPtr;
        // Simply expand it if it is the last allocation and there is sufficient space
        if (originalPtr == (char *)(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
            size_t increment = static_cast<size_t>(newSize - originalSize);
            increment = RAPIDJSON_ALIGN(increment);
            if (chunkHead_->size + increment <= chunkHead_->capacity) {
                chunkHead_->size += increment;
                return originalPtr;
            }
        }
        // Realloc process: allocate and copy memory, do not free original buffer.
        // The old bytes stay allocated in their chunk until Clear()/destruction.
        void* newBuffer = Malloc(newSize);
        RAPIDJSON_ASSERT(newBuffer != 0); // Do not handle out-of-memory explicitly.
        if (originalSize)
            std::memcpy(newBuffer, originalPtr, originalSize);
        return newBuffer;
    }

    //! Frees a memory block (concept Allocator)
    static void Free(void *ptr) { (void)ptr; } // Do nothing

private:
    //! Copy constructor is not permitted.
    MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
    //! Copy assignment operator is not permitted.
    MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;

    //! Creates a new chunk.
    /*! \param capacity Capacity of the chunk in bytes.
    */
    void AddChunk(size_t capacity) {
        // Lazily create an owned base allocator on the first chunk request.
        if (!baseAllocator_)
            ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator());
        // NOTE(review): the Malloc result is not checked here; an allocation
        // failure would dereference a null pointer below — confirm upstream.
        ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity));
        chunk->capacity = capacity;
        chunk->size = 0;
        // New chunk is prepended and becomes the (only) allocating head.
        chunk->next = chunkHead_;
        chunkHead_ = chunk;
    }

    static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity.

    //! Chunk header for perpending to each chunk.
    /*! Chunks are stored as a singly linked list.
    */
    struct ChunkHeader {
        size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
        size_t size;     //!< Current size of allocated memory in bytes.
        ChunkHeader *next; //!< Next chunk in the linked list.
    };

    ChunkHeader *chunkHead_;    //!< Head of the chunk linked-list. Only the head chunk serves allocation.
    size_t chunk_capacity_;     //!< The minimum capacity of chunk when they are allocated.
    void *userBuffer_;          //!< User supplied buffer.
    BaseAllocator* baseAllocator_;      //!< base allocator for allocating memory chunks.
    BaseAllocator* ownBaseAllocator_;   //!< base allocator created by this object.
};
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ALLOCATORS_H_

File diff suppressed because it is too large Load Diff

View File

@@ -1,261 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ENCODEDSTREAM_H_
#define RAPIDJSON_ENCODEDSTREAM_H_
#include "rapidjson.h"
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#endif
RAPIDJSON_NAMESPACE_BEGIN
//! Input byte stream wrapper with a statically bound encoding.
/*!
    \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
    \tparam InputByteStream Type of input byte stream. For example, FileReadStream.
*/
template <typename Encoding, typename InputByteStream>
class EncodedInputStream {
    RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
public:
    typedef typename Encoding::Ch Ch;

    //! Wraps \c is and buffers the first character, consuming a BOM if present.
    EncodedInputStream(InputByteStream& is) : is_(is), current_(Encoding::TakeBOM(is_)) {}

    //! Returns the buffered character without consuming it.
    Ch Peek() const { return current_; }

    //! Consumes the buffered character and refills the buffer from the stream.
    Ch Take() {
        const Ch consumed = current_;
        current_ = Encoding::Take(is_);
        return consumed;
    }

    size_t Tell() const { return is_.Tell(); }

    // Output interface — intentionally unsupported on an input stream.
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    // Non-copyable: holds a reference to the wrapped stream.
    EncodedInputStream(const EncodedInputStream&);
    EncodedInputStream& operator=(const EncodedInputStream&);

    InputByteStream& is_;
    Ch current_;
};
//! Output byte stream wrapper with statically bound encoding.
/*!
    \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
    \tparam OutputByteStream Type of output byte stream. For example, FileWriteStream.
*/
template <typename Encoding, typename OutputByteStream>
class EncodedOutputStream {
    RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
public:
    typedef typename Encoding::Ch Ch;

    //! Wraps \c os, optionally writing the encoding's BOM first.
    EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
        if (putBOM) {
            Encoding::PutBOM(os_);
        }
    }

    //! Encodes one character into the underlying byte stream.
    void Put(Ch c) { Encoding::Put(os_, c); }
    void Flush() { os_.Flush(); }

    // Input interface — intentionally unsupported on an output stream.
    Ch Peek() const { RAPIDJSON_ASSERT(false); }
    Ch Take() { RAPIDJSON_ASSERT(false); }
    size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    // Non-copyable: holds a reference to the wrapped stream.
    EncodedOutputStream(const EncodedOutputStream&);
    EncodedOutputStream& operator=(const EncodedOutputStream&);

    OutputByteStream& os_;
};
#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
//! Input stream wrapper with dynamically bound encoding and automatic encoding detection.
/*!
    \tparam CharType Type of character for reading.
    \tparam InputByteStream type of input byte stream to be wrapped.
*/
template <typename CharType, typename InputByteStream>
class AutoUTFInputStream {
    RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
public:
    typedef CharType Ch;

    //! Constructor.
    /*!
        \param is input stream to be wrapped.
        \param type UTF encoding type if it is not detected from the stream.
    */
    AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) {
        RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
        DetectType();
        // Bind the Take function for the detected encoding once, so every
        // subsequent read is a single indirect call.
        static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) };
        takeFunc_ = f[type_];
        current_ = takeFunc_(*is_);
    }

    UTFType GetType() const { return type_; }
    bool HasBOM() const { return hasBOM_; }

    Ch Peek() const { return current_; }
    Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; }
    size_t Tell() const { return is_->Tell(); }

    // Not implemented
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    AutoUTFInputStream(const AutoUTFInputStream&);
    AutoUTFInputStream& operator=(const AutoUTFInputStream&);

    // Detect encoding type with BOM or RFC 4627
    void DetectType() {
        // BOM (Byte Order Mark):
        // 00 00 FE FF  UTF-32BE
        // FF FE 00 00  UTF-32LE
        // FE FF        UTF-16BE
        // FF FE        UTF-16LE
        // EF BB BF     UTF-8
        // Peek4() yields null if fewer than 4 bytes are available; in that
        // case the user-provided type is kept as-is.
        const unsigned char* c = (const unsigned char *)is_->Peek4();
        if (!c)
            return;

        // Assemble the first four bytes little-endian for the BOM comparisons;
        // a matching BOM is consumed from the stream via Take().
        unsigned bom = static_cast<unsigned>(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24));
        hasBOM_ = false;
        if (bom == 0xFFFE0000)                  { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
        else if (bom == 0x0000FEFF)             { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
        else if ((bom & 0xFFFF) == 0xFFFE)      { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take();                          }
        else if ((bom & 0xFFFF) == 0xFEFF)      { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take();                          }
        else if ((bom & 0xFFFFFF) == 0xBFBBEF)  { type_ = kUTF8;    hasBOM_ = true; is_->Take(); is_->Take(); is_->Take();             }

        // RFC 4627: Section 3
        // "Since the first two characters of a JSON text will always be ASCII
        // characters [RFC0020], it is possible to determine whether an octet
        // stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
        // at the pattern of nulls in the first four octets."
        // 00 00 00 xx  UTF-32BE
        // 00 xx 00 xx  UTF-16BE
        // xx 00 00 00  UTF-32LE
        // xx 00 xx 00  UTF-16LE
        // xx xx xx xx  UTF-8
        if (!hasBOM_) {
            // One bit per non-null octet; unmatched patterns fall through to
            // the type supplied by the caller.
            unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0);
            switch (pattern) {
            case 0x08: type_ = kUTF32BE; break;
            case 0x0A: type_ = kUTF16BE; break;
            case 0x01: type_ = kUTF32LE; break;
            case 0x05: type_ = kUTF16LE; break;
            case 0x0F: type_ = kUTF8;    break;
            default: break; // Use type defined by user.
            }
        }

        // Runtime check whether the size of character type is sufficient. It only perform checks with assertion.
        if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
        if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
    }

    typedef Ch (*TakeFunc)(InputByteStream& is);
    InputByteStream* is_;
    UTFType type_;
    Ch current_;
    TakeFunc takeFunc_;   // Take() of the detected encoding, bound in the constructor.
    bool hasBOM_;
};
//! Output stream wrapper with dynamically bound encoding and automatic encoding detection.
/*!
    \tparam CharType Type of character for writing.
    \tparam OutputByteStream type of output byte stream to be wrapped.
*/
template <typename CharType, typename OutputByteStream>
class AutoUTFOutputStream {
    RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
public:
    typedef CharType Ch;

    //! Constructor.
    /*!
        \param os output stream to be wrapped.
        \param type UTF encoding type.
        \param putBOM Whether to write BOM at the beginning of the stream.
    */
    AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) {
        RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);

        // Runtime check whether the size of character type is sufficient. It only perform checks with assertion.
        if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
        if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);

        // Bind the Put function for the requested encoding once; every write
        // then goes through a single indirect call.
        static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) };
        putFunc_ = f[type_];

        if (putBOM)
            PutBOM();
    }

    UTFType GetType() const { return type_; }

    void Put(Ch c) { putFunc_(*os_, c); }
    void Flush() { os_->Flush(); }

    // Not implemented
    Ch Peek() const { RAPIDJSON_ASSERT(false); }
    Ch Take() { RAPIDJSON_ASSERT(false); }
    size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    AutoUTFOutputStream(const AutoUTFOutputStream&);
    AutoUTFOutputStream& operator=(const AutoUTFOutputStream&);

    // Writes the BOM of the bound encoding via the per-encoding PutBOM table.
    void PutBOM() {
        typedef void (*PutBOMFunc)(OutputByteStream&);
        static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) };
        f[type_](*os_);
    }

    typedef void (*PutFunc)(OutputByteStream&, Ch);

    OutputByteStream* os_;
    UTFType type_;
    PutFunc putFunc_;   // Put() of the bound encoding, set in the constructor.
};
#undef RAPIDJSON_ENCODINGS_FUNC
RAPIDJSON_NAMESPACE_END
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
#endif // RAPIDJSON_ENCODEDSTREAM_H_

View File

@@ -1,625 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ENCODINGS_H_
#define RAPIDJSON_ENCODINGS_H_
#include "rapidjson.h"
#ifdef _MSC_VER
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data
RAPIDJSON_DIAG_OFF(4702) // unreachable code
#elif defined(__GNUC__)
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
RAPIDJSON_DIAG_OFF(overflow)
#endif
RAPIDJSON_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
// Encoding
/*! \class rapidjson::Encoding
\brief Concept for encoding of Unicode characters.
\code
concept Encoding {
typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition.
enum { supportUnicode = 1 }; // or 0 if not supporting unicode
//! \brief Encode a Unicode codepoint to an output stream.
//! \param os Output stream.
//! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively.
template<typename OutputStream>
static void Encode(OutputStream& os, unsigned codepoint);
//! \brief Decode a Unicode codepoint from an input stream.
//! \param is Input stream.
//! \param codepoint Output of the unicode codepoint.
//! \return true if a valid codepoint can be decoded from the stream.
template <typename InputStream>
static bool Decode(InputStream& is, unsigned* codepoint);
//! \brief Validate one Unicode codepoint from an encoded stream.
//! \param is Input stream to obtain codepoint.
//! \param os Output for copying one codepoint.
//! \return true if it is valid.
//! \note This function just validating and copying the codepoint without actually decode it.
template <typename InputStream, typename OutputStream>
static bool Validate(InputStream& is, OutputStream& os);
// The following functions are deal with byte streams.
//! Take a character from input byte stream, skip BOM if exist.
template <typename InputByteStream>
static CharType TakeBOM(InputByteStream& is);
//! Take a character from input byte stream.
template <typename InputByteStream>
static Ch Take(InputByteStream& is);
//! Put BOM to output byte stream.
template <typename OutputByteStream>
static void PutBOM(OutputByteStream& os);
//! Put a character to output byte stream.
template <typename OutputByteStream>
static void Put(OutputByteStream& os, Ch c);
};
\endcode
*/
///////////////////////////////////////////////////////////////////////////////
// UTF8
//! UTF-8 encoding.
/*! http://en.wikipedia.org/wiki/UTF-8
    http://tools.ietf.org/html/rfc3629
    \tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char.
    \note implements Encoding concept
*/
template<typename CharType = char>
struct UTF8 {
    typedef CharType Ch;

    enum { supportUnicode = 1 };

    //! Encodes a codepoint as 1-4 UTF-8 bytes, chosen by codepoint range.
    template<typename OutputStream>
    static void Encode(OutputStream& os, unsigned codepoint) {
        if (codepoint <= 0x7F)
            os.Put(static_cast<Ch>(codepoint & 0xFF));
        else if (codepoint <= 0x7FF) {
            os.Put(static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
            os.Put(static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
        }
        else if (codepoint <= 0xFFFF) {
            os.Put(static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
            os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
            os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
        }
        else {
            RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
            os.Put(static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
            os.Put(static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
            os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
            os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
        }
    }

    //! Decodes one codepoint; returns false for malformed sequences.
    /*! COPY() shifts 6 more payload bits in; TRANS(mask) validates the
        current continuation byte's range class; TAIL() does both for a
        plain continuation byte.
    */
    template <typename InputStream>
    static bool Decode(InputStream& is, unsigned* codepoint) {
#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | ((unsigned char)c & 0x3Fu)
#define TRANS(mask) result &= ((GetRange((unsigned char)c) & mask) != 0)
#define TAIL() COPY(); TRANS(0x70)
        Ch c = is.Take();
        if (!(c & 0x80)) {
            // Single-byte (ASCII) fast path.
            *codepoint = (unsigned char)c;
            return true;
        }

        unsigned char type = GetRange((unsigned char)c);
        // Mask off the length-marker bits of the lead byte to get its payload.
        *codepoint = (0xFF >> type) & (unsigned char)c;
        bool result = true;
        switch (type) {
        case 2: TAIL(); return result;
        case 3: TAIL(); TAIL(); return result;
        case 4: COPY(); TRANS(0x50); TAIL(); return result;
        case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
        case 6: TAIL(); TAIL(); TAIL(); return result;
        case 10: COPY(); TRANS(0x20); TAIL(); return result;
        case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
        default: return false;
        }
#undef COPY
#undef TRANS
#undef TAIL
    }

    //! Validates and copies one codepoint without decoding its value.
    template <typename InputStream, typename OutputStream>
    static bool Validate(InputStream& is, OutputStream& os) {
#define COPY() os.Put(c = is.Take())
#define TRANS(mask) result &= ((GetRange((unsigned char)c) & mask) != 0)
#define TAIL() COPY(); TRANS(0x70)
        Ch c;
        COPY();
        if (!(c & 0x80))
            return true;

        bool result = true;
        switch (GetRange((unsigned char)c)) {
        case 2: TAIL(); return result;
        case 3: TAIL(); TAIL(); return result;
        case 4: COPY(); TRANS(0x50); TAIL(); return result;
        case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
        case 6: TAIL(); TAIL(); TAIL(); return result;
        case 10: COPY(); TRANS(0x20); TAIL(); return result;
        case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
        default: return false;
        }
#undef COPY
#undef TRANS
#undef TAIL
    }

    //! Classifies a byte for the DFA used by Decode()/Validate().
    static unsigned char GetRange(unsigned char c) {
        // Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
        // With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types.
        static const unsigned char type[] = {
            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
            0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
            0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,
            0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
            0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
            8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
            10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
        };
        return type[c];
    }

    //! Reads the first code unit, transparently skipping a EF BB BF BOM.
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        Ch c = Take(is);
        if ((unsigned char)c != 0xEFu) return c;
        c = is.Take();
        if ((unsigned char)c != 0xBBu) return c;
        c = is.Take();
        if ((unsigned char)c != 0xBFu) return c;
        c = is.Take();
        return c;
    }

    template <typename InputByteStream>
    static Ch Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        return is.Take();
    }

    //! Writes the UTF-8 byte order mark (EF BB BF).
    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(0xEFu); os.Put(0xBBu); os.Put(0xBFu);
    }

    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, Ch c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(c));
    }
};
///////////////////////////////////////////////////////////////////////////////
// UTF16
//! UTF-16 encoding.
/*! http://en.wikipedia.org/wiki/UTF-16
    http://tools.ietf.org/html/rfc2781
    \tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead.
    \note implements Encoding concept
    \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness.
    For streaming, use UTF16LE and UTF16BE, which handle endianness.
*/
template<typename CharType = wchar_t>
struct UTF16 {
    typedef CharType Ch;
    RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2);

    enum { supportUnicode = 1 };

    //! Encodes a codepoint as one code unit, or a surrogate pair above U+FFFF.
    template<typename OutputStream>
    static void Encode(OutputStream& os, unsigned codepoint) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
        if (codepoint <= 0xFFFF) {
            RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
            os.Put(static_cast<typename OutputStream::Ch>(codepoint));
        }
        else {
            RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
            // Split (codepoint - 0x10000) into a high (0xD800+) and
            // low (0xDC00+) surrogate of 10 bits each.
            unsigned v = codepoint - 0x10000;
            os.Put(static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
            os.Put((v & 0x3FF) | 0xDC00);
        }
    }

    //! Decodes one codepoint; returns false for a lone or malformed surrogate.
    template <typename InputStream>
    static bool Decode(InputStream& is, unsigned* codepoint) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
        Ch c = is.Take();
        if (c < 0xD800 || c > 0xDFFF) {
            // Basic Multilingual Plane: the code unit is the codepoint.
            *codepoint = c;
            return true;
        }
        else if (c <= 0xDBFF) {
            // High surrogate: combine with the following low surrogate.
            *codepoint = (c & 0x3FF) << 10;
            c = is.Take();
            *codepoint |= (c & 0x3FF);
            *codepoint += 0x10000;
            return c >= 0xDC00 && c <= 0xDFFF;
        }
        // Low surrogate without a preceding high surrogate: invalid.
        return false;
    }

    //! Validates and copies one codepoint without decoding its value.
    template <typename InputStream, typename OutputStream>
    static bool Validate(InputStream& is, OutputStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
        Ch c;
        os.Put(c = is.Take());
        if (c < 0xD800 || c > 0xDFFF)
            return true;
        else if (c <= 0xDBFF) {
            os.Put(c = is.Take());
            return c >= 0xDC00 && c <= 0xDFFF;
        }
        return false;
    }
};
//! UTF-16 little endian encoding (byte-stream framing for UTF16).
template<typename CharType = wchar_t>
struct UTF16LE : UTF16<CharType> {
    //! Reads the first code unit, silently dropping a leading U+FEFF BOM.
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        CharType first = Take(is);
        if ((unsigned short)first == 0xFEFFu)
            return Take(is);
        return first;
    }

    //! Assembles one code unit from two bytes, low byte first.
    template <typename InputByteStream>
    static CharType Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        const unsigned char lo = (unsigned char)is.Take();
        const unsigned char hi = (unsigned char)is.Take();
        return static_cast<CharType>(lo | (hi << 8));
    }

    //! Writes the UTF-16LE byte order mark (FF FE).
    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(0xFFu);
        os.Put(0xFEu);
    }

    //! Emits one code unit as two bytes, low byte first.
    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, CharType c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(c & 0xFFu);
        os.Put((c >> 8) & 0xFFu);
    }
};
//! UTF-16 big endian encoding (byte-stream framing for UTF16).
template<typename CharType = wchar_t>
struct UTF16BE : UTF16<CharType> {
    //! Reads the first code unit, silently dropping a leading U+FEFF BOM.
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        CharType first = Take(is);
        if ((unsigned short)first == 0xFEFFu)
            return Take(is);
        return first;
    }

    //! Assembles one code unit from two bytes, high byte first.
    template <typename InputByteStream>
    static CharType Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        const unsigned char hi = (unsigned char)is.Take();
        const unsigned char lo = (unsigned char)is.Take();
        return static_cast<CharType>((hi << 8) | lo);
    }

    //! Writes the UTF-16BE byte order mark (FE FF).
    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(0xFEu);
        os.Put(0xFFu);
    }

    //! Emits one code unit as two bytes, high byte first.
    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, CharType c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put((c >> 8) & 0xFFu);
        os.Put(c & 0xFFu);
    }
};
///////////////////////////////////////////////////////////////////////////////
// UTF32
//! UTF-32 encoding.
/*! http://en.wikipedia.org/wiki/UTF-32
\tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead.
\note implements Encoding concept
\note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness.
For streaming, use UTF32LE and UTF32BE, which handle endianness.
*/
template<typename CharType = unsigned>
struct UTF32 {
    typedef CharType Ch;
    RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4);
    enum { supportUnicode = 1 };
    //! Store a code point directly as a single 32-bit code unit.
    template<typename OutputStream>
    static void Encode(OutputStream& os, unsigned codepoint) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
        RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
        os.Put(codepoint);
    }
    //! Read a single unit; it is the code point itself when within Unicode range.
    template <typename InputStream>
    static bool Decode(InputStream& is, unsigned* codepoint) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
        const Ch unit = is.Take();
        *codepoint = unit;
        return unit <= 0x10FFFF;
    }
    //! Copy a single unit through, reporting whether it is within Unicode range.
    template <typename InputStream, typename OutputStream>
    static bool Validate(InputStream& is, OutputStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
        const Ch unit = is.Take();
        os.Put(unit);
        return unit <= 0x10FFFF;
    }
};
//! UTF-32 little endian encoding.
template<typename CharType = unsigned>
struct UTF32LE : UTF32<CharType> {
    //! Take one code unit, consuming a leading byte-order mark (U+FEFF) if present.
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        CharType c = Take(is);
        return static_cast<unsigned>(c) == 0x0000FEFFu ? Take(is) : c;
    }
    //! Read one UTF-32 code unit as four bytes, least significant byte first.
    template <typename InputByteStream>
    static CharType Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        // Assemble in unsigned arithmetic: shifting a promoted (signed) int by
        // 24 can push a byte >= 0x80 into the sign bit, which is not portable.
        unsigned c = static_cast<unsigned char>(is.Take());
        c |= static_cast<unsigned>(static_cast<unsigned char>(is.Take())) << 8;
        c |= static_cast<unsigned>(static_cast<unsigned char>(is.Take())) << 16;
        c |= static_cast<unsigned>(static_cast<unsigned char>(is.Take())) << 24;
        return static_cast<CharType>(c);
    }
    //! Write the UTF-32LE byte-order mark (bytes FF FE 00 00).
    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
    }
    //! Write one UTF-32 code unit as four bytes, least significant byte first.
    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, CharType c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 16) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 24) & 0xFFu));
    }
};
//! UTF-32 big endian encoding.
template<typename CharType = unsigned>
struct UTF32BE : UTF32<CharType> {
    //! Take one code unit, consuming a leading byte-order mark (U+FEFF) if present.
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        CharType c = Take(is);
        return static_cast<unsigned>(c) == 0x0000FEFFu ? Take(is) : c;
    }
    //! Read one UTF-32 code unit as four bytes, most significant byte first.
    template <typename InputByteStream>
    static CharType Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        // Assemble in unsigned arithmetic: shifting a promoted (signed) int by
        // 24 can push a byte >= 0x80 into the sign bit, which is not portable.
        unsigned c = static_cast<unsigned>(static_cast<unsigned char>(is.Take())) << 24;
        c |= static_cast<unsigned>(static_cast<unsigned char>(is.Take())) << 16;
        c |= static_cast<unsigned>(static_cast<unsigned char>(is.Take())) << 8;
        c |= static_cast<unsigned char>(is.Take());
        return static_cast<CharType>(c);
    }
    //! Write the UTF-32BE byte-order mark (bytes 00 00 FE FF).
    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
        os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
        os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
    }
    //! Write one UTF-32 code unit as four bytes, most significant byte first.
    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, CharType c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 24) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 16) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
    }
};
///////////////////////////////////////////////////////////////////////////////
// ASCII
//! ASCII encoding.
/*! http://en.wikipedia.org/wiki/ASCII
\tparam CharType Code unit for storing 7-bit ASCII data. Default is char.
\note implements Encoding concept
*/
template<typename CharType = char>
struct ASCII {
    typedef CharType Ch;
    enum { supportUnicode = 0 };
    //! Write one code point; only 7-bit values are representable in ASCII.
    template<typename OutputStream>
    static void Encode(OutputStream& os, unsigned codepoint) {
        RAPIDJSON_ASSERT(codepoint <= 0x7F);
        os.Put(static_cast<Ch>(codepoint & 0xFF));
    }
    //! Read one byte as a code point; valid only when it is 7-bit.
    template <typename InputStream>
    static bool Decode(InputStream& is, unsigned* codepoint) {
        unsigned char c = static_cast<unsigned char>(is.Take());
        *codepoint = c;
        return c <= 0x7F;
    }
    //! Copy one byte through and report whether it is 7-bit ASCII.
    template <typename InputStream, typename OutputStream>
    static bool Validate(InputStream& is, OutputStream& os) {
        // Cast explicitly, matching Decode(), instead of relying on an
        // implicit (possibly signed) char -> unsigned char conversion.
        unsigned char c = static_cast<unsigned char>(is.Take());
        os.Put(c);
        return c <= 0x7F;
    }
    //! ASCII has no byte-order mark; just take the first character.
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        Ch c = Take(is);
        return c;
    }
    template <typename InputByteStream>
    static Ch Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        return is.Take();
    }
    //! ASCII has no byte-order mark; nothing to emit.
    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        (void)os;
    }
    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, Ch c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(c));
    }
};
///////////////////////////////////////////////////////////////////////////////
// AutoUTF
//! Runtime-specified UTF encoding type of a stream.
// The enumerator values are used directly as indices into AutoUTF's
// per-encoding function tables (see RAPIDJSON_ENCODINGS_FUNC below), so they
// must remain dense and start at 0, in this exact order.
enum UTFType {
kUTF8 = 0, //!< UTF-8.
kUTF16LE = 1, //!< UTF-16 little endian.
kUTF16BE = 2, //!< UTF-16 big endian.
kUTF32LE = 3, //!< UTF-32 little endian.
kUTF32BE = 4 //!< UTF-32 big endian.
};
//! Dynamically select encoding according to stream's runtime-specified UTF encoding type.
/*! \note This class can be used with AutoUTFInputStream and AutoUTFOutputStream, which provide GetType().
*/
template<typename CharType>
struct AutoUTF {
typedef CharType Ch;
enum { supportUnicode = 1 };
// Expands to a comma-separated list of the named member of every supported
// encoding, in the same order as the UTFType enumerators (kUTF8 = 0 ...
// kUTF32BE = 4), so that GetType() can index the tables below directly.
#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
//! Dispatch Encode() to the output stream's runtime-selected encoding.
template<typename OutputStream>
RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) {
typedef void (*EncodeFunc)(OutputStream&, unsigned);
static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) };
(*f[os.GetType()])(os, codepoint);
}
//! Dispatch Decode() to the input stream's runtime-selected encoding.
template <typename InputStream>
RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) {
typedef bool (*DecodeFunc)(InputStream&, unsigned*);
static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) };
return (*f[is.GetType()])(is, codepoint);
}
//! Dispatch Validate() to the input stream's runtime-selected encoding.
template <typename InputStream, typename OutputStream>
RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
typedef bool (*ValidateFunc)(InputStream&, OutputStream&);
static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) };
return (*f[is.GetType()])(is, os);
}
#undef RAPIDJSON_ENCODINGS_FUNC
};
///////////////////////////////////////////////////////////////////////////////
// Transcoder
//! Encoding conversion.
template<typename SourceEncoding, typename TargetEncoding>
struct Transcoder {
    //! Take one Unicode code point from the source stream and re-encode it into the target stream.
    /*! \return false if the source contains an invalid sequence; nothing is written in that case. */
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) {
        unsigned cp = 0;
        const bool decoded = SourceEncoding::Decode(is, &cp);
        if (decoded)
            TargetEncoding::Encode(os, cp);
        return decoded;
    }
    //! Validate one Unicode code point from an encoded stream.
    /*! Source and target encodings differ here, so validation must fully transcode. */
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        return Transcode(is, os);
    }
};
//! Specialization of Transcoder with same source and target encoding.
template<typename Encoding>
struct Transcoder<Encoding, Encoding> {
    //! Copy a single code unit verbatim.
    /*! \note Semantics differ from the primary template: this moves one code
        UNIT, not one whole code point, since no conversion is needed. */
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) {
        typename InputStream::Ch unit = is.Take();
        os.Put(unit);
        return true;
    }
    //! Source and target encodings are identical, so delegate to the encoding's own validator.
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        return Encoding::Validate(is, os);
    }
};
RAPIDJSON_NAMESPACE_END
// _MSC_VER (not the typo _MSV_VER) is MSVC's version macro; with the typo the
// RAPIDJSON_DIAG_POP below never fired on MSVC, leaving the diagnostic push
// issued earlier in this header (outside this excerpt) unbalanced.
#if defined(__GNUC__) || defined(_MSC_VER)
RAPIDJSON_DIAG_POP
#endif
#endif // RAPIDJSON_ENCODINGS_H_

View File

@@ -1,65 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ERROR_EN_H__
#define RAPIDJSON_ERROR_EN_H__
#include "error.h"
RAPIDJSON_NAMESPACE_BEGIN
//! Maps error code of parsing into error message.
/*!
\ingroup RAPIDJSON_ERRORS
\param parseErrorCode Error code obtained in parsing.
\return the error message.
\note User can make a copy of this function for localization.
Using switch-case is safer for future modification of error codes.
*/
inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) {
// Keep the cases in sync with the ParseErrorCode enumerators in error.h;
// unknown/future codes deliberately fall through to the default message.
switch (parseErrorCode) {
case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty.");
case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not follow by other values.");
case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value.");
case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");
case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");
case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid.");
case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string.");
case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string.");
case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string.");
case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double.");
case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number.");
case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number.");
case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error.");
case kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error.");
default:
return RAPIDJSON_ERROR_STRING("Unknown error.");
}
}
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ERROR_EN_H__

View File

@@ -1,146 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ERROR_ERROR_H__
#define RAPIDJSON_ERROR_ERROR_H__
#include "../rapidjson.h"
/*! \file error.h */
/*! \defgroup RAPIDJSON_ERRORS RapidJSON error handling */
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ERROR_CHARTYPE
//! Character type of error messages.
/*! \ingroup RAPIDJSON_ERRORS
The default character type is \c char.
On Windows, user can define this macro as \c TCHAR for supporting both
unicode/non-unicode settings.
*/
#ifndef RAPIDJSON_ERROR_CHARTYPE
#define RAPIDJSON_ERROR_CHARTYPE char
#endif
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ERROR_STRING
//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[].
/*! \ingroup RAPIDJSON_ERRORS
By default this conversion macro does nothing.
On Windows, user can define this macro as \c _T(x) for supporting both
unicode/non-unicode settings.
*/
#ifndef RAPIDJSON_ERROR_STRING
#define RAPIDJSON_ERROR_STRING(x) x
#endif
RAPIDJSON_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
// ParseErrorCode
//! Error code of parsing.
/*! \ingroup RAPIDJSON_ERRORS
\see GenericReader::Parse, GenericReader::GetParseErrorCode
*/
// Error codes reported by the parser. GetParseError_En() in error/en.h maps
// each enumerator to an English message and must be kept in sync.
enum ParseErrorCode {
kParseErrorNone = 0, //!< No error.
// Document-level errors.
kParseErrorDocumentEmpty, //!< The document is empty.
kParseErrorDocumentRootNotSingular, //!< The document root must not follow by other values.
kParseErrorValueInvalid, //!< Invalid value.
// Object parsing errors.
kParseErrorObjectMissName, //!< Missing a name for object member.
kParseErrorObjectMissColon, //!< Missing a colon after a name of object member.
kParseErrorObjectMissCommaOrCurlyBracket, //!< Missing a comma or '}' after an object member.
// Array parsing errors.
kParseErrorArrayMissCommaOrSquareBracket, //!< Missing a comma or ']' after an array element.
// String parsing errors.
kParseErrorStringUnicodeEscapeInvalidHex, //!< Incorrect hex digit after \\u escape in string.
kParseErrorStringUnicodeSurrogateInvalid, //!< The surrogate pair in string is invalid.
kParseErrorStringEscapeInvalid, //!< Invalid escape character in string.
kParseErrorStringMissQuotationMark, //!< Missing a closing quotation mark in string.
kParseErrorStringInvalidEncoding, //!< Invalid encoding in string.
// Number parsing errors.
kParseErrorNumberTooBig, //!< Number too big to be stored in double.
kParseErrorNumberMissFraction, //!< Miss fraction part in number.
kParseErrorNumberMissExponent, //!< Miss exponent in number.
// Other errors.
kParseErrorTermination, //!< Parsing was terminated.
kParseErrorUnspecificSyntaxError //!< Unspecific syntax error.
};
//! Result of parsing (wraps ParseErrorCode)
/*!
\ingroup RAPIDJSON_ERRORS
\code
Document doc;
ParseResult ok = doc.Parse("[42]");
if (!ok) {
fprintf(stderr, "JSON parse error: %s (%u)",
GetParseError_En(ok.Code()), ok.Offset());
exit(EXIT_FAILURE);
}
\endcode
\see GenericReader::Parse, GenericDocument::Parse
*/
struct ParseResult {
//! Default constructor, no error.
ParseResult() : code_(kParseErrorNone), offset_(0) {}
//! Constructor to set an error.
ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {}
//! Get the error code.
ParseErrorCode Code() const { return code_; }
//! Get the error offset, if \ref IsError(), 0 otherwise.
size_t Offset() const { return offset_; }
//! Conversion to \c bool, returns \c true, iff !\ref IsError().
operator bool() const { return !IsError(); }
//! Whether the result is an error.
bool IsError() const { return code_ != kParseErrorNone; }
bool operator==(const ParseResult& that) const { return code_ == that.code_; }
bool operator==(ParseErrorCode code) const { return code_ == code; }
friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; }
//! Reset error code.
void Clear() { Set(kParseErrorNone); }
//! Update error code and offset.
void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; }
private:
ParseErrorCode code_;
size_t offset_;
};
//! Function pointer type of GetParseError().
/*! \ingroup RAPIDJSON_ERRORS
This is the prototype for \c GetParseError_X(), where \c X is a locale.
User can dynamically change locale in runtime, e.g.:
\code
GetParseErrorFunc GetParseError = GetParseError_En; // or whatever
const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode());
\endcode
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode);
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ERROR_ERROR_H__

View File

@@ -1,88 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_FILEREADSTREAM_H_
#define RAPIDJSON_FILEREADSTREAM_H_
#include "rapidjson.h"
#include <cstdio>
RAPIDJSON_NAMESPACE_BEGIN
//! File byte stream for input using fread().
/*!
\note implements Stream concept
*/
class FileReadStream {
public:
typedef char Ch; //!< Character type (byte).
//! Constructor.
/*!
\param fp File pointer opened for read.
\param buffer user-supplied buffer.
\param bufferSize size of buffer in bytes. Must >=4 bytes.
*/
FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
RAPIDJSON_ASSERT(fp_ != 0);
RAPIDJSON_ASSERT(bufferSize >= 4);
// Pre-fill the buffer so Peek()/Take() always have a character available.
Read();
}
//! Current character without consuming it.
Ch Peek() const { return *current_; }
//! Consume and return the current character, advancing (and refilling) as needed.
Ch Take() { Ch c = *current_; Read(); return c; }
//! Number of characters consumed so far (count_ covers fully drained refills).
size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }
// Not implemented
void Put(Ch) { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
// For encoding detection only.
const Ch* Peek4() const {
return (current_ + 4 <= bufferLast_) ? current_ : 0;
}
private:
// Advance to the next character, refilling the buffer from the file when exhausted.
void Read() {
if (current_ < bufferLast_)
++current_;
else if (!eof_) {
// Buffer drained: account for the characters consumed, then refill.
count_ += readCount_;
readCount_ = fread(buffer_, 1, bufferSize_, fp_);
bufferLast_ = buffer_ + readCount_ - 1;
current_ = buffer_;
if (readCount_ < bufferSize_) {
// Short read means end of file: append a '\0' sentinel so Peek()
// yields a terminator, and stop issuing further fread() calls.
buffer_[readCount_] = '\0';
++bufferLast_;
eof_ = true;
}
}
}
std::FILE* fp_;
Ch *buffer_;          // user-supplied storage, never owned
size_t bufferSize_;
Ch *bufferLast_;      // last valid character of the current fill
Ch *current_;         // cursor within [buffer_, bufferLast_]
size_t readCount_;    // characters obtained by the most recent fread()
size_t count_; //!< Number of characters read
bool eof_;
};
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_FILEREADSTREAM_H_

View File

@@ -1,95 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_FILEWRITESTREAM_H_
#define RAPIDJSON_FILEWRITESTREAM_H_
#include "rapidjson.h"
#include <cstdio>
RAPIDJSON_NAMESPACE_BEGIN
//! Wrapper of C file stream for output using fwrite().
/*!
\note implements Stream concept
*/
class FileWriteStream {
public:
typedef char Ch; //!< Character type. Only support char.
//! Constructor.
/*!
\param fp File pointer opened for write.
\param buffer user-supplied buffer used to batch fwrite() calls.
\param bufferSize size of buffer in bytes.
*/
FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) {
RAPIDJSON_ASSERT(fp_ != 0);
}
//! Buffer one character, flushing to the file first if the buffer is full.
void Put(char c) {
if (current_ >= bufferEnd_)
Flush();
*current_++ = c;
}
//! Write character c repeated n times, filling whole buffer spans with memset().
void PutN(char c, size_t n) {
size_t avail = static_cast<size_t>(bufferEnd_ - current_);
while (n > avail) {
std::memset(current_, c, avail);
current_ += avail;
Flush();
n -= avail;
avail = static_cast<size_t>(bufferEnd_ - current_);
}
if (n > 0) {
std::memset(current_, c, n);
current_ += n;
}
}
//! Write buffered characters to the file and reset the buffer cursor.
void Flush() {
if (current_ != buffer_) {
size_t result = fwrite(buffer_, 1, static_cast<size_t>(current_ - buffer_), fp_);
if (result < static_cast<size_t>(current_ - buffer_)) {
// failure deliberately ignored at this time
// added to avoid warn_unused_result build errors
}
current_ = buffer_;
}
}
// Not implemented
char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
char Take() { RAPIDJSON_ASSERT(false); return 0; }
size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
private:
// Prohibit copy constructor & assignment operator.
FileWriteStream(const FileWriteStream&);
FileWriteStream& operator=(const FileWriteStream&);
std::FILE* fp_;
char *buffer_;     // user-supplied storage, never owned
char *bufferEnd_;  // one past the last usable byte
char *current_;    // next write position within the buffer
};
//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(FileWriteStream& stream, char c, size_t n) {
// Delegate to the member PutN(), which memset()s whole runs instead of per-character Put().
stream.PutN(c, n);
}
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_FILEWRITESTREAM_H_

View File

@@ -1,290 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_BIGINTEGER_H_
#define RAPIDJSON_BIGINTEGER_H_
#include "../rapidjson.h"
#if defined(_MSC_VER) && defined(_M_AMD64)
#include <intrin.h> // for _umul128
#pragma intrinsic(_umul128)
#endif
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Arbitrary-precision unsigned integer, stored as little-endian uint64_t digits.
class BigInteger {
public:
    typedef uint64_t Type;
    BigInteger(const BigInteger& rhs) : count_(rhs.count_) {
        std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
    }
    explicit BigInteger(uint64_t u) : count_(1) {
        digits_[0] = u;
    }
    //! Build from a run of decimal digit characters.
    /*! Digits are consumed 19 at a time: 10^19 is the largest power of ten
        below 2^64, so each chunk fits in a uint64_t. */
    BigInteger(const char* decimals, size_t length) : count_(1) {
        RAPIDJSON_ASSERT(length > 0);
        digits_[0] = 0;
        size_t i = 0;
        const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19
        while (length >= kMaxDigitPerIteration) {
            AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration);
            length -= kMaxDigitPerIteration;
            i += kMaxDigitPerIteration;
        }
        if (length > 0)
            AppendDecimal64(decimals + i, decimals + i + length);
    }
    BigInteger& operator=(const BigInteger &rhs)
    {
        if (this != &rhs) {
            count_ = rhs.count_;
            std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
        }
        return *this;
    }
    BigInteger& operator=(uint64_t u) {
        digits_[0] = u;
        count_ = 1;
        return *this;
    }
    //! Add a 64-bit value, propagating any carry through the higher digits.
    BigInteger& operator+=(uint64_t u) {
        Type backup = digits_[0];
        digits_[0] += u;
        for (size_t i = 0; i < count_ - 1; i++) {
            if (digits_[i] >= backup)
                return *this; // no carry
            backup = digits_[i + 1];
            digits_[i + 1] += 1;
        }
        // Last carry
        if (digits_[count_ - 1] < backup)
            PushBack(1);
        return *this;
    }
    //! Multiply by a 64-bit value using full 128-bit partial products.
    BigInteger& operator*=(uint64_t u) {
        if (u == 0) return *this = 0;
        if (u == 1) return *this;
        if (*this == 1) return *this = u;
        uint64_t k = 0; // carry from the previous digit
        for (size_t i = 0; i < count_; i++) {
            uint64_t hi;
            digits_[i] = MulAdd64(digits_[i], u, k, &hi);
            k = hi;
        }
        if (k > 0)
            PushBack(k);
        return *this;
    }
    //! Multiply by a 32-bit value using two 32x32->64 partial products per digit.
    BigInteger& operator*=(uint32_t u) {
        if (u == 0) return *this = 0;
        if (u == 1) return *this;
        if (*this == 1) return *this = u;
        uint64_t k = 0; // carry from the previous digit
        for (size_t i = 0; i < count_; i++) {
            const uint64_t c = digits_[i] >> 32;
            const uint64_t d = digits_[i] & 0xFFFFFFFF;
            const uint64_t uc = u * c;
            const uint64_t ud = u * d;
            const uint64_t p0 = ud + k;
            const uint64_t p1 = uc + (p0 >> 32);
            digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32);
            k = p1 >> 32;
        }
        if (k > 0)
            PushBack(k);
        return *this;
    }
    //! Shift left by an arbitrary number of bits.
    BigInteger& operator<<=(size_t shift) {
        if (IsZero() || shift == 0) return *this;
        size_t offset = shift / kTypeBit;     // whole 64-bit words
        size_t interShift = shift % kTypeBit; // remaining bits
        RAPIDJSON_ASSERT(count_ + offset <= kCapacity);
        if (interShift == 0) {
            // Move all count_ digits up by 'offset' words. The previous code
            // started the copy at &digits_[count_ - 1], which left the lower
            // digits unmoved and read past the valid digit range; the whole
            // array must be moved from its base. memmove() is required since
            // source and destination may overlap.
            std::memmove(digits_ + offset, digits_, count_ * sizeof(Type));
            count_ += offset;
        }
        else {
            digits_[count_] = 0;
            for (size_t i = count_; i > 0; i--)
                digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift));
            digits_[offset] = digits_[0] << interShift;
            count_ += offset;
            if (digits_[count_])
                count_++;
        }
        std::memset(digits_, 0, offset * sizeof(Type));
        return *this;
    }
    bool operator==(const BigInteger& rhs) const {
        return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0;
    }
    bool operator==(const Type rhs) const {
        return count_ == 1 && digits_[0] == rhs;
    }
    //! Multiply by 5^exp, using precomputed powers to minimize big-number passes.
    BigInteger& MultiplyPow5(unsigned exp) {
        static const uint32_t kPow5[12] = {
            5,
            5 * 5,
            5 * 5 * 5,
            5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5
        };
        if (exp == 0) return *this;
        for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27
        for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13
        if (exp > 0) *this *= kPow5[exp - 1];
        return *this;
    }
    //! Compute the absolute difference |*this - rhs| into *out.
    /*! \return true if *this < rhs (i.e. the subtraction was reversed).
        Assumes *this != rhs (asserted). */
    bool Difference(const BigInteger& rhs, BigInteger* out) const {
        int cmp = Compare(rhs);
        RAPIDJSON_ASSERT(cmp != 0);
        const BigInteger *a, *b; // Makes a > b
        bool ret;
        if (cmp < 0) { a = &rhs; b = this; ret = true; }
        else { a = this; b = &rhs; ret = false; }
        Type borrow = 0;
        for (size_t i = 0; i < a->count_; i++) {
            Type d = a->digits_[i] - borrow;
            if (i < b->count_)
                d -= b->digits_[i];
            // Unsigned wrap-around indicates a borrow into the next digit.
            borrow = (d > a->digits_[i]) ? 1 : 0;
            out->digits_[i] = d;
            if (d != 0)
                out->count_ = i + 1;
        }
        return ret;
    }
    //! Three-way comparison: -1, 0, or 1 as *this <, ==, > rhs.
    int Compare(const BigInteger& rhs) const {
        if (count_ != rhs.count_)
            return count_ < rhs.count_ ? -1 : 1;
        for (size_t i = count_; i-- > 0;)
            if (digits_[i] != rhs.digits_[i])
                return digits_[i] < rhs.digits_[i] ? -1 : 1;
        return 0;
    }
    size_t GetCount() const { return count_; }
    Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; }
    bool IsZero() const { return count_ == 1 && digits_[0] == 0; }
private:
    //! Fold up to 19 decimal digits into the value: *this = *this * 10^exp + u.
    void AppendDecimal64(const char* begin, const char* end) {
        uint64_t u = ParseUint64(begin, end);
        if (IsZero())
            *this = u;
        else {
            unsigned exp = static_cast<unsigned>(end - begin);
            (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u
        }
    }
    void PushBack(Type digit) {
        RAPIDJSON_ASSERT(count_ < kCapacity);
        digits_[count_++] = digit;
    }
    static uint64_t ParseUint64(const char* begin, const char* end) {
        uint64_t r = 0;
        for (const char* p = begin; p != end; ++p) {
            RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
            r = r * 10u + static_cast<unsigned>(*p - '0');
        }
        return r;
    }
    //! Full 64x64->128 multiply-add: returns low 64 bits of a * b + k, high in *outHigh.
    // Assume a * b + k < 2^128
    static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) {
#if defined(_MSC_VER) && defined(_M_AMD64)
        uint64_t low = _umul128(a, b, outHigh) + k;
        if (low < k)
            (*outHigh)++;
        return low;
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
        __extension__ typedef unsigned __int128 uint128;
        uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
        p += k;
        *outHigh = static_cast<uint64_t>(p >> 64);
        return static_cast<uint64_t>(p);
#else
        // Portable 32-bit split: combine four partial products with carries.
        const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32;
        uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1;
        x1 += (x0 >> 32); // can't give carry
        x1 += x2;
        if (x1 < x2)
            x3 += (static_cast<uint64_t>(1) << 32);
        uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF);
        uint64_t hi = x3 + (x1 >> 32);
        lo += k;
        if (lo < k)
            hi++;
        *outHigh = hi;
        return lo;
#endif
    }
    static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000
    // NOTE(review): dividing by sizeof(Type) (bytes) rather than kTypeBit (bits)
    // over-allocates digits_; harmless but worth confirming against upstream intent.
    static const size_t kCapacity = kBitCount / sizeof(Type);
    static const size_t kTypeBit = sizeof(Type) * 8;
    Type digits_[kCapacity]; // little-endian: digits_[0] is the least significant word
    size_t count_;           // number of valid words; always >= 1
};
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_BIGINTEGER_H_

View File

@@ -1,248 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
#ifndef RAPIDJSON_DIYFP_H_
#define RAPIDJSON_DIYFP_H_
#include "../rapidjson.h"
#if defined(_MSC_VER) && defined(_M_AMD64)
#include <intrin.h>
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_umul128)
#endif
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#endif
//! "Do-It-Yourself" floating point: value = f * 2^e, with the full significand
//! kept in a 64-bit integer (no hidden bit, no sign). Used by the Grisu2
//! dtoa algorithm (Loitsch 2010).
struct DiyFp {
    DiyFp() {}

    DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {}

    //! Decompose an IEEE-754 double into (significand, binary exponent).
    explicit DiyFp(double d) {
        union {
            double d;
            uint64_t u64;
        } u = { d };

        int biased_e = static_cast<int>((u.u64 & kDpExponentMask) >> kDpSignificandSize);
        uint64_t significand = (u.u64 & kDpSignificandMask);
        if (biased_e != 0) {
            // Normal number: restore the implicit leading 1 bit.
            f = significand + kDpHiddenBit;
            e = biased_e - kDpExponentBias;
        }
        else {
            // Subnormal (or zero): no hidden bit.
            f = significand;
            e = kDpMinExponent + 1;
        }
    }

    // NOTE(review): assumes both operands already share the same exponent and
    // that f >= rhs.f — callers (DigitGen boundaries) guarantee this.
    DiyFp operator-(const DiyFp& rhs) const {
        return DiyFp(f - rhs.f, e);
    }

    //! 64x64 -> 128-bit multiply keeping the upper 64 bits, rounded to nearest.
    DiyFp operator*(const DiyFp& rhs) const {
#if defined(_MSC_VER) && defined(_M_AMD64)
        uint64_t h;
        uint64_t l = _umul128(f, rhs.f, &h);
        if (l & (uint64_t(1) << 63)) // rounding
            h++;
        return DiyFp(h, e + rhs.e + 64);
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
        __extension__ typedef unsigned __int128 uint128;
        uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
        uint64_t h = static_cast<uint64_t>(p >> 64);
        uint64_t l = static_cast<uint64_t>(p);
        if (l & (uint64_t(1) << 63)) // rounding
            h++;
        return DiyFp(h, e + rhs.e + 64);
#else
        // Portable fallback: schoolbook multiplication of the 32-bit halves.
        const uint64_t M32 = 0xFFFFFFFF;
        const uint64_t a = f >> 32;
        const uint64_t b = f & M32;
        const uint64_t c = rhs.f >> 32;
        const uint64_t d = rhs.f & M32;
        const uint64_t ac = a * c;
        const uint64_t bc = b * c;
        const uint64_t ad = a * d;
        const uint64_t bd = b * d;
        uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32);
        tmp += 1U << 31;  /// mult_round
        return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64);
#endif
    }

    //! Shift left until the most significant bit of f is set.
    DiyFp Normalize() const {
#if defined(_MSC_VER) && defined(_M_AMD64)
        unsigned long index;
        _BitScanReverse64(&index, f);
        return DiyFp(f << (63 - index), e - (63 - index));
#elif defined(__GNUC__) && __GNUC__ >= 4
        int s = __builtin_clzll(f);
        return DiyFp(f << s, e - s);
#else
        DiyFp res = *this;
        while (!(res.f & (static_cast<uint64_t>(1) << 63))) {
            res.f <<= 1;
            res.e--;
        }
        return res;
#endif
    }

    //! Normalize a boundary value: shift until the bit above the hidden bit is
    //! set, then align the significand to the full 64 bits.
    DiyFp NormalizeBoundary() const {
        DiyFp res = *this;
        while (!(res.f & (kDpHiddenBit << 1))) {
            res.f <<= 1;
            res.e--;
        }
        res.f <<= (kDiySignificandSize - kDpSignificandSize - 2);
        res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2);
        return res;
    }

    //! Compute the normalized lower/upper boundaries of the rounding interval
    //! around this value (both returned with the same exponent).
    void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const {
        DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary();
        // When f is exactly the hidden bit (a power of two), the lower
        // boundary is closer, so it gets one extra bit of precision.
        DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1);
        mi.f <<= mi.e - pl.e;
        mi.e = pl.e;
        *plus = pl;
        *minus = mi;
    }

    //! Reassemble (f, e) into an IEEE-754 double bit pattern.
    double ToDouble() const {
        union {
            double d;
            uint64_t u64;
        }u;
        const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 :
            static_cast<uint64_t>(e + kDpExponentBias);
        u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize);
        return u.d;
    }

    static const int kDiySignificandSize = 64;
    static const int kDpSignificandSize = 52;
    static const int kDpExponentBias = 0x3FF + kDpSignificandSize;
    static const int kDpMaxExponent = 0x7FF - kDpExponentBias;
    static const int kDpMinExponent = -kDpExponentBias;
    static const int kDpDenormalExponent = -kDpExponentBias + 1;
    static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
    static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
    static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);

    uint64_t f;  // significand
    int e;       // binary exponent
};
//! Return the cached normalized power of ten 10^(-348 + index*8) as a DiyFp.
/*! Index i holds the 64-bit significand and matching binary exponent of
    10^(-348 + i*8); the spacing of 8 lets Grisu pick a power that brings any
    double's exponent into its working range with a single table lookup.
*/
inline DiyFp GetCachedPowerByIndex(size_t index) {
    // 10^-348, 10^-340, ..., 10^340
    static const uint64_t kCachedPowers_F[] = {
        RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76),
        RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea),
        RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df),
        RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f),
        RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c),
        RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5),
        RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d),
        RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637),
        RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7),
        RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5),
        RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b),
        RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996),
        RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6),
        RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8),
        RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053),
        RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd),
        RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94),
        RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b),
        RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac),
        RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3),
        RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb),
        RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c),
        RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000),
        RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984),
        RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70),
        RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245),
        RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8),
        RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a),
        RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea),
        RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85),
        RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2),
        RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3),
        RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25),
        RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece),
        RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5),
        RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a),
        RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c),
        RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a),
        RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129),
        RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429),
        RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d),
        RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841),
        RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9),
        RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b)
    };
    // Binary exponents matching kCachedPowers_F entry-for-entry.
    static const int16_t kCachedPowers_E[] = {
        -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980,
        -954, -927, -901, -874, -847, -821, -794, -768, -741, -715,
        -688, -661, -635, -608, -582, -555, -529, -502, -475, -449,
        -422, -396, -369, -343, -316, -289, -263, -236, -210, -183,
        -157, -130, -103, -77, -50, -24, 3, 30, 56, 83,
        109, 136, 162, 189, 216, 242, 269, 295, 322, 348,
        375, 402, 428, 455, 481, 508, 534, 561, 588, 614,
        641, 667, 694, 720, 747, 774, 800, 827, 853, 880,
        907, 933, 960, 986, 1013, 1039, 1066
    };
    return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
}
//! Pick a cached power of ten 10^-K so that multiplying a DiyFp with binary
//! exponent e by it lands in Grisu's working exponent range; K is returned
//! through the out-parameter.
inline DiyFp GetCachedPower(int e, int* K) {
    //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
    // Shift by +347 keeps the value positive, so ceil() can be emulated by
    // truncation plus a conditional bump.
    double dk = (-61 - e) * 0.30102999566398114 + 347;
    int k = static_cast<int>(dk);
    k += (dk - k > 0.0) ? 1 : 0;

    unsigned index = static_cast<unsigned>((k >> 3) + 1);
    *K = 348 - static_cast<int>(index << 3);  // decimal exponent, no lookup table needed
    return GetCachedPowerByIndex(index);
}
//! Return the cached power of ten nearest below 10^exp (table step is 8);
//! the actual decimal exponent of the returned power is stored in *outExp.
inline DiyFp GetCachedPower10(int exp, int *outExp) {
    const unsigned index = (static_cast<unsigned>(exp) + 348u) / 8u;
    *outExp = static_cast<int>(index) * 8 - 348;
    return GetCachedPowerByIndex(index);
}
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_DIYFP_H_

View File

@@ -1,217 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
#ifndef RAPIDJSON_DTOA_
#define RAPIDJSON_DTOA_
#include "itoa.h" // GetDigitsLut()
#include "diyfp.h"
#include "ieee754.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#endif
//! Round the last generated digit toward w (the scaled true value).
/*! Decrements buffer[len-1] while doing so keeps the result inside the
    rounding interval (delta) and moves it strictly closer to wp_w.
*/
inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) {
    for (;;) {
        // Still below w, and one more step stays within the interval?
        if (!(rest < wp_w && delta - rest >= ten_kappa))
            break;
        // Would stepping bring us closer to w?
        const bool closer = (rest + ten_kappa < wp_w) ||
                            (wp_w - rest > rest + ten_kappa - wp_w);
        if (!closer)
            break;
        --buffer[len - 1];
        rest += ten_kappa;
    }
}
//! Number of decimal digits in n, capped at 9.
/*! DigitGen() never feeds a value needing 10 digits, so 9 is returned for
    everything >= 10^8. A branchy comparison chain was measured faster than
    a __builtin_clz-based version for this workload.
*/
inline unsigned CountDecimalDigit32(uint32_t n) {
    if (n >= 100000000) return 9;  // 10-digit values also land here by design
    if (n >= 10000000) return 8;
    if (n >= 1000000) return 7;
    if (n >= 100000) return 6;
    if (n >= 10000) return 5;
    if (n >= 1000) return 4;
    if (n >= 100) return 3;
    if (n >= 10) return 2;
    return 1;
}
//! Grisu2 digit generation (Loitsch 2010).
/*! \param W      scaled value w (v * 10^-K)
    \param Mp     scaled upper boundary M+
    \param delta  width of the scaled boundary interval (M+ - M-)
    \param buffer receives the decimal digits (no terminator)
    \param len    out: number of digits written
    \param K      in/out: decimal exponent, adjusted by the final kappa
*/
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
    static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
    const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);  // the value 1.0 at Mp's exponent
    const DiyFp wp_w = Mp - W;                    // distance from w to the upper boundary
    uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);  // integral part of Mp
    uint64_t p2 = Mp.f & (one.f - 1);                     // fractional part of Mp
    unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9]
    *len = 0;

    // Emit the digits of the integral part, most significant first, stopping
    // as soon as the remainder fits inside delta (the short digit string then
    // already identifies v uniquely).
    while (kappa > 0) {
        uint32_t d = 0;
        switch (kappa) {
            case 9: d = p1 / 100000000; p1 %= 100000000; break;
            case 8: d = p1 / 10000000; p1 %= 10000000; break;
            case 7: d = p1 / 1000000; p1 %= 1000000; break;
            case 6: d = p1 / 100000; p1 %= 100000; break;
            case 5: d = p1 / 10000; p1 %= 10000; break;
            case 4: d = p1 / 1000; p1 %= 1000; break;
            case 3: d = p1 / 100; p1 %= 100; break;
            case 2: d = p1 / 10; p1 %= 10; break;
            case 1: d = p1; p1 = 0; break;
            default:;
        }
        if (d || *len)  // suppress leading zeros
            buffer[(*len)++] = static_cast<char>('0' + static_cast<char>(d));
        kappa--;
        uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
        if (tmp <= delta) {
            *K += kappa;
            GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
            return;
        }
    }

    // kappa = 0: emit digits of the fractional part, one per iteration, by
    // repeatedly scaling p2 (and delta) by 10.
    for (;;) {
        p2 *= 10;
        delta *= 10;
        char d = static_cast<char>(p2 >> -one.e);
        if (d || *len)
            buffer[(*len)++] = static_cast<char>('0' + d);
        p2 &= one.f - 1;
        kappa--;  // goes negative; kappa counts fractional digits here
        if (p2 < delta) {
            *K += kappa;
            GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * kPow10[-static_cast<int>(kappa)]);
            return;
        }
    }
}
//! Grisu2: produce the shortest decimal digit string for a positive, non-zero
//! double, plus its decimal exponent K (value == digits * 10^K).
inline void Grisu2(double value, char* buffer, int* length, int* K) {
    const DiyFp v(value);
    DiyFp w_m, w_p;
    v.NormalizedBoundaries(&w_m, &w_p);  // rounding interval around value

    const DiyFp c_mk = GetCachedPower(w_p.e, K);  // 10^-K bringing w_p.e into range
    const DiyFp W = v.Normalize() * c_mk;
    DiyFp Wp = w_p * c_mk;
    DiyFp Wm = w_m * c_mk;
    // Shrink the interval by 1 ulp on each side to stay conservative about
    // the rounding error introduced by the cached-power multiplication.
    Wm.f++;
    Wp.f--;
    DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K);
}
//! Write the decimal exponent K ("-" prefix for negatives, 1-3 digits);
//! returns one past the last character written.
inline char* WriteExponent(int K, char* buffer) {
    if (K < 0) {
        *buffer++ = '-';
        K = -K;
    }

    if (K >= 100) {
        // Hundreds digit, then the remaining two digits via the pair LUT.
        *buffer++ = static_cast<char>('0' + static_cast<char>(K / 100));
        const char* pair = GetDigitsLut() + (K % 100) * 2;
        *buffer++ = pair[0];
        *buffer++ = pair[1];
    } else if (K >= 10) {
        const char* pair = GetDigitsLut() + K * 2;
        *buffer++ = pair[0];
        *buffer++ = pair[1];
    } else {
        *buffer++ = static_cast<char>('0' + static_cast<char>(K));
    }

    return buffer;
}
//! Turn the raw Grisu digit string (value == digits * 10^k) into a readable
//! decimal or exponent form in place; returns one past the last character.
inline char* Prettify(char* buffer, int length, int k) {
    const int kk = length + k;  // 10^(kk-1) <= v < 10^kk

    if (length <= kk && kk <= 21) {
        // 1234e7 -> 12340000000.0 : pad with zeros, force a ".0" suffix so
        // the output always reads back as a double.
        for (int i = length; i < kk; i++)
            buffer[i] = '0';
        buffer[kk] = '.';
        buffer[kk + 1] = '0';
        return &buffer[kk + 2];
    }
    else if (0 < kk && kk <= 21) {
        // 1234e-2 -> 12.34 : open a gap and insert the decimal point.
        std::memmove(&buffer[kk + 1], &buffer[kk], static_cast<size_t>(length - kk));
        buffer[kk] = '.';
        return &buffer[length + 1];
    }
    else if (-6 < kk && kk <= 0) {
        // 1234e-6 -> 0.001234 : shift digits right, prepend "0." and zeros.
        const int offset = 2 - kk;
        std::memmove(&buffer[offset], &buffer[0], static_cast<size_t>(length));
        buffer[0] = '0';
        buffer[1] = '.';
        for (int i = 2; i < offset; i++)
            buffer[i] = '0';
        return &buffer[length + offset];
    }
    else if (length == 1) {
        // Single digit, large/small magnitude: 1e30.
        buffer[1] = 'e';
        return WriteExponent(kk - 1, &buffer[2]);
    }
    else {
        // 1234e30 -> 1.234e33 : normalized exponent form.
        std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1));
        buffer[1] = '.';
        buffer[length + 1] = 'e';
        return WriteExponent(kk - 1, &buffer[0 + length + 2]);
    }
}
//! Convert a finite double to its shortest round-trippable decimal string;
//! returns one past the last character written (no NUL terminator).
inline char* dtoa(double value, char* buffer) {
    Double d(value);

    if (d.IsZero()) {
        if (d.Sign())
            *buffer++ = '-';  // -0.0, Issue #289
        *buffer++ = '0';
        *buffer++ = '.';
        *buffer++ = '0';
        return buffer;
    }

    if (value < 0) {
        *buffer++ = '-';
        value = -value;
    }
    int length, K;
    Grisu2(value, buffer, &length, &K);
    return Prettify(buffer, length, K);
}
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_DTOA_

View File

@@ -1,77 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_IEEE754_
#define RAPIDJSON_IEEE754_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Bit-level accessor for IEEE-754 double-precision values.
class Double {
public:
    Double() {}
    Double(double d) : d_(d) {}
    Double(uint64_t u) : u_(u) {}

    double Value() const { return d_; }
    uint64_t Uint64Value() const { return u_; }

    //! Next representable double toward +infinity (value must be non-negative).
    double NextPositiveDouble() const {
        RAPIDJSON_ASSERT(!Sign());
        return Double(u_ + 1).Value();
    }

    bool Sign() const { return (u_ & kSignMask) != 0; }
    uint64_t Significand() const { return u_ & kSignificandMask; }
    int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); }

    bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; }
    bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; }
    // Note: zero is treated as "normal" by this predicate.
    bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; }
    // True for both +0.0 and -0.0.
    bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; }

    //! Significand with the implicit leading bit restored for normal numbers.
    uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); }
    int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; }
    // NOTE(review): appears to map the signed bit pattern to an unsigned key
    // that preserves numeric ordering — confirm against callers before relying on it.
    uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; }

    //! Number of significand bits actually representable at binary order `order`
    //! (53 for normals, shrinking to 0 through the subnormal range).
    static unsigned EffectiveSignificandSize(int order) {
        if (order >= -1021)
            return 53;
        else if (order <= -1074)
            return 0;
        else
            return (unsigned)order + 1074;
    }

private:
    static const int kSignificandSize = 52;
    static const int kExponentBias = 0x3FF;
    static const int kDenormalExponent = 1 - kExponentBias;
    static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000);
    static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
    static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
    static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);

    union {
        double d_;     // value view
        uint64_t u_;   // raw bit-pattern view
    };
};
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_IEEE754_

View File

@@ -1,304 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ITOA_
#define RAPIDJSON_ITOA_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! 200-character lookup table of the pairs "00".."99"; entry i*2 and i*2+1
//! hold the two decimal digits of i, letting callers emit two digits at once.
inline const char* GetDigitsLut() {
    static const char cDigitsLut[200] = {
        '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
        '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
        '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
        '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
        '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
        '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
        '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
        '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
        '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
        '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
    };
    return cDigitsLut;
}
//! Write the decimal representation of value (no leading zeros, no NUL);
//! returns one past the last character written.
inline char* u32toa(uint32_t value, char* buffer) {
    const char* cDigitsLut = GetDigitsLut();

    if (value < 10000) {
        // 1..4 digits: two LUT pairs; the range checks suppress leading zeros.
        const uint32_t d1 = (value / 100) << 1;
        const uint32_t d2 = (value % 100) << 1;

        if (value >= 1000)
            *buffer++ = cDigitsLut[d1];
        if (value >= 100)
            *buffer++ = cDigitsLut[d1 + 1];
        if (value >= 10)
            *buffer++ = cDigitsLut[d2];
        *buffer++ = cDigitsLut[d2 + 1];
    }
    else if (value < 100000000) {
        // value = bbbbcccc (5..8 digits): split into two 4-digit groups.
        const uint32_t b = value / 10000;
        const uint32_t c = value % 10000;

        const uint32_t d1 = (b / 100) << 1;
        const uint32_t d2 = (b % 100) << 1;
        const uint32_t d3 = (c / 100) << 1;
        const uint32_t d4 = (c % 100) << 1;

        if (value >= 10000000)
            *buffer++ = cDigitsLut[d1];
        if (value >= 1000000)
            *buffer++ = cDigitsLut[d1 + 1];
        if (value >= 100000)
            *buffer++ = cDigitsLut[d2];
        *buffer++ = cDigitsLut[d2 + 1];

        *buffer++ = cDigitsLut[d3];
        *buffer++ = cDigitsLut[d3 + 1];
        *buffer++ = cDigitsLut[d4];
        *buffer++ = cDigitsLut[d4 + 1];
    }
    else {
        // value = aabbbbcccc in decimal (9..10 digits).
        const uint32_t a = value / 100000000; // 1 to 42
        value %= 100000000;

        if (a >= 10) {
            const unsigned i = a << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
        }
        else
            *buffer++ = static_cast<char>('0' + static_cast<char>(a));

        const uint32_t b = value / 10000; // 0 to 9999
        const uint32_t c = value % 10000; // 0 to 9999

        const uint32_t d1 = (b / 100) << 1;
        const uint32_t d2 = (b % 100) << 1;
        const uint32_t d3 = (c / 100) << 1;
        const uint32_t d4 = (c % 100) << 1;

        // The remaining eight digits are emitted unconditionally (with zeros).
        *buffer++ = cDigitsLut[d1];
        *buffer++ = cDigitsLut[d1 + 1];
        *buffer++ = cDigitsLut[d2];
        *buffer++ = cDigitsLut[d2 + 1];
        *buffer++ = cDigitsLut[d3];
        *buffer++ = cDigitsLut[d3 + 1];
        *buffer++ = cDigitsLut[d4];
        *buffer++ = cDigitsLut[d4 + 1];
    }
    return buffer;
}
//! Write a signed 32-bit value in decimal; returns one past the last char.
inline char* i32toa(int32_t value, char* buffer) {
    uint32_t magnitude = static_cast<uint32_t>(value);
    if (value < 0) {
        *buffer++ = '-';
        // Unsigned negate is well-defined and handles INT32_MIN correctly.
        magnitude = 0u - magnitude;
    }
    return u32toa(magnitude, buffer);
}
//! Write the decimal representation of a 64-bit value (no leading zeros,
//! no NUL); returns one past the last character written.
inline char* u64toa(uint64_t value, char* buffer) {
    const char* cDigitsLut = GetDigitsLut();
    const uint64_t kTen8 = 100000000;
    const uint64_t kTen9 = kTen8 * 10;
    const uint64_t kTen10 = kTen8 * 100;
    const uint64_t kTen11 = kTen8 * 1000;
    const uint64_t kTen12 = kTen8 * 10000;
    const uint64_t kTen13 = kTen8 * 100000;
    const uint64_t kTen14 = kTen8 * 1000000;
    const uint64_t kTen15 = kTen8 * 10000000;
    const uint64_t kTen16 = kTen8 * kTen8;

    if (value < kTen8) {
        // 1..8 digits: same scheme as u32toa, on a 32-bit copy.
        uint32_t v = static_cast<uint32_t>(value);
        if (v < 10000) {
            const uint32_t d1 = (v / 100) << 1;
            const uint32_t d2 = (v % 100) << 1;

            if (v >= 1000)
                *buffer++ = cDigitsLut[d1];
            if (v >= 100)
                *buffer++ = cDigitsLut[d1 + 1];
            if (v >= 10)
                *buffer++ = cDigitsLut[d2];
            *buffer++ = cDigitsLut[d2 + 1];
        }
        else {
            // value = bbbbcccc
            const uint32_t b = v / 10000;
            const uint32_t c = v % 10000;

            const uint32_t d1 = (b / 100) << 1;
            const uint32_t d2 = (b % 100) << 1;
            const uint32_t d3 = (c / 100) << 1;
            const uint32_t d4 = (c % 100) << 1;

            if (value >= 10000000)
                *buffer++ = cDigitsLut[d1];
            if (value >= 1000000)
                *buffer++ = cDigitsLut[d1 + 1];
            if (value >= 100000)
                *buffer++ = cDigitsLut[d2];
            *buffer++ = cDigitsLut[d2 + 1];

            *buffer++ = cDigitsLut[d3];
            *buffer++ = cDigitsLut[d3 + 1];
            *buffer++ = cDigitsLut[d4];
            *buffer++ = cDigitsLut[d4 + 1];
        }
    }
    else if (value < kTen16) {
        // 9..16 digits: split into two 8-digit halves; leading zeros of the
        // high half are suppressed by the kTenN range checks.
        const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
        const uint32_t v1 = static_cast<uint32_t>(value % kTen8);

        const uint32_t b0 = v0 / 10000;
        const uint32_t c0 = v0 % 10000;

        const uint32_t d1 = (b0 / 100) << 1;
        const uint32_t d2 = (b0 % 100) << 1;
        const uint32_t d3 = (c0 / 100) << 1;
        const uint32_t d4 = (c0 % 100) << 1;

        const uint32_t b1 = v1 / 10000;
        const uint32_t c1 = v1 % 10000;

        const uint32_t d5 = (b1 / 100) << 1;
        const uint32_t d6 = (b1 % 100) << 1;
        const uint32_t d7 = (c1 / 100) << 1;
        const uint32_t d8 = (c1 % 100) << 1;

        if (value >= kTen15)
            *buffer++ = cDigitsLut[d1];
        if (value >= kTen14)
            *buffer++ = cDigitsLut[d1 + 1];
        if (value >= kTen13)
            *buffer++ = cDigitsLut[d2];
        if (value >= kTen12)
            *buffer++ = cDigitsLut[d2 + 1];
        if (value >= kTen11)
            *buffer++ = cDigitsLut[d3];
        if (value >= kTen10)
            *buffer++ = cDigitsLut[d3 + 1];
        if (value >= kTen9)
            *buffer++ = cDigitsLut[d4];
        if (value >= kTen8)
            *buffer++ = cDigitsLut[d4 + 1];

        // Low 8 digits are emitted unconditionally (including zeros).
        *buffer++ = cDigitsLut[d5];
        *buffer++ = cDigitsLut[d5 + 1];
        *buffer++ = cDigitsLut[d6];
        *buffer++ = cDigitsLut[d6 + 1];
        *buffer++ = cDigitsLut[d7];
        *buffer++ = cDigitsLut[d7 + 1];
        *buffer++ = cDigitsLut[d8];
        *buffer++ = cDigitsLut[d8 + 1];
    }
    else {
        // 17..20 digits: top 1-4 digits first, then two full 8-digit halves.
        const uint32_t a = static_cast<uint32_t>(value / kTen16); // 1 to 1844
        value %= kTen16;

        if (a < 10)
            *buffer++ = static_cast<char>('0' + static_cast<char>(a));
        else if (a < 100) {
            const uint32_t i = a << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
        }
        else if (a < 1000) {
            *buffer++ = static_cast<char>('0' + static_cast<char>(a / 100));

            const uint32_t i = (a % 100) << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
        }
        else {
            const uint32_t i = (a / 100) << 1;
            const uint32_t j = (a % 100) << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
            *buffer++ = cDigitsLut[j];
            *buffer++ = cDigitsLut[j + 1];
        }

        const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
        const uint32_t v1 = static_cast<uint32_t>(value % kTen8);

        const uint32_t b0 = v0 / 10000;
        const uint32_t c0 = v0 % 10000;

        const uint32_t d1 = (b0 / 100) << 1;
        const uint32_t d2 = (b0 % 100) << 1;
        const uint32_t d3 = (c0 / 100) << 1;
        const uint32_t d4 = (c0 % 100) << 1;

        const uint32_t b1 = v1 / 10000;
        const uint32_t c1 = v1 % 10000;

        const uint32_t d5 = (b1 / 100) << 1;
        const uint32_t d6 = (b1 % 100) << 1;
        const uint32_t d7 = (c1 / 100) << 1;
        const uint32_t d8 = (c1 % 100) << 1;

        // All 16 remaining digits are emitted unconditionally.
        *buffer++ = cDigitsLut[d1];
        *buffer++ = cDigitsLut[d1 + 1];
        *buffer++ = cDigitsLut[d2];
        *buffer++ = cDigitsLut[d2 + 1];
        *buffer++ = cDigitsLut[d3];
        *buffer++ = cDigitsLut[d3 + 1];
        *buffer++ = cDigitsLut[d4];
        *buffer++ = cDigitsLut[d4 + 1];
        *buffer++ = cDigitsLut[d5];
        *buffer++ = cDigitsLut[d5 + 1];
        *buffer++ = cDigitsLut[d6];
        *buffer++ = cDigitsLut[d6 + 1];
        *buffer++ = cDigitsLut[d7];
        *buffer++ = cDigitsLut[d7 + 1];
        *buffer++ = cDigitsLut[d8];
        *buffer++ = cDigitsLut[d8 + 1];
    }
    return buffer;
}
//! Write a signed 64-bit value in decimal; returns one past the last char.
inline char* i64toa(int64_t value, char* buffer) {
    uint64_t magnitude = static_cast<uint64_t>(value);
    if (value < 0) {
        *buffer++ = '-';
        // Unsigned negate is well-defined and handles INT64_MIN correctly.
        magnitude = 0u - magnitude;
    }
    return u64toa(magnitude, buffer);
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ITOA_

View File

@@ -1,181 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_META_H_
#define RAPIDJSON_INTERNAL_META_H_
#include "../rapidjson.h"
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#endif
#if defined(_MSC_VER)
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(6334)
#endif
#if RAPIDJSON_HAS_CXX11_TYPETRAITS
#include <type_traits>
#endif
//@cond RAPIDJSON_INTERNAL
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching
template <typename T> struct Void { typedef void Type; };

///////////////////////////////////////////////////////////////////////////////
// BoolType, TrueType, FalseType
//
// Compile-time boolean wrapper — a C++03 stand-in for std::integral_constant<bool>.
template <bool Cond> struct BoolType {
    static const bool Value = Cond;
    typedef BoolType Type;
};
typedef BoolType<true> TrueType;
typedef BoolType<false> FalseType;

///////////////////////////////////////////////////////////////////////////////
// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr
//

// SelectIfCond<C,T1,T2>::Type is T1 when C is true, else T2 (like std::conditional).
template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; };
template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; };
template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {};
template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {};

// Boolean combinators over BoolType-like conditions (conditions expose ::Value).
template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {};
template <> struct AndExprCond<true, true> : TrueType {};
template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {};
template <> struct OrExprCond<false, false> : FalseType {};

template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {};
template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {};
template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {};
template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {};

///////////////////////////////////////////////////////////////////////////////
// AddConst, MaybeAddConst, RemoveConst
// Const-qualification helpers (C++03 equivalents of <type_traits> transforms).
template <typename T> struct AddConst { typedef const T Type; };
template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {};
template <typename T> struct RemoveConst { typedef T Type; };
template <typename T> struct RemoveConst<const T> { typedef T Type; };

///////////////////////////////////////////////////////////////////////////////
// IsSame, IsConst, IsMoreConst, IsPointer
//
template <typename T, typename U> struct IsSame : FalseType {};
template <typename T> struct IsSame<T, T> : TrueType {};

template <typename T> struct IsConst : FalseType {};
template <typename T> struct IsConst<const T> : TrueType {};

// True when CT is the same type as T but at least as const-qualified.
template <typename CT, typename T>
struct IsMoreConst
    : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>,
              BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {};

template <typename T> struct IsPointer : FalseType {};
template <typename T> struct IsPointer<T*> : TrueType {};
///////////////////////////////////////////////////////////////////////////////
// IsBaseOf
//
#if RAPIDJSON_HAS_CXX11_TYPETRAITS
// With C++11 available, defer to the standard trait.
template <typename B, typename D> struct IsBaseOf
    : BoolType< ::std::is_base_of<B,D>::value> {};
#else // simplified version adopted from Boost
// Classic sizeof/overload-resolution trick adapted from Boost's is_base_of.
template<typename B, typename D> struct IsBaseOfImpl {
    RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0);  // B and D must be complete types
    RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0);

    typedef char (&Yes)[1];
    typedef char (&No) [2];

    template <typename T>
    static Yes Check(const D*, T);
    static No Check(const B*, int);

    // Host converts to both pointer types; overload resolution prefers the
    // D* path only when D derives from B.
    struct Host {
        operator const B*() const;
        operator const D*();
    };

    enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) };
};

// IsSame handles the B == D case, which the Boost-style trick alone rejects.
template <typename B, typename D> struct IsBaseOf
    : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {};
#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS
//////////////////////////////////////////////////////////////////////////
// EnableIf / DisableIf
//
// EnableIfCond<true,T>::Type is T; the false specialization defines no Type,
// so substitution fails (SFINAE) and the overload is silently removed.
template <bool Condition, typename T = void> struct EnableIfCond  { typedef T Type; };
template <typename T> struct EnableIfCond<false, T> { /* empty */ };

template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; };
template <typename T> struct DisableIfCond<true, T> { /* empty */ };

template <typename Condition, typename T = void>
struct EnableIf : EnableIfCond<Condition::Value, T> {};

template <typename Condition, typename T = void>
struct DisableIf : DisableIfCond<Condition::Value, T> {};

// SFINAE helpers
// RemoveSfinaeTag strips the SfinaeTag&(*)(...) wrapper that lets a macro
// argument contain commas without confusing the preprocessor.
struct SfinaeTag {};
template <typename T> struct RemoveSfinaeTag;
template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; };

#define RAPIDJSON_REMOVEFPTR_(type) \
    typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \
        < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type

// Enable an overload only when cond holds (used as a defaulted pointer parameter).
#define RAPIDJSON_ENABLEIF(cond) \
    typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
        <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL

#define RAPIDJSON_DISABLEIF(cond) \
    typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
        <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL

// Same idea, expressed through the function's return type.
#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \
    typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
        <RAPIDJSON_REMOVEFPTR_(cond), \
         RAPIDJSON_REMOVEFPTR_(returntype)>::Type

#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \
    typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
        <RAPIDJSON_REMOVEFPTR_(cond), \
         RAPIDJSON_REMOVEFPTR_(returntype)>::Type
} // namespace internal
RAPIDJSON_NAMESPACE_END
//@endcond
#if defined(__GNUC__) || defined(_MSC_VER)
RAPIDJSON_DIAG_POP
#endif
#endif // RAPIDJSON_INTERNAL_META_H_

View File

@@ -1,55 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_POW10_
#define RAPIDJSON_POW10_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Computes integer powers of 10 in double (10.0^n).
/*! This function uses lookup table for fast and accurate results.
\param n non-negative exponent. Must be <= 308.
\return 10.0^n
*/
inline double Pow10(int n) {
// Lookup table of the double values nearest to 10^0 .. 10^308, indexed directly by n.
// Kept static so it is materialized once; entries must stay byte-exact.
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
1e+0,
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
};
// Callers must pre-validate n; an out-of-range index would read past the table.
RAPIDJSON_ASSERT(n >= 0 && n <= 308);
return e[n];
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_POW10_

View File

@@ -1,189 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_STACK_H_
#define RAPIDJSON_INTERNAL_STACK_H_
#include "../rapidjson.h"
#include "swap.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
///////////////////////////////////////////////////////////////////////////////
// Stack
//! A type-unsafe stack for storing different types of data.
/*! \tparam Allocator Allocator for allocating stack memory.
*/
template <typename Allocator>
class Stack {
public:
    // Optimization note: Do not allocate memory for stack_ in constructor.
    // Do it lazily when first Push() -> Expand() -> Resize().
    Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) {
        RAPIDJSON_ASSERT(stackCapacity > 0);
    }

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    //! Move constructor: takes over rhs's buffer and allocator, leaving rhs empty.
    Stack(Stack&& rhs)
        : allocator_(rhs.allocator_),
          ownAllocator_(rhs.ownAllocator_),
          stack_(rhs.stack_),
          stackTop_(rhs.stackTop_),
          stackEnd_(rhs.stackEnd_),
          initialCapacity_(rhs.initialCapacity_)
    {
        rhs.allocator_ = 0;
        rhs.ownAllocator_ = 0;
        rhs.stack_ = 0;
        rhs.stackTop_ = 0;
        rhs.stackEnd_ = 0;
        rhs.initialCapacity_ = 0;
    }
#endif

    ~Stack() {
        Destroy();
    }

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    //! Move assignment: frees the current buffer, then takes over rhs's state.
    Stack& operator=(Stack&& rhs) {
        if (&rhs != this)
        {
            Destroy();

            allocator_ = rhs.allocator_;
            ownAllocator_ = rhs.ownAllocator_;
            stack_ = rhs.stack_;
            stackTop_ = rhs.stackTop_;
            stackEnd_ = rhs.stackEnd_;
            initialCapacity_ = rhs.initialCapacity_;

            rhs.allocator_ = 0;
            rhs.ownAllocator_ = 0;
            rhs.stack_ = 0;
            rhs.stackTop_ = 0;
            rhs.stackEnd_ = 0;
            rhs.initialCapacity_ = 0;
        }
        return *this;
    }
#endif

    //! Member-wise swap; never allocates.
    void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT {
        internal::Swap(allocator_, rhs.allocator_);
        internal::Swap(ownAllocator_, rhs.ownAllocator_);
        internal::Swap(stack_, rhs.stack_);
        internal::Swap(stackTop_, rhs.stackTop_);
        internal::Swap(stackEnd_, rhs.stackEnd_);
        internal::Swap(initialCapacity_, rhs.initialCapacity_);
    }

    //! Logically empties the stack; the allocated buffer is kept for reuse.
    void Clear() { stackTop_ = stack_; }

    void ShrinkToFit() {
        if (Empty()) {
            // If the stack is empty, completely deallocate the memory.
            Allocator::Free(stack_);
            stack_ = 0;
            stackTop_ = 0;
            stackEnd_ = 0;
        }
        else
            Resize(GetSize());
    }

    // Optimization note: try to minimize the size of this function for force inline.
    // Expansion is run very infrequently, so it is moved to another (probably non-inline) function.
    //! Reserves space for \c count objects of type T and returns a pointer to the
    //! (uninitialized) region; the caller constructs/copies into it.
    template<typename T>
    RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) {
        // Expand the stack if needed
        if (stackTop_ + sizeof(T) * count >= stackEnd_)
            Expand<T>(count);

        T* ret = reinterpret_cast<T*>(stackTop_);
        stackTop_ += sizeof(T) * count;
        return ret;
    }

    //! Pops \c count objects of type T. The returned pointer stays valid only
    //! until the next Push()/Resize().
    template<typename T>
    T* Pop(size_t count) {
        RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T));
        stackTop_ -= count * sizeof(T);
        return reinterpret_cast<T*>(stackTop_);
    }

    //! Returns a pointer to the top-most object, assuming it is a T.
    template<typename T>
    T* Top() {
        RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
        return reinterpret_cast<T*>(stackTop_ - sizeof(T));
    }

    //! Returns the start of the buffer, reinterpreted as T*.
    template<typename T>
    T* Bottom() { return reinterpret_cast<T*>(stack_); }

    Allocator& GetAllocator() { return *allocator_; }
    bool Empty() const { return stackTop_ == stack_; }
    size_t GetSize() const { return static_cast<size_t>(stackTop_ - stack_); }
    size_t GetCapacity() const { return static_cast<size_t>(stackEnd_ - stack_); }

private:
    template<typename T>
    void Expand(size_t count) {
        // Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity.
        size_t newCapacity;
        if (stack_ == 0) {
            if (!allocator_)
                ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator());
            newCapacity = initialCapacity_;
        } else {
            newCapacity = GetCapacity();
            newCapacity += (newCapacity + 1) / 2;   // grow by ~1.5x
        }
        size_t newSize = GetSize() + sizeof(T) * count;
        if (newCapacity < newSize)
            newCapacity = newSize;

        Resize(newCapacity);
    }

    void Resize(size_t newCapacity) {
        const size_t size = GetSize();  // Backup the current size
        stack_ = static_cast<char*>(allocator_->Realloc(stack_, GetCapacity(), newCapacity));
        stackTop_ = stack_ + size;
        stackEnd_ = stack_ + newCapacity;
    }

    void Destroy() {
        Allocator::Free(stack_);
        RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack
    }

    // Prohibit copy constructor & assignment operator.
    Stack(const Stack&);
    Stack& operator=(const Stack&);

    Allocator* allocator_;      //!< Allocator in use (may be owned, see ownAllocator_).
    Allocator* ownAllocator_;   //!< Non-null iff the allocator was created by this stack.
    char *stack_;               //!< Start of the buffer (0 until first Push()).
    char *stackTop_;            //!< One past the last used byte.
    char *stackEnd_;            //!< One past the allocated capacity.
    size_t initialCapacity_;
};
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_INTERNAL_STACK_H_

View File

@@ -1,39 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
#define RAPIDJSON_INTERNAL_STRFUNC_H_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Custom strlen() which works on different character types.
/*! \tparam Ch Character type (e.g. char, wchar_t, short)
\param s Null-terminated input string.
\return Number of characters in the string.
\note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
*/
//! Length of a null-terminated string of arbitrary character type.
/*! Counts code units (not Unicode codepoints), exactly like strlen(). */
template <typename Ch>
inline SizeType StrLen(const Ch* s) {
    SizeType count = 0;
    for (const Ch* cursor = s; *cursor; ++cursor)
        ++count;
    return count;
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_INTERNAL_STRFUNC_H_

View File

@@ -1,270 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_STRTOD_
#define RAPIDJSON_STRTOD_
#include "../rapidjson.h"
#include "ieee754.h"
#include "biginteger.h"
#include "diyfp.h"
#include "pow10.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Scales significand by 10^exp using the exact power-of-ten table.
/*! Exponents below -308 underflow to 0.0. Negative exponents divide instead of
    multiplying so the table index stays non-negative. */
inline double FastPath(double significand, int exp) {
    if (exp < -308)
        return 0.0;
    return (exp >= 0) ? significand * internal::Pow10(exp)
                      : significand / internal::Pow10(-exp);
}
//! Normal-precision strtod scaling step.
inline double StrtodNormalPrecision(double d, int p) {
    if (p >= -308)
        return FastPath(d, p);
    // Prevent expSum < -308, which would make Pow10(p) = 0:
    // apply the scaling in two in-range steps instead.
    return FastPath(FastPath(d, -308), p + 308);
}
//! Minimum of three values; on ties the earlier argument wins.
template <typename T>
inline T Min3(T a, T b, T c) {
    const T& ab = (b < a) ? b : a;  // min of first two
    return (c < ab) ? c : ab;
}
// Compares the candidate double b against the exact decimal value d * 10^dExp.
// Returns <0, 0, or >0 when |b - d*10^dExp| is respectively less than, equal
// to, or greater than half an ULP of b. All three quantities (decimal value,
// binary value, half ULP) are scaled onto a common integer basis — powers of
// 2 and 5 tracked separately — so the comparison is exact BigInteger math.
inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
const Double db(b);
const uint64_t bInt = db.IntegerSignificand();
const int bExp = db.IntegerExponent();
// Half an ULP of b has a binary exponent one less than b itself.
const int hExp = bExp - 1;
// *_Exp2 / *_Exp5: pending powers of 2 and 5 for the decimal (d), binary (b)
// and half-ULP (h) scaled values.
int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
// Adjust for decimal exponent
if (dExp >= 0) {
dS_Exp2 += dExp;
dS_Exp5 += dExp;
}
else {
// Negative decimal exponent: multiply the other two sides instead of dividing d.
bS_Exp2 -= dExp;
bS_Exp5 -= dExp;
hS_Exp2 -= dExp;
hS_Exp5 -= dExp;
}
// Adjust for binary exponent
if (bExp >= 0)
bS_Exp2 += bExp;
else {
dS_Exp2 -= bExp;
hS_Exp2 -= bExp;
}
// Adjust for half ulp exponent
if (hExp >= 0)
hS_Exp2 += hExp;
else {
dS_Exp2 -= hExp;
bS_Exp2 -= hExp;
}
// Remove common power of two factor from all three scaled values
int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
dS_Exp2 -= common_Exp2;
bS_Exp2 -= common_Exp2;
hS_Exp2 -= common_Exp2;
// Materialize the three scaled integers exactly.
BigInteger dS = d;
dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
BigInteger bS(bInt);
bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
BigInteger hS(1);
hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
// |dS - bS| compared against hS (the half ULP) decides the result.
BigInteger delta(0);
dS.Difference(bS, &delta);
return delta.Compare(hS);
}
//! Fast-path string-to-double conversion when every step is exact in double arithmetic.
/*! \return true and stores the value in *result on success; false (leaving
    *result untouched) when the slow path must be used.
    See http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ */
inline bool StrtodFast(double d, int p, double* result) {
    if (p > 22 && p < 22 + 16) {
        // "Fast path cases in disguise": fold the excess power of ten into d.
        d *= internal::Pow10(p - 22);
        p = 22;
    }

    const bool exact = (p >= -22) && (p <= 22) && (d <= 9007199254740991.0); // 2^53 - 1
    if (exact)
        *result = FastPath(d, p);
    return exact;
}
// Compute an approximation and see if it is within 1/2 ULP
inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) {
    // Accumulate as many leading decimal digits as fit exactly in a uint64_t.
    uint64_t significand = 0;
    size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
    for (; i < length; i++) {
        if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
            (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
            break;
        significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
    }

    if (i < length && decimals[i] >= '5') // Rounding
        significand++;

    // Track the rounding error in units of 1/8 ULP (kUlpShift = 3).
    size_t remaining = length - i;
    const unsigned kUlpShift = 3;
    const unsigned kUlp = 1 << kUlpShift;
    int error = (remaining == 0) ? 0 : kUlp / 2;

    DiyFp v(significand, 0);
    v = v.Normalize();
    error <<= -v.e;

    const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(i) + exp;

    int actualExp;
    DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
    if (actualExp != dExp) {
        // Cached powers are spaced apart; bridge the gap with exact small powers of ten.
        // (Low words were previously written as the octal literal 00000000 — same
        // value, but spelled in hex here to avoid octal confusion.)
        static const DiyFp kPow10[] = {
            DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60),  // 10^1
            DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57),  // 10^2
            DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54),  // 10^3
            DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50),  // 10^4
            DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47),  // 10^5
            DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44),  // 10^6
            DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40)   // 10^7
        };
        int adjustment = dExp - actualExp - 1;
        RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7);
        v = v * kPow10[adjustment];
        if (length + static_cast<unsigned>(adjustment) > 19u) // has more digits than decimal digits in 64-bit
            error += kUlp / 2;
    }

    v = v * cachedPower;

    // Multiplication introduces up to 1 ULP of its own error.
    error += kUlp + (error == 0 ? 0 : 1);

    const int oldExp = v.e;
    v = v.Normalize();
    error <<= oldExp - v.e;

    // Determine how many of the 64 bits fall below the double's effective precision.
    const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
    unsigned precisionSize = 64 - effectiveSignificandSize;
    if (precisionSize + kUlpShift >= 64) {
        // Error would overflow the shift; rescale first.
        unsigned scaleExp = (precisionSize + kUlpShift) - 63;
        v.f >>= scaleExp;
        v.e += scaleExp;
        error = (error >> scaleExp) + 1 + static_cast<int>(kUlp);
        precisionSize -= scaleExp;
    }

    DiyFp rounded(v.f >> precisionSize, v.e + static_cast<int>(precisionSize));
    const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
    const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
    if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
        rounded.f++;
        if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
            rounded.f >>= 1;
            rounded.e++;
        }
    }

    *result = rounded.ToDouble();

    // Conclusive only if precisionBits lies clearly outside the halfWay +/- error band.
    return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
}
//! Final adjustment of an approximate conversion using exact BigInteger comparison.
/*! \param approx Result of StrtodDiyFp, guaranteed to be either the correct
    value or its immediate predecessor.
    \return The double nearest to decimals * 10^(exp - (length - decimalPosition)),
    with ties rounded to even. */
inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) {
    const BigInteger dInt(decimals, length);
    const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(length) + exp;
    Double a(approx);
    int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
    if (cmp < 0)
        return a.Value(); // within half ULP
    else if (cmp == 0) {
        // Round towards even
        if (a.Significand() & 1)
            return a.NextPositiveDouble();
        else
            return a.Value();
    }
    else // adjustment
        return a.NextPositiveDouble();
}
//! Full-precision string-to-double conversion (correctly rounded).
/*! \param d Non-negative approximate value parsed with double arithmetic.
    \param p Decimal exponent to apply to d on the fast path.
    \param decimals The significant digits of the number as ASCII.
    \param length Number of digits in \c decimals (>= 1).
    \param decimalPosition Position of the decimal point within \c decimals.
    \param exp Additional decimal exponent.
    Three stages: exact double fast path, DiyFp approximation with error
    bound, then exact BigInteger adjustment. */
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
    RAPIDJSON_ASSERT(d >= 0.0);
    RAPIDJSON_ASSERT(length >= 1);

    double result;
    if (StrtodFast(d, p, &result))
        return result;

    // Trim leading zeros
    while (*decimals == '0' && length > 1) {
        length--;
        decimals++;
        decimalPosition--;
    }

    // Trim trailing zeros
    while (decimals[length - 1] == '0' && length > 1) {
        length--;
        decimalPosition--;
        exp++;
    }

    // Trim right-most digits: beyond this count they cannot affect the rounding.
    const int kMaxDecimalDigit = 780;
    if (static_cast<int>(length) > kMaxDecimalDigit) {
        int delta = static_cast<int>(length) - kMaxDecimalDigit;
        exp += delta;
        decimalPosition -= static_cast<unsigned>(delta);
        length = kMaxDecimalDigit;
    }

    // If too small, underflow to zero
    if (static_cast<int>(length) + exp < -324)
        return 0.0;

    if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result))
        return result;

    // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
    return StrtodBigInteger(result, decimals, length, decimalPosition, exp);
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_STRTOD_

View File

@@ -1,37 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_SWAP_H_
#define RAPIDJSON_INTERNAL_SWAP_H_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Custom swap() to avoid dependency on the C++ <algorithm> header
/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only.
\note This has the same semantics as std::swap().
*/
//! Exchanges the values of a and b via a single temporary copy.
/*! Same semantics as std::swap(); intended for primitive types only. */
template <typename T>
inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT {
    T saved(a);   // copy a aside before it is overwritten
    a = b;
    b = saved;
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_INTERNAL_SWAP_H_

View File

@@ -1,70 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_MEMORYBUFFER_H_
#define RAPIDJSON_MEMORYBUFFER_H_
#include "rapidjson.h"
#include "internal/stack.h"
RAPIDJSON_NAMESPACE_BEGIN
//! Represents an in-memory output byte stream.
/*!
This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
Differences between MemoryBuffer and StringBuffer:
1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
\tparam Allocator type for allocating memory buffer.
\note implements Stream concept
*/
template <typename Allocator = CrtAllocator>
struct GenericMemoryBuffer {
typedef char Ch; // byte
GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
// Appends one byte; the underlying stack grows on demand.
void Put(Ch c) { *stack_.template Push<Ch>() = c; }
// No-op: bytes go straight into the in-memory stack, nothing to flush.
void Flush() {}
void Clear() { stack_.Clear(); }
void ShrinkToFit() { stack_.ShrinkToFit(); }
// Reserves count bytes and returns a pointer to the uninitialized region.
Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
void Pop(size_t count) { stack_.template Pop<Ch>(count); }
// Returns the accumulated bytes; NOT null-terminated (see the class-level note above).
const Ch* GetBuffer() const {
return stack_.template Bottom<Ch>();
}
size_t GetSize() const { return stack_.GetSize(); }
static const size_t kDefaultCapacity = 256;
// mutable so the const GetBuffer() can call the non-const Stack::Bottom().
mutable internal::Stack<Allocator> stack_;
};
typedef GenericMemoryBuffer<> MemoryBuffer;
//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
// Fill the freshly reserved region with one memset instead of n Put() calls.
// NOTE(review): relies on <cstring> being available transitively for
// std::memset — confirm rapidjson.h (or an earlier include) provides it.
std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
}
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_MEMORYBUFFER_H_

View File

@@ -1,61 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_MEMORYSTREAM_H_
#define RAPIDJSON_MEMORYSTREAM_H_
#include "rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
//! Represents an in-memory input byte stream.
/*!
This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream.
It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file.
Differences between MemoryStream and StringStream:
1. StringStream has encoding but MemoryStream is a byte stream.
2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source.
3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4().
\note implements Stream concept
*/
struct MemoryStream {
typedef char Ch; // byte
// Wraps [src, src + size); the buffer does not need to be null-terminated.
MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {}
// Returns '\0' at end-of-stream instead of reading past the buffer.
Ch Peek() const { return (src_ == end_) ? '\0' : *src_; }
// Consumes and returns one byte; '\0' at end-of-stream.
Ch Take() { return (src_ == end_) ? '\0' : *src_++; }
size_t Tell() const { return static_cast<size_t>(src_ - begin_); }
// Write-side Stream concept operations are unsupported on this read-only stream.
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
void Put(Ch) { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
// For encoding detection only.
// Returns the current position if at least 4 bytes remain, otherwise 0.
const Ch* Peek4() const {
return Tell() + 4 <= size_ ? src_ : 0;
}
const Ch* src_; //!< Current read position.
const Ch* begin_; //!< Original head of the string.
const Ch* end_; //!< End of stream.
size_t size_; //!< Size of the stream.
};
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_MEMORYSTREAM_H_

View File

@@ -1,316 +0,0 @@
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2013 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the product nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
// The above software in this distribution may have been modified by
// THL A29 Limited ("Tencent Modifications").
// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include "stdint.h"
// miloyip: VC supports inttypes.h since VC2013
#if _MSC_VER >= 1800
#include <inttypes.h>
#else
// 7.8 Format conversion of integer types
// Result type of imaxdiv(): quotient and remainder of an intmax_t division (C99 7.8).
typedef struct {
intmax_t quot;
intmax_t rem;
} imaxdiv_t;
// 7.8.1 Macros for format specifiers
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
// The fprintf macros for signed integers are:
#define PRId8 "d"
#define PRIi8 "i"
#define PRIdLEAST8 "d"
#define PRIiLEAST8 "i"
#define PRIdFAST8 "d"
#define PRIiFAST8 "i"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIdLEAST16 "hd"
#define PRIiLEAST16 "hi"
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
#define PRId32 "I32d"
#define PRIi32 "I32i"
#define PRIdLEAST32 "I32d"
#define PRIiLEAST32 "I32i"
#define PRIdFAST32 "I32d"
#define PRIiFAST32 "I32i"
#define PRId64 "I64d"
#define PRIi64 "I64i"
#define PRIdLEAST64 "I64d"
#define PRIiLEAST64 "I64i"
#define PRIdFAST64 "I64d"
#define PRIiFAST64 "I64i"
#define PRIdMAX "I64d"
#define PRIiMAX "I64i"
#define PRIdPTR "Id"
#define PRIiPTR "Ii"
// The fprintf macros for unsigned integers are:
#define PRIo8 "o"
#define PRIu8 "u"
#define PRIx8 "x"
#define PRIX8 "X"
#define PRIoLEAST8 "o"
#define PRIuLEAST8 "u"
#define PRIxLEAST8 "x"
#define PRIXLEAST8 "X"
#define PRIoFAST8 "o"
#define PRIuFAST8 "u"
#define PRIxFAST8 "x"
#define PRIXFAST8 "X"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRIoLEAST16 "ho"
#define PRIuLEAST16 "hu"
#define PRIxLEAST16 "hx"
#define PRIXLEAST16 "hX"
#define PRIoFAST16 "ho"
#define PRIuFAST16 "hu"
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
#define PRIo32 "I32o"
#define PRIu32 "I32u"
#define PRIx32 "I32x"
#define PRIX32 "I32X"
#define PRIoLEAST32 "I32o"
#define PRIuLEAST32 "I32u"
#define PRIxLEAST32 "I32x"
#define PRIXLEAST32 "I32X"
#define PRIoFAST32 "I32o"
#define PRIuFAST32 "I32u"
#define PRIxFAST32 "I32x"
#define PRIXFAST32 "I32X"
#define PRIo64 "I64o"
#define PRIu64 "I64u"
#define PRIx64 "I64x"
#define PRIX64 "I64X"
#define PRIoLEAST64 "I64o"
#define PRIuLEAST64 "I64u"
#define PRIxLEAST64 "I64x"
#define PRIXLEAST64 "I64X"
#define PRIoFAST64 "I64o"
#define PRIuFAST64 "I64u"
#define PRIxFAST64 "I64x"
#define PRIXFAST64 "I64X"
#define PRIoMAX "I64o"
#define PRIuMAX "I64u"
#define PRIxMAX "I64x"
#define PRIXMAX "I64X"
#define PRIoPTR "Io"
#define PRIuPTR "Iu"
#define PRIxPTR "Ix"
#define PRIXPTR "IX"
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"
#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"
#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"
#define SCNdMAX "I64d"
#define SCNiMAX "I64i"
#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]
// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"
#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"
#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"
#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"
#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]
#endif // __STDC_FORMAT_MACROS ]
// 7.8.2 Functions for greatest-width integer types
// 7.8.2.1 The imaxabs function
#define imaxabs _abs64
// 7.8.2.2 The imaxdiv function
// This is modified version of div() function from Microsoft's div.c found
// in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
// Computes quotient and remainder of numer/denom in one call (C99 7.8.2.2).
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
imaxdiv_t result;
result.quot = numer / denom;
result.rem = numer % denom;
// NOTE(review): with C99-style truncating division, rem carries the sign of
// numer, so this branch appears unreachable on conforming compilers — it is
// presumably a carry-over for pre-C99 implementations that rounded toward
// negative infinity; confirm before removing.
if (numer < 0 && result.rem > 0) {
// did division wrong; must fix up
++result.quot;
result.rem -= denom;
}
return result;
}
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64
// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64
#endif // _MSC_VER >= 1800
#endif // _MSC_INTTYPES_H_ ]

View File

@@ -1,300 +0,0 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2013 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the product nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
// The above software in this distribution may have been modified by
// THL A29 Limited ("Tencent Modifications").
// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010.
#if _MSC_VER >= 1600 // [
#include <stdint.h>
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
#undef INT8_C
#undef INT16_C
#undef INT32_C
#undef INT64_C
#undef UINT8_C
#undef UINT16_C
#undef UINT32_C
#undef UINT64_C
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
// Check out Issue 9 for the details.
#ifndef INTMAX_C // [
# define INTMAX_C INT64_C
#endif // INTMAX_C ]
#ifndef UINTMAX_C // [
# define UINTMAX_C UINT64_C
#endif // UINTMAX_C ]
#endif // __STDC_CONSTANT_MACROS ]
#else // ] _MSC_VER >= 1600 [
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
// Check out Issue 9 for the details.
#ifndef INTMAX_C // [
# define INTMAX_C INT64_C
#endif // INTMAX_C ]
#ifndef UINTMAX_C // [
# define UINTMAX_C UINT64_C
#endif // UINTMAX_C ]
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_VER >= 1600 ]
#endif // _MSC_STDINT_H_ ]

File diff suppressed because it is too large Load Diff

View File

@@ -1,207 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_PRETTYWRITER_H_
#define RAPIDJSON_PRETTYWRITER_H_
#include "writer.h"
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#endif
RAPIDJSON_NAMESPACE_BEGIN
//! Writer with indentation and spacing.
/*!
Produces human-readable JSON: elements are separated by newlines, nested
levels are indented, and object keys are followed by ": ". Inherits all
low-level value writing from Writer and only adds the layout logic.
\tparam OutputStream Type of output stream.
\tparam SourceEncoding Encoding of source string.
\tparam TargetEncoding Encoding of output stream.
\tparam StackAllocator Type of allocator for allocating memory of stack.
*/
template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator>
class PrettyWriter : public Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator> {
public:
typedef Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator> Base;
typedef typename Base::Ch Ch;
//! Constructor
/*! \param os Output stream.
\param allocator User supplied allocator. If it is null, it will create a private one.
\param levelDepth Initial capacity of stack.
*/
PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {}
//! Set custom indentation.
/*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r').
\param indentCharCount Number of indent characters for each indentation level.
\note The default indentation is 4 spaces.
\return *this, to allow call chaining.
*/
PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) {
RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r');
indentChar_ = indentChar;
indentCharCount_ = indentCharCount;
return *this;
}
/*! @name Implementation of Handler
\see Handler
*/
//@{
// Each scalar handler first emits the pretty-printing prefix (comma /
// newline / indent, or ": " after a key) and then delegates the actual
// value output to the base Writer.
bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); }
bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); }
bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); }
bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); }
bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); }
bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); }
bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); }
bool String(const Ch* str, SizeType length, bool copy = false) {
(void)copy;
PrettyPrefix(kStringType);
return Base::WriteString(str, length);
}
#if RAPIDJSON_HAS_STDSTRING
bool String(const std::basic_string<Ch>& str) {
return String(str.data(), SizeType(str.size()));
}
#endif
bool StartObject() {
PrettyPrefix(kObjectType);
// Push a new nesting level; false = object (keys alternate with values).
new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false);
return Base::WriteStartObject();
}
bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
bool EndObject(SizeType memberCount = 0) {
(void)memberCount;
RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray);
// Only a non-empty object gets a closing newline + indent, so that an
// empty object prints compactly as "{}".
bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
if (!empty) {
Base::os_->Put('\n');
WriteIndent();
}
bool ret = Base::WriteEndObject();
(void)ret;
RAPIDJSON_ASSERT(ret == true);
if (Base::level_stack_.Empty()) // end of json text
Base::os_->Flush();
return true;
}
bool StartArray() {
PrettyPrefix(kArrayType);
// Push a new nesting level; true = array.
new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true);
return Base::WriteStartArray();
}
bool EndArray(SizeType memberCount = 0) {
(void)memberCount;
RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray);
// Mirror of EndObject: empty arrays print compactly as "[]".
bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
if (!empty) {
Base::os_->Put('\n');
WriteIndent();
}
bool ret = Base::WriteEndArray();
(void)ret;
RAPIDJSON_ASSERT(ret == true);
if (Base::level_stack_.Empty()) // end of json text
Base::os_->Flush();
return true;
}
//@}
/*! @name Convenience extensions */
//@{
//! Simpler but slower overload.
bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }
//@}
protected:
//! Write the layout that precedes a value of the given type.
/*! Emits the separating comma and newline (or ": " after an object key)
plus the current indentation; at root it only checks the
single-root invariant. The Level's valueCount drives the object
key/value alternation: even counts are keys, odd counts are values.
*/
void PrettyPrefix(Type type) {
(void)type;
if (Base::level_stack_.GetSize() != 0) { // this value is not at root
typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>();
if (level->inArray) {
if (level->valueCount > 0) {
Base::os_->Put(','); // add comma if it is not the first element in array
Base::os_->Put('\n');
}
else
Base::os_->Put('\n');
WriteIndent();
}
else { // in object
if (level->valueCount > 0) {
if (level->valueCount % 2 == 0) {
Base::os_->Put(',');
Base::os_->Put('\n');
}
else {
Base::os_->Put(':');
Base::os_->Put(' ');
}
}
else
Base::os_->Put('\n');
if (level->valueCount % 2 == 0)
WriteIndent();
}
if (!level->inArray && level->valueCount % 2 == 0)
RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name
level->valueCount++;
}
else {
RAPIDJSON_ASSERT(!Base::hasRoot_); // There should be one and only one root.
Base::hasRoot_ = true;
}
}
//! Write indentation for the current nesting depth.
// Depth = number of Level entries on the stack; each level contributes
// indentCharCount_ copies of indentChar_.
void WriteIndent() {
size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_;
PutN(*Base::os_, indentChar_, count);
}
Ch indentChar_; //!< Indentation character (default ' ').
unsigned indentCharCount_; //!< Characters per indentation level (default 4).
private:
// Prohibit copy constructor & assignment operator.
PrettyWriter(const PrettyWriter&);
PrettyWriter& operator=(const PrettyWriter&);
};
RAPIDJSON_NAMESPACE_END
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
#endif // RAPIDJSON_PRETTYWRITER_H_

View File

@@ -1,654 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_RAPIDJSON_H_
#define RAPIDJSON_RAPIDJSON_H_
/*!\file rapidjson.h
\brief common definitions and configuration
\see RAPIDJSON_CONFIG
*/
/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration
\brief Configuration macros for library features
Some RapidJSON features are configurable to adapt the library to a wide
variety of platforms, environments and usage scenarios. Most of the
features can be configured in terms of overridden or predefined
preprocessor macros at compile-time.
Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs.
\note These macros should be given on the compiler command-line
(where applicable) to avoid inconsistent values when compiling
different translation units of a single application.
*/
#include <cstdlib> // malloc(), realloc(), free(), size_t
#include <cstring> // memset(), memcpy(), memmove(), memcmp()
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_VERSION_STRING
//
// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt.
//
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
// token stringification
#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x)
#define RAPIDJSON_DO_STRINGIFY(x) #x
//!@endcond
/*! \def RAPIDJSON_MAJOR_VERSION
\ingroup RAPIDJSON_CONFIG
\brief Major version of RapidJSON in integer.
*/
/*! \def RAPIDJSON_MINOR_VERSION
\ingroup RAPIDJSON_CONFIG
\brief Minor version of RapidJSON in integer.
*/
/*! \def RAPIDJSON_PATCH_VERSION
\ingroup RAPIDJSON_CONFIG
\brief Patch version of RapidJSON in integer.
*/
/*! \def RAPIDJSON_VERSION_STRING
\ingroup RAPIDJSON_CONFIG
\brief Version of RapidJSON in "<major>.<minor>.<patch>" string format.
*/
#define RAPIDJSON_MAJOR_VERSION 1
#define RAPIDJSON_MINOR_VERSION 0
#define RAPIDJSON_PATCH_VERSION 2
#define RAPIDJSON_VERSION_STRING \
RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION)
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_NAMESPACE_(BEGIN|END)
/*! \def RAPIDJSON_NAMESPACE
\ingroup RAPIDJSON_CONFIG
\brief provide custom rapidjson namespace
In order to avoid symbol clashes and/or "One Definition Rule" errors
between multiple inclusions of (different versions of) RapidJSON in
a single binary, users can customize the name of the main RapidJSON
namespace.
In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE
to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple
levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref
RAPIDJSON_NAMESPACE_END need to be defined as well:
\code
// in some .cpp file
#define RAPIDJSON_NAMESPACE my::rapidjson
#define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson {
#define RAPIDJSON_NAMESPACE_END } }
#include "rapidjson/..."
\endcode
\see rapidjson
*/
/*! \def RAPIDJSON_NAMESPACE_BEGIN
\ingroup RAPIDJSON_CONFIG
\brief provide custom rapidjson namespace (opening expression)
\see RAPIDJSON_NAMESPACE
*/
/*! \def RAPIDJSON_NAMESPACE_END
\ingroup RAPIDJSON_CONFIG
\brief provide custom rapidjson namespace (closing expression)
\see RAPIDJSON_NAMESPACE
*/
#ifndef RAPIDJSON_NAMESPACE
#define RAPIDJSON_NAMESPACE rapidjson
#endif
#ifndef RAPIDJSON_NAMESPACE_BEGIN
#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE {
#endif
#ifndef RAPIDJSON_NAMESPACE_END
#define RAPIDJSON_NAMESPACE_END }
#endif
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_NO_INT64DEFINE
/*! \def RAPIDJSON_NO_INT64DEFINE
\ingroup RAPIDJSON_CONFIG
\brief Use external 64-bit integer types.
RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types
to be available at global scope.
If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to
prevent RapidJSON from defining its own types.
*/
#ifndef RAPIDJSON_NO_INT64DEFINE
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
#ifdef _MSC_VER
#include "msinttypes/stdint.h"
#include "msinttypes/inttypes.h"
#else
// Other compilers should have this.
#include <stdint.h>
#include <inttypes.h>
#endif
//!@endcond
#ifdef RAPIDJSON_DOXYGEN_RUNNING
#define RAPIDJSON_NO_INT64DEFINE
#endif
#endif // RAPIDJSON_NO_INT64DEFINE
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_FORCEINLINE
#ifndef RAPIDJSON_FORCEINLINE
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
#if defined(_MSC_VER) && !defined(NDEBUG)
#define RAPIDJSON_FORCEINLINE __forceinline
#elif defined(__GNUC__) && __GNUC__ >= 4 && !defined(NDEBUG)
#define RAPIDJSON_FORCEINLINE __attribute__((always_inline))
#else
#define RAPIDJSON_FORCEINLINE
#endif
//!@endcond
#endif // RAPIDJSON_FORCEINLINE
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ENDIAN
#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine
#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine
//! Endianness of the machine.
/*!
\def RAPIDJSON_ENDIAN
\ingroup RAPIDJSON_CONFIG
GCC 4.6 provided macro for detecting endianness of the target machine. But other
compilers may not have this. User can define RAPIDJSON_ENDIAN to either
\ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN.
Default detection implemented with reference to
\li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html
\li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp
*/
#ifndef RAPIDJSON_ENDIAN
// Detect with GCC 4.6's macro
# ifdef __BYTE_ORDER__
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
# else
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
# endif // __BYTE_ORDER__
// Detect with GLIBC's endian.h
# elif defined(__GLIBC__)
# include <endian.h>
# if (__BYTE_ORDER == __LITTLE_ENDIAN)
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
# elif (__BYTE_ORDER == __BIG_ENDIAN)
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
# else
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
# endif // __GLIBC__
// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro
# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
// Detect with architecture macros
# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
# elif defined(RAPIDJSON_DOXYGEN_RUNNING)
# define RAPIDJSON_ENDIAN
# else
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
# endif
#endif // RAPIDJSON_ENDIAN
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_64BIT
//! Whether using 64-bit architecture
#ifndef RAPIDJSON_64BIT
#if defined(__LP64__) || defined(_WIN64) || defined(__EMSCRIPTEN__)
#define RAPIDJSON_64BIT 1
#else
#define RAPIDJSON_64BIT 0
#endif
#endif // RAPIDJSON_64BIT
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ALIGN
//! Data alignment of the machine.
/*! \ingroup RAPIDJSON_CONFIG
\param x pointer to align
Some machines require strict data alignment. Currently the default uses 4 bytes
alignment. User can customize by defining the RAPIDJSON_ALIGN function macro.
*/
#ifndef RAPIDJSON_ALIGN
#if RAPIDJSON_64BIT == 1
#define RAPIDJSON_ALIGN(x) (((x) + static_cast<uint64_t>(7u)) & ~static_cast<uint64_t>(7u))
#else
#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u)
#endif
#endif
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_UINT64_C2
//! Construct a 64-bit literal by a pair of 32-bit integer.
/*!
64-bit literal with or without ULL suffix is prone to compiler warnings.
UINT64_C() is a C macro which causes compilation problems.
Use this macro to define 64-bit constants by a pair of 32-bit integer.
*/
#ifndef RAPIDJSON_UINT64_C2
#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast<uint64_t>(high32) << 32) | static_cast<uint64_t>(low32))
#endif
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD
/*! \def RAPIDJSON_SIMD
\ingroup RAPIDJSON_CONFIG
\brief Enable SSE2/SSE4.2 optimization.
RapidJSON supports optimized implementations for some parsing operations
based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible
processors.
To enable these optimizations, two different symbols can be defined;
\code
// Enable SSE2 optimization.
#define RAPIDJSON_SSE2
// Enable SSE4.2 optimization.
#define RAPIDJSON_SSE42
\endcode
\c RAPIDJSON_SSE42 takes precedence, if both are defined.
If any of these symbols is defined, RapidJSON defines the macro
\c RAPIDJSON_SIMD to indicate the availability of the optimized code.
*/
#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \
|| defined(RAPIDJSON_DOXYGEN_RUNNING)
#define RAPIDJSON_SIMD
#endif
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_NO_SIZETYPEDEFINE
#ifndef RAPIDJSON_NO_SIZETYPEDEFINE
/*! \def RAPIDJSON_NO_SIZETYPEDEFINE
\ingroup RAPIDJSON_CONFIG
\brief User-provided \c SizeType definition.
In order to avoid using 32-bit size types for indexing strings and arrays,
define this preprocessor symbol and provide the type rapidjson::SizeType
before including RapidJSON:
\code
#define RAPIDJSON_NO_SIZETYPEDEFINE
namespace rapidjson { typedef ::std::size_t SizeType; }
#include "rapidjson/..."
\endcode
\see rapidjson::SizeType
*/
#ifdef RAPIDJSON_DOXYGEN_RUNNING
#define RAPIDJSON_NO_SIZETYPEDEFINE
#endif
RAPIDJSON_NAMESPACE_BEGIN
//! Size type (for string lengths, array sizes, etc.)
/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms,
instead of using \c size_t. Users may override the SizeType by defining
\ref RAPIDJSON_NO_SIZETYPEDEFINE.
*/
typedef unsigned SizeType;
RAPIDJSON_NAMESPACE_END
#endif
// always import std::size_t to rapidjson namespace
RAPIDJSON_NAMESPACE_BEGIN
using std::size_t;
RAPIDJSON_NAMESPACE_END
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ASSERT
//! Assertion.
/*! \ingroup RAPIDJSON_CONFIG
By default, rapidjson uses C \c assert() for internal assertions.
User can override it by defining RAPIDJSON_ASSERT(x) macro.
\note Parsing errors are handled and can be customized by the
\ref RAPIDJSON_ERRORS APIs.
*/
#ifndef RAPIDJSON_ASSERT
#include <cassert>
#define RAPIDJSON_ASSERT(x) assert(x)
#endif // RAPIDJSON_ASSERT
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_STATIC_ASSERT
// Adopt from boost
#ifndef RAPIDJSON_STATIC_ASSERT
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
RAPIDJSON_NAMESPACE_BEGIN
// Pre-C++11 static_assert emulation (adopted from Boost):
// only the <true> specialization is defined, so sizeof(STATIC_ASSERTION_FAILURE<cond>)
// fails to compile (incomplete type) exactly when cond is false.
template <bool x> struct STATIC_ASSERTION_FAILURE;
template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };
// Dummy carrier type; RAPIDJSON_STATIC_ASSERT typedefs it to force evaluation of the sizeof above.
template<int x> struct StaticAssertTest {};
RAPIDJSON_NAMESPACE_END
#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y)
#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y)
#define RAPIDJSON_DO_JOIN2(X, Y) X##Y
#if defined(__GNUC__)
#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
#else
#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
#endif
//!@endcond
/*! \def RAPIDJSON_STATIC_ASSERT
\brief (Internal) macro to check for conditions at compile-time
\param x compile-time condition
\hideinitializer
*/
#define RAPIDJSON_STATIC_ASSERT(x) \
typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \
sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \
RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
#endif
///////////////////////////////////////////////////////////////////////////////
// Helpers
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
#define RAPIDJSON_MULTILINEMACRO_BEGIN do {
#define RAPIDJSON_MULTILINEMACRO_END \
} while((void)0, 0)
// adopted from Boost
#define RAPIDJSON_VERSION_CODE(x,y,z) \
(((x)*100000) + ((y)*100) + (z))
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF
#if defined(__GNUC__)
#define RAPIDJSON_GNUC \
RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__)
#endif
#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0))
#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x))
#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x)
#define RAPIDJSON_DIAG_OFF(x) \
RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x)))
// push/pop support in Clang and GCC>=4.6
#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0))
#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
#else // GCC >= 4.2, < 4.6
#define RAPIDJSON_DIAG_PUSH /* ignored */
#define RAPIDJSON_DIAG_POP /* ignored */
#endif
#elif defined(_MSC_VER)
// pragma (MSVC specific)
#define RAPIDJSON_PRAGMA(x) __pragma(x)
#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x))
#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x)
#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
#else
#define RAPIDJSON_DIAG_OFF(x) /* ignored */
#define RAPIDJSON_DIAG_PUSH /* ignored */
#define RAPIDJSON_DIAG_POP /* ignored */
#endif // RAPIDJSON_DIAG_*
///////////////////////////////////////////////////////////////////////////////
// C++11 features
#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS
#if defined(__clang__)
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS __has_feature(cxx_rvalue_references) && \
(defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
(defined(_MSC_VER) && _MSC_VER >= 1600)
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
#else
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
#endif
#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT
#if defined(__clang__)
#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept)
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__))
// (defined(_MSC_VER) && _MSC_VER >= ????) // not yet supported
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
#else
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
#endif
#endif
#if RAPIDJSON_HAS_CXX11_NOEXCEPT
#define RAPIDJSON_NOEXCEPT noexcept
#else
#define RAPIDJSON_NOEXCEPT /* noexcept */
#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
// no automatic detection, yet
#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0
#endif
//!@endcond
///////////////////////////////////////////////////////////////////////////////
// new/delete
#ifndef RAPIDJSON_NEW
///! customization point for global \c new
#define RAPIDJSON_NEW(x) new x
#endif
#ifndef RAPIDJSON_DELETE
///! customization point for global \c delete
#define RAPIDJSON_DELETE(x) delete x
#endif
///////////////////////////////////////////////////////////////////////////////
// Allocators and Encodings
#include "allocators.h"
#include "encodings.h"
/*! \namespace rapidjson
\brief main RapidJSON namespace
\see RAPIDJSON_NAMESPACE
*/
RAPIDJSON_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
// Stream
/*! \class rapidjson::Stream
\brief Concept for reading and writing characters.
For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd().
For write-only stream, only need to implement Put() and Flush().
\code
concept Stream {
typename Ch; //!< Character type of the stream.
//! Read the current character from stream without moving the read cursor.
Ch Peek() const;
//! Read the current character from stream and moving the read cursor to next character.
Ch Take();
//! Get the current read cursor.
//! \return Number of characters read from start.
size_t Tell();
//! Begin writing operation at the current read pointer.
//! \return The begin writer pointer.
Ch* PutBegin();
//! Write a character.
void Put(Ch c);
//! Flush the buffer.
void Flush();
//! End the writing operation.
//! \param begin The begin write pointer returned by PutBegin().
//! \return Number of characters written.
size_t PutEnd(Ch* begin);
}
\endcode
*/
//! Provides additional information for stream.
/*!
By using traits pattern, this type provides a default configuration for stream.
For custom stream, this type can be specialized for other configuration.
See TEST(Reader, CustomStringStream) in readertest.cpp for example.
*/
template<typename Stream>
struct StreamTraits {
//! Whether to make local copy of stream for optimization during parsing.
/*!
By default, for safety, streams do not use local copy optimization.
Stream that can be copied fast should specialize this, like StreamTraits<StringStream>.
Non-zero means "enabled"; the specialization for GenericStringStream sets it to 1.
*/
enum { copyOptimization = 0 };
};
//! Put N copies of a character to a stream.
/*! Generic fallback that emits the character one at a time.
    Streams with a cheaper bulk write may specialize this template
    (see the memset-based specialization for StringBuffer).
*/
template<typename Stream, typename Ch>
inline void PutN(Stream& stream, Ch c, size_t n) {
    while (n > 0) {
        stream.Put(c);
        --n;
    }
}
///////////////////////////////////////////////////////////////////////////////
// StringStream
//! Read-only string stream.
/*! Wraps a caller-owned character array and exposes the read-only half of
    the Stream concept; every write operation asserts.
    \note implements Stream concept
*/
template <typename Encoding>
struct GenericStringStream {
    typedef typename Encoding::Ch Ch;

    GenericStringStream(const Ch *src) : src_(src), head_(src) {}

    // --- Reading ---
    Ch Peek() const { return *src_; }
    Ch Take() {
        Ch c = *src_;
        ++src_;
        return c;
    }
    //! Number of characters consumed so far.
    size_t Tell() const {
        return static_cast<size_t>(src_ - head_);
    }

    // --- Writing: not supported on this stream type ---
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    const Ch* src_;     //!< Current read position.
    const Ch* head_;    //!< Original head of the string.
};
template <typename Encoding>
struct StreamTraits<GenericStringStream<Encoding> > {
    // The stream is just two raw pointers, so a local copy during parsing is cheap.
    enum { copyOptimization = 1 };
};

//! String stream with UTF8 encoding.
typedef GenericStringStream<UTF8<> > StringStream;
///////////////////////////////////////////////////////////////////////////////
// InsituStringStream
//! A read-write string stream.
/*! This string stream is particularly designed for in-situ parsing:
    PutBegin() starts writing at the current read position, so decoded
    output overwrites input that has already been consumed.
    \note implements Stream concept
*/
template <typename Encoding>
struct GenericInsituStringStream {
    typedef typename Encoding::Ch Ch;

    GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {}

    // Read
    Ch Peek() { return *src_; }
    Ch Take() { return *src_++; }
    size_t Tell() { return static_cast<size_t>(src_ - head_); }

    // Write
    // PutBegin() must have been called first; dst_ stays null until then.
    void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; }
    Ch* PutBegin() { return dst_ = src_; }
    size_t PutEnd(Ch* begin) { return static_cast<size_t>(dst_ - begin); }
    void Flush() {}

    // Reserve count characters at the write cursor and return their start.
    // NOTE(review): neither Push nor Pop checks bounds — callers must not
    // write past the region already consumed by the read cursor.
    Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; }
    void Pop(size_t count) { dst_ -= count; }

    Ch* src_;   //!< Current read position.
    Ch* dst_;   //!< Current write position (null before PutBegin()).
    Ch* head_;  //!< Original head of the string.
};
template <typename Encoding>
struct StreamTraits<GenericInsituStringStream<Encoding> > {
    // Only three raw pointers — cheap to copy locally during parsing.
    enum { copyOptimization = 1 };
};

//! Insitu string stream with UTF8 encoding.
typedef GenericInsituStringStream<UTF8<> > InsituStringStream;
///////////////////////////////////////////////////////////////////////////////
// Type
//! Type of JSON value
// NOTE(review): the values are explicit and used in comparisons elsewhere
// (e.g. Prefix() asserts kStringType for object keys) — append new entries,
// do not renumber.
enum Type {
    kNullType = 0,      //!< null
    kFalseType = 1,     //!< false
    kTrueType = 2,      //!< true
    kObjectType = 3,    //!< object
    kArrayType = 4,     //!< array
    kStringType = 5,    //!< string
    kNumberType = 6     //!< number
};
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_RAPIDJSON_H_

File diff suppressed because it is too large Load Diff

View File

@@ -1,93 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_STRINGBUFFER_H_
#define RAPIDJSON_STRINGBUFFER_H_
#include "rapidjson.h"
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
#include <utility> // std::move
#endif
#include "internal/stack.h"
RAPIDJSON_NAMESPACE_BEGIN
//! Represents an in-memory output stream.
/*!
    \tparam Encoding Encoding of the stream.
    \tparam Allocator type for allocating memory buffer.
    \note implements Stream concept
*/
template <typename Encoding, typename Allocator = CrtAllocator>
class GenericStringBuffer {
public:
    typedef typename Encoding::Ch Ch;

    GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    //! Move semantics: steals the other buffer's stack storage.
    GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {}
    GenericStringBuffer& operator=(GenericStringBuffer&& rhs) {
        if (&rhs != this)
            stack_ = std::move(rhs.stack_);
        return *this;
    }
#endif

    //! Append one character (Stream concept).
    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
    void Flush() {}

    //! Discard all content; capacity handling is up to internal::Stack.
    void Clear() { stack_.Clear(); }
    void ShrinkToFit() {
        // Push and pop a null terminator. This is safe.
        // (Keeps one extra slot so a later GetString() cannot reallocate.)
        *stack_.template Push<Ch>() = '\0';
        stack_.ShrinkToFit();
        stack_.template Pop<Ch>(1);
    }

    //! Reserve count characters and return a pointer to them.
    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
    void Pop(size_t count) { stack_.template Pop<Ch>(count); }

    const Ch* GetString() const {
        // Push and pop a null terminator. This is safe.
        // stack_ is mutable so this const accessor can terminate in place.
        *stack_.template Push<Ch>() = '\0';
        stack_.template Pop<Ch>(1);
        return stack_.template Bottom<Ch>();
    }

    // NOTE(review): returns whatever internal::Stack reports — for multi-byte
    // Ch this is presumably a byte count, not a character count; confirm.
    size_t GetSize() const { return stack_.GetSize(); }

    static const size_t kDefaultCapacity = 256;
    mutable internal::Stack<Allocator> stack_;  //!< mutable: see GetString()

private:
    // Prohibit copy constructor & assignment operator.
    GenericStringBuffer(const GenericStringBuffer&);
    GenericStringBuffer& operator=(const GenericStringBuffer&);
};
//! String buffer with UTF8 encoding
typedef GenericStringBuffer<UTF8<> > StringBuffer;
//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(GenericStringBuffer<UTF8<> >& stream, char c, size_t n) {
    // Reserve all n characters on the underlying stack in one step,
    // then fill them with a single memset instead of n Put() calls.
    std::memset(stream.stack_.Push<char>(n), c, n * sizeof(c));
}
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_STRINGBUFFER_H_

View File

@@ -1,395 +0,0 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_WRITER_H_
#define RAPIDJSON_WRITER_H_
#include "rapidjson.h"
#include "internal/stack.h"
#include "internal/strfunc.h"
#include "internal/dtoa.h"
#include "internal/itoa.h"
#include "stringbuffer.h"
#include <new> // placement new
#if RAPIDJSON_HAS_STDSTRING
#include <string>
#endif
#ifdef _MSC_VER
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
#endif
RAPIDJSON_NAMESPACE_BEGIN
//! JSON writer
/*! Writer implements the concept Handler.
    It generates JSON text by events to an output os.

    User may programmatically calls the functions of a writer to generate JSON text.

    On the other side, a writer can also be passed to objects that generates events,
    for example Reader::Parse() and Document::Accept().

    \tparam OutputStream Type of output stream.
    \tparam SourceEncoding Encoding of source string.
    \tparam TargetEncoding Encoding of output stream.
    \tparam StackAllocator Type of allocator for allocating memory of stack.
    \note implements Handler concept
*/
template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator>
class Writer {
public:
    typedef typename SourceEncoding::Ch Ch;

    //! Constructor
    /*! \param os Output stream.
        \param stackAllocator User supplied allocator. If it is null, it will create a private one.
        \param levelDepth Initial capacity of stack.
    */
    explicit
    Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) :
        os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), hasRoot_(false) {}

    //! Constructor without an output stream; Reset() must supply one before writing.
    explicit
    Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) :
        os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), hasRoot_(false) {}

    //! Reset the writer with a new stream.
    /*!
        This function reset the writer with a new stream and default settings,
        in order to make a Writer object reusable for output multiple JSONs.

        \param os New output stream.
        \code
        Writer<OutputStream> writer(os1);
        writer.StartObject();
        // ...
        writer.EndObject();
        writer.Reset(os2);
        writer.StartObject();
        // ...
        writer.EndObject();
        \endcode
    */
    void Reset(OutputStream& os) {
        os_ = &os;
        hasRoot_ = false;
        level_stack_.Clear();
    }

    //! Checks whether the output is a complete JSON.
    /*!
        A complete JSON has a complete root object or array.
    */
    bool IsComplete() const {
        return hasRoot_ && level_stack_.Empty();
    }

    /*!@name Implementation of Handler
        \see Handler
    */
    //@{

    bool Null() { Prefix(kNullType); return WriteNull(); }
    bool Bool(bool b) { Prefix(b ? kTrueType : kFalseType); return WriteBool(b); }
    bool Int(int i) { Prefix(kNumberType); return WriteInt(i); }
    bool Uint(unsigned u) { Prefix(kNumberType); return WriteUint(u); }
    bool Int64(int64_t i64) { Prefix(kNumberType); return WriteInt64(i64); }
    bool Uint64(uint64_t u64) { Prefix(kNumberType); return WriteUint64(u64); }

    //! Writes the given \c double value to the stream
    /*!
        \param d The value to be written.
        \return Whether it is succeed.
    */
    bool Double(double d) { Prefix(kNumberType); return WriteDouble(d); }

    bool String(const Ch* str, SizeType length, bool copy = false) {
        (void)copy;  // the writer never stores the string, so copy is irrelevant
        Prefix(kStringType);
        return WriteString(str, length);
    }

#if RAPIDJSON_HAS_STDSTRING
    bool String(const std::basic_string<Ch>& str) {
        return String(str.data(), SizeType(str.size()));
    }
#endif

    bool StartObject() {
        Prefix(kObjectType);
        // Placement-new a Level record on the raw byte stack.
        new (level_stack_.template Push<Level>()) Level(false);
        return WriteStartObject();
    }

    bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }

    bool EndObject(SizeType memberCount = 0) {
        (void)memberCount;
        RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
        RAPIDJSON_ASSERT(!level_stack_.template Top<Level>()->inArray);
        level_stack_.template Pop<Level>(1);
        bool ret = WriteEndObject();
        if (level_stack_.Empty()) // end of json text
            os_->Flush();
        return ret;
    }

    bool StartArray() {
        Prefix(kArrayType);
        new (level_stack_.template Push<Level>()) Level(true);
        return WriteStartArray();
    }

    bool EndArray(SizeType elementCount = 0) {
        (void)elementCount;
        RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
        RAPIDJSON_ASSERT(level_stack_.template Top<Level>()->inArray);
        level_stack_.template Pop<Level>(1);
        bool ret = WriteEndArray();
        if (level_stack_.Empty()) // end of json text
            os_->Flush();
        return ret;
    }
    //@}

    /*! @name Convenience extensions */
    //@{

    //! Simpler but slower overload.
    bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
    bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }

    //@}

protected:
    //! Information for each nested level
    struct Level {
        Level(bool inArray_) : valueCount(0), inArray(inArray_) {}
        size_t valueCount;  //!< number of values in this level
        bool inArray;       //!< true if in array, otherwise in object
    };

    static const size_t kDefaultLevelDepth = 32;

    bool WriteNull() {
        os_->Put('n'); os_->Put('u'); os_->Put('l'); os_->Put('l'); return true;
    }

    bool WriteBool(bool b) {
        if (b) {
            os_->Put('t'); os_->Put('r'); os_->Put('u'); os_->Put('e');
        }
        else {
            os_->Put('f'); os_->Put('a'); os_->Put('l'); os_->Put('s'); os_->Put('e');
        }
        return true;
    }

    bool WriteInt(int i) {
        char buffer[11];  // worst case "-2147483648" = 11 chars
        const char* end = internal::i32toa(i, buffer);
        for (const char* p = buffer; p != end; ++p)
            os_->Put(*p);
        return true;
    }

    bool WriteUint(unsigned u) {
        char buffer[10];  // worst case "4294967295" = 10 chars
        const char* end = internal::u32toa(u, buffer);
        for (const char* p = buffer; p != end; ++p)
            os_->Put(*p);
        return true;
    }

    bool WriteInt64(int64_t i64) {
        char buffer[21];  // worst case "-9223372036854775808" = 20 chars + slack
        const char* end = internal::i64toa(i64, buffer);
        for (const char* p = buffer; p != end; ++p)
            os_->Put(*p);
        return true;
    }

    bool WriteUint64(uint64_t u64) {
        char buffer[20];  // worst case "18446744073709551615" = 20 chars
        char* end = internal::u64toa(u64, buffer);
        for (char* p = buffer; p != end; ++p)
            os_->Put(*p);
        return true;
    }

    bool WriteDouble(double d) {
        char buffer[25];  // dtoa's documented worst-case output length
        char* end = internal::dtoa(d, buffer);
        for (char* p = buffer; p != end; ++p)
            os_->Put(*p);
        return true;
    }

    bool WriteString(const Ch* str, SizeType length) {
        static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
        // escape[c] is 0 (emit as-is), a short escape letter ('b','t','n','f','r','"','\\'),
        // or 'u' meaning the character needs a \uXXXX escape.
        static const char escape[256] = {
#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
            //0 1 2 3 4 5 6 7 8 9 A B C D E F
            'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00
            'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10
            0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20
            Z16, Z16, // 30~4F
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50
            Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF
#undef Z16
        };

        os_->Put('\"');
        GenericStringStream<SourceEncoding> is(str);
        while (is.Tell() < length) {
            const Ch c = is.Peek();
            if (!TargetEncoding::supportUnicode && (unsigned)c >= 0x80) {
                // Unicode escaping
                unsigned codepoint;
                if (!SourceEncoding::Decode(is, &codepoint))
                    return false;
                os_->Put('\\');
                os_->Put('u');
                if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) {
                    // Basic Multilingual Plane: a single \uXXXX suffices.
                    os_->Put(hexDigits[(codepoint >> 12) & 15]);
                    os_->Put(hexDigits[(codepoint >> 8) & 15]);
                    os_->Put(hexDigits[(codepoint >> 4) & 15]);
                    os_->Put(hexDigits[(codepoint ) & 15]);
                }
                else {
                    RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF);
                    // Surrogate pair
                    unsigned s = codepoint - 0x010000;
                    unsigned lead = (s >> 10) + 0xD800;
                    unsigned trail = (s & 0x3FF) + 0xDC00;
                    os_->Put(hexDigits[(lead >> 12) & 15]);
                    os_->Put(hexDigits[(lead >> 8) & 15]);
                    os_->Put(hexDigits[(lead >> 4) & 15]);
                    os_->Put(hexDigits[(lead ) & 15]);
                    os_->Put('\\');
                    os_->Put('u');
                    os_->Put(hexDigits[(trail >> 12) & 15]);
                    os_->Put(hexDigits[(trail >> 8) & 15]);
                    os_->Put(hexDigits[(trail >> 4) & 15]);
                    os_->Put(hexDigits[(trail ) & 15]);
                }
            }
            else if ((sizeof(Ch) == 1 || (unsigned)c < 256) && escape[(unsigned char)c]) {
                is.Take();
                os_->Put('\\');
                os_->Put(escape[(unsigned char)c]);
                if (escape[(unsigned char)c] == 'u') {
                    // Control characters without a short escape: \u00XX.
                    os_->Put('0');
                    os_->Put('0');
                    os_->Put(hexDigits[(unsigned char)c >> 4]);
                    os_->Put(hexDigits[(unsigned char)c & 0xF]);
                }
            }
            else
                // Ordinary character: transcode from source to target encoding.
                if (!Transcoder<SourceEncoding, TargetEncoding>::Transcode(is, *os_))
                    return false;
        }
        os_->Put('\"');
        return true;
    }

    bool WriteStartObject() { os_->Put('{'); return true; }
    bool WriteEndObject() { os_->Put('}'); return true; }
    bool WriteStartArray() { os_->Put('['); return true; }
    bool WriteEndArray() { os_->Put(']'); return true; }

    //! Emit the separator (',' or ':') required before the next value and
    //! validate the structural rules of the current nesting level.
    void Prefix(Type type) {
        (void)type;
        if (level_stack_.GetSize() != 0) { // this value is not at root
            Level* level = level_stack_.template Top<Level>();
            if (level->valueCount > 0) {
                if (level->inArray)
                    os_->Put(','); // add comma if it is not the first element in array
                else // in object: even counts start a new member (','), odd follow a key (':')
                    os_->Put((level->valueCount % 2 == 0) ? ',' : ':');
            }
            if (!level->inArray && level->valueCount % 2 == 0)
                RAPIDJSON_ASSERT(type == kStringType);  // if it's in object, then even number should be a name
            level->valueCount++;
        }
        else {
            RAPIDJSON_ASSERT(!hasRoot_);    // Should only has one and only one root.
            hasRoot_ = true;
        }
    }

    OutputStream* os_;
    internal::Stack<StackAllocator> level_stack_;   //!< raw byte stack of Level records
    bool hasRoot_;

private:
    // Prohibit copy constructor & assignment operator.
    Writer(const Writer&);
    Writer& operator=(const Writer&);
};
// Full specialization for StringBuffer to prevent memory copying:
// reserve the worst-case digit count directly in the buffer, format in
// place, then give back the unused tail.
template<>
inline bool Writer<StringBuffer>::WriteInt(int i) {
    char *buffer = os_->Push(11);
    const char* end = internal::i32toa(i, buffer);
    os_->Pop(static_cast<size_t>(11 - (end - buffer)));
    return true;
}
// StringBuffer specialization: format in place, return the unused tail.
template<>
inline bool Writer<StringBuffer>::WriteUint(unsigned u) {
    char *buffer = os_->Push(10);
    const char* end = internal::u32toa(u, buffer);
    os_->Pop(static_cast<size_t>(10 - (end - buffer)));
    return true;
}
// StringBuffer specialization: format in place, return the unused tail.
template<>
inline bool Writer<StringBuffer>::WriteInt64(int64_t i64) {
    char *buffer = os_->Push(21);
    const char* end = internal::i64toa(i64, buffer);
    os_->Pop(static_cast<size_t>(21 - (end - buffer)));
    return true;
}
// StringBuffer specialization: format in place, return the unused tail.
template<>
inline bool Writer<StringBuffer>::WriteUint64(uint64_t u) {
    char *buffer = os_->Push(20);
    const char* end = internal::u64toa(u, buffer);
    os_->Pop(static_cast<size_t>(20 - (end - buffer)));
    return true;
}
// StringBuffer specialization: format in place, return the unused tail.
template<>
inline bool Writer<StringBuffer>::WriteDouble(double d) {
    char *buffer = os_->Push(25);
    char* end = internal::dtoa(d, buffer);
    os_->Pop(static_cast<size_t>(25 - (end - buffer)));
    return true;
}
RAPIDJSON_NAMESPACE_END
#ifdef _MSC_VER
RAPIDJSON_DIAG_POP
#endif
#endif // RAPIDJSON_WRITER_H_

View File

@@ -1,57 +0,0 @@
Tencent is pleased to support the open source community by making RapidJSON available.
Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License.
If you have downloaded a copy of the RapidJSON source code from Tencent, please note that RapidJSON source code is licensed under the MIT License, except for the third-party components listed below which are subject to different license terms. Your integration of RapidJSON into your own projects may require compliance with the MIT License, as well as the other licenses applicable to the third-party components included within RapidJSON. To avoid the problematic JSON license in your own projects, it's sufficient to exclude the bin/jsonchecker/ directory, as it's the only code under the JSON license.
A copy of the MIT License is included in this file.
Other dependencies and licenses:
Open Source Software Licensed Under the BSD License:
--------------------------------------------------------------------
The msinttypes r29
Copyright (c) 2006-2013 Alexander Chemeris
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Open Source Software Licensed Under the JSON License:
--------------------------------------------------------------------
json.org
Copyright (c) 2002 JSON.org
All Rights Reserved.
JSON_checker
Copyright (c) 2002 JSON.org
All Rights Reserved.
Terms of the JSON License:
---------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The Software shall be used for Good, not Evil.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Terms of the MIT License:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,129 +0,0 @@
![](doc/logo/rapidjson.png)
![](https://img.shields.io/badge/release-v1.0.2-blue.png)
## A fast JSON parser/generator for C++ with both SAX/DOM style API
Tencent is pleased to support the open source community by making RapidJSON available.
Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
* [RapidJSON GitHub](https://github.com/miloyip/rapidjson/)
* RapidJSON Documentation
* [English](http://rapidjson.org/)
* [简体中文](http://rapidjson.org/zh-cn/)
* [GitBook](https://www.gitbook.com/book/miloyip/rapidjson/) with downloadable PDF/EPUB/MOBI, without API reference.
## Build status
| [Linux][lin-link] | [Windows][win-link] | [Coveralls][cov-link] |
| :---------------: | :-----------------: | :-------------------: |
| ![lin-badge] | ![win-badge] | ![cov-badge] |
[lin-badge]: https://travis-ci.org/miloyip/rapidjson.png?branch=master "Travis build status"
[lin-link]: https://travis-ci.org/miloyip/rapidjson "Travis build status"
[win-badge]: https://ci.appveyor.com/api/projects/status/u658dcuwxo14a8m9/branch/master "AppVeyor build status"
[win-link]: https://ci.appveyor.com/project/miloyip/rapidjson/branch/master "AppVeyor build status"
[cov-badge]: https://coveralls.io/repos/miloyip/rapidjson/badge.png?branch=master
[cov-link]: https://coveralls.io/r/miloyip/rapidjson?branch=master
## Introduction
RapidJSON is a JSON parser and generator for C++. It was inspired by [RapidXml](http://rapidxml.sourceforge.net/).
* RapidJSON is small but complete. It supports both SAX and DOM style API. The SAX parser is only a half thousand lines of code.
* RapidJSON is fast. Its performance can be comparable to `strlen()`. It also optionally supports SSE2/SSE4.2 for acceleration.
* RapidJSON is self-contained. It does not depend on external libraries such as BOOST. It even does not depend on STL.
* RapidJSON is memory friendly. Each JSON value occupies exactly 16/20 bytes for most 32/64-bit machines (excluding text string). By default it uses a fast memory allocator, and the parser allocates memory compactly during parsing.
* RapidJSON is Unicode friendly. It supports UTF-8, UTF-16, UTF-32 (LE & BE), and their detection, validation and transcoding internally. For example, you can read a UTF-8 file and let RapidJSON transcode the JSON strings into UTF-16 in the DOM. It also supports surrogates and "\u0000" (null character).
More features can be read [here](doc/features.md).
JSON (JavaScript Object Notation) is a light-weight data exchange format. RapidJSON should be in full compliance with RFC7159/ECMA-404. More information about JSON can be obtained at
* [Introducing JSON](http://json.org/)
* [RFC7159: The JavaScript Object Notation (JSON) Data Interchange Format](http://www.ietf.org/rfc/rfc7159.txt)
* [Standard ECMA-404: The JSON Data Interchange Format](http://www.ecma-international.org/publications/standards/Ecma-404.htm)
## Compatibility
RapidJSON is cross-platform. Some platform/compiler combinations which have been tested are shown as follows.
* Visual C++ 2008/2010/2013 on Windows (32/64-bit)
* GNU C++ 3.8.x on Cygwin
* Clang 3.4 on Mac OS X (32/64-bit) and iOS
* Clang 3.4 on Android NDK
Users can build and run the unit tests on their platform/compiler.
## Installation
RapidJSON is a header-only C++ library. Just copy the `include/rapidjson` folder to system or project's include path.
RapidJSON uses following software as its dependencies:
* [CMake](http://www.cmake.org) as a general build tool
* (optional)[Doxygen](http://www.doxygen.org) to build documentation
* (optional)[googletest](https://code.google.com/p/googletest/) for unit and performance testing
To generate user documentation and run tests please proceed with the steps below:
1. Execute `git submodule update --init` to get the files of thirdparty submodules (google test).
2. Create directory called `build` in rapidjson source directory.
3. Change to `build` directory and run `cmake ..` command to configure your build. Windows users can do the same with cmake-gui application.
4. On Windows, build the solution found in the build directory. On Linux, run `make` from the build directory.
On successful build you will find compiled test and example binaries in `bin`
directory. The generated documentation will be available in `doc/html`
directory of the build tree. To run tests after finished build please run `make
test` or `ctest` from your build tree. You can get detailed output using `ctest
-V` command.
It is possible to install library system-wide by running `make install` command
from the build tree with administrative privileges. This will install all files
according to system preferences. Once RapidJSON is installed, it is possible
to use it from other CMake projects by adding `find_package(RapidJSON)` line to
your CMakeLists.txt.
## Usage at a glance
This simple example parses a JSON string into a document (DOM), makes a simple modification to the DOM, and finally stringifies the DOM to a JSON string.
~~~~~~~~~~cpp
// rapidjson/example/simpledom/simpledom.cpp
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"
#include <iostream>
using namespace rapidjson;
int main() {
// 1. Parse a JSON string into DOM.
const char* json = "{\"project\":\"rapidjson\",\"stars\":10}";
Document d;
d.Parse(json);
// 2. Modify it by DOM.
Value& s = d["stars"];
s.SetInt(s.GetInt() + 1);
// 3. Stringify the DOM
StringBuffer buffer;
Writer<StringBuffer> writer(buffer);
d.Accept(writer);
// Output {"project":"rapidjson","stars":11}
std::cout << buffer.GetString() << std::endl;
return 0;
}
~~~~~~~~~~
Note that this example did not handle potential errors.
The following diagram shows the process.
![simpledom](doc/diagram/simpledom.png)
More [examples](https://github.com/miloyip/rapidjson/tree/master/example) are available.

View File

@@ -0,0 +1,572 @@
#include "Indexer.h"
#include <vector>
#include <filesystem>
#include <memory>
#include <thread>
#include <unordered_map>
#include "json.hpp"
#include "convmath.h"
#include "converter_utils.h"
#include "stuff.h"
#include "LASWriter.hpp"
#include "LASLoader.hpp"
#include "TaskPool.h"
#include "Structures.h"
#include "PotreeWriter.h"
using std::vector;
using std::thread;
using std::unordered_map;
using std::string;
using std::to_string;
using std::make_shared;
using std::shared_ptr;
using json = nlohmann::json;
namespace fs = std::filesystem;
namespace bluenoise {
struct Indexer{
shared_ptr<Node> root;
shared_ptr<Chunks> chunks;
shared_ptr<PotreeWriter> writer;
double spacing = 1.0;
Attributes attributes;
// Default constructor: members (root, chunks, writer, spacing, attributes)
// are presumably assigned by the caller after construction — confirm usage.
Indexer(){
}
// Index the tree starting at the root node.
// NOTE(review): this is currently a stub — the previous body only copied
// `this->root` into an unused local and did nothing with it (dead code,
// removed). Presumably this should eventually delegate to indexNode(root);
// confirm the intent before wiring it up.
void indexRoot() {
}
// Subsample one chunk's tree bottom-up (Poisson-disc style selection):
// each node keeps the points that are at least `spacing / 2^level` apart,
// pulls accepted points up from its children, and writes finished children
// to disk via the PotreeWriter.
void indexNode(shared_ptr<Node> chunkRoot){

    // Post-order traversal; children already flagged isSubsampled are skipped.
    function<void(shared_ptr<Node>, function<void(Node*)>)> traverse;
    traverse = [&traverse](shared_ptr<Node> node, function<void(Node*)> callback) {
        for (auto child : node->children) {
            if (child == nullptr || child->isSubsampled) {
                continue;
            }
            traverse(child, callback);
        }
        callback(node.get());
    };

    // Captured by value so the lambda does not depend on `this`.
    double baseSpacing = this->spacing;
    auto writer = this->writer;
    auto attributes = this->attributes;

    traverse(chunkRoot, [baseSpacing, writer, attributes](Node* node){

        // Level is derived from the node name length ("r", "r0", "r01", ...).
        // NOTE(review): size_t -> int narrowing; fine for realistic depths.
        int level = node->name.size() - 1;
        auto min = node->min;
        auto max = node->max;
        auto size = node->max - node->min;
        auto center = min + size * 0.5;

        // Minimum distance between accepted points halves at each level.
        double spacing = baseSpacing / pow(2.0, level);
        double spacingSquared = spacing * spacing;

        vector<Point> accepted;

        // Order candidates by distance to the node center, farthest first.
        auto byCenter = [center](Point& a, Point& b) {
            //return a.x - b.x;

            double da = a.squaredDistanceTo(center);
            double db = b.squaredDistanceTo(center);

            return da > db;
        };
        //std::sort(node->store.begin(), node->store.end(), byCenter);

        // Reject candidates in a sine-modulated band along the node borders so
        // that neighboring nodes do not both accept near-duplicates at their
        // shared boundary.
        // NOTE(review): 3.1415 is a coarse pi approximation (consider M_PI),
        // and the `center` parameter is unused here.
        auto isInConflictZone = [min, max, spacing](Point& candidate, Vector3<double>& center) {

            double wx = sin(3.1415 * candidate.x / spacing);
            double wy = sin(3.1415 * candidate.y / spacing);
            double wz = sin(3.1415 * candidate.z / spacing);

            double wxy = wx * wy;
            double wxz = wx * wz;
            double wyz = wy * wz;

            double adjust = 0.8;

            bool conflicting = false;
            conflicting = conflicting || (candidate.x - min.x) < 0.5 * (wyz + 1.0) * spacing * adjust;
            conflicting = conflicting || (candidate.x - max.x) > 0.5 * (wyz - 1.0) * spacing * adjust;
            conflicting = conflicting || (candidate.y - min.y) < 0.5 * (wxz + 1.0) * spacing * adjust;
            conflicting = conflicting || (candidate.y - max.y) > 0.5 * (wxz - 1.0) * spacing * adjust;
            conflicting = conflicting || (candidate.z - min.z) < 0.5 * (wxy + 1.0) * spacing * adjust;
            conflicting = conflicting || (candidate.z - max.z) > 0.5 * (wxy - 1.0) * spacing * adjust;

            return conflicting;
        };

        // A candidate is accepted if it is outside the conflict zone and at
        // least `spacing` away from every previously accepted point.
        // NOTE(review): linear scan over `accepted` makes this O(n^2) per node;
        // the commented-out early-exit hints at a planned optimization.
        auto isDistant = [&accepted, spacingSquared, spacing, &isInConflictZone](Point& candidate, Vector3<double>& center){

            if (isInConflictZone(candidate, center)) {
                return false;
            }

            for(int i = accepted.size() - 1; i >= 0; i--){
                Point& prev = accepted[i];

                double cc = sqrt(candidate.squaredDistanceTo(center));
                double pc = sqrt(prev.squaredDistanceTo(center));

                //if (cc > pc + spacing) {
                //	return true;
                //}
                //if (cc < pc - spacing) {
                //	return true;
                //}

                auto distanceSquared = candidate.squaredDistanceTo(prev);

                if(distanceSquared < spacingSquared){
                    return false;
                }
            }

            return true;
        };

        if(node->isLeaf()){
            // Leaf: subsample the raw store; accepted points become this
            // node's payload, rejected ones stay in the store.
            std::sort(node->store.begin(), node->store.end(), byCenter);

            vector<Point> rejected;

            for(Point& candidate : node->store){
                bool distant = isDistant(candidate, center);

                if(distant){
                    accepted.push_back(candidate);
                } else {
                    rejected.push_back(candidate);
                }
            }

            node->points = accepted;
            node->store = rejected;
        }else{
            // Inner node: pull accepted points up from the children.
            // count how many points were accepted from each child
            vector<int> acceptedByCounts(8, 0);
            vector<shared_ptr<Node>> childsToClear;
            vector<shared_ptr<Node>> childsToRemove;

            for (int i = 0; i < 8; i++) {
                auto child = node->children[i];

                if(child == nullptr){
                    continue;
                }

                std::sort(child->points.begin(), child->points.end(), byCenter);

                vector<Point> rejected;

                for(Point& candidate : child->points){
                    bool distant = isDistant(candidate, center);

                    if(distant){
                        accepted.push_back(candidate);
                        acceptedByCounts[i]++;
                    }else{
                        rejected.push_back(candidate);
                    }
                }
                child->points = rejected;

                bool isEmptyChild = child->points.size() == 0 && child->store.size() == 0;
                if (isEmptyChild) {
                    // remove
                    childsToRemove.push_back(child);
                } else {
                    // write
                    auto childPtr = child.get();
                    writer->writeNode(childPtr);

                    childsToClear.push_back(child);
                }
            }

            // Repack the attribute bytes of all accepted points into one
            // contiguous buffer, in child order, and rewrite point indices
            // to point into the new buffer.
            int numPoints = accepted.size();
            int bytesPerPoint = attributes.byteSize;
            auto attributeBuffer = make_shared<Buffer>(numPoints * bytesPerPoint);
            int targetIndex = 0;
            for (int i = 0; i < 8; i++) {
                auto child = node->children[i];

                // When child is nullptr, acceptedByCounts[i] is 0 and this
                // inner loop is skipped, so child is never dereferenced null.
                int acceptedByChildCount = acceptedByCounts[i];
                for (int j = 0; j < acceptedByChildCount; j++) {
                    auto& point = accepted[targetIndex];
                    auto sourceIndex = point.index;

                    auto source = child->attributeBuffer->dataU8 + sourceIndex * bytesPerPoint;
                    auto target = attributeBuffer->dataU8 + targetIndex * bytesPerPoint;
                    memcpy(target, source, bytesPerPoint);

                    point.index = targetIndex;

                    targetIndex++;
                }

                //if(child != nullptr){
                //	vector<uint8_t> viewS(child->attributeBuffer->dataU8, child->attributeBuffer->dataU8 + 56);
                //	vector<uint8_t> viewT(attributeBuffer->dataU8, attributeBuffer->dataU8 + 56);

                //	int a = 10;
                //}
            }
            node->points = accepted;
            node->attributeBuffer = attributeBuffer;

            // Written children can release their memory; empty children are
            // unlinked from the parent entirely.
            for (auto child : childsToClear) {
                child->clear();
            }

            for (auto child : childsToRemove) {
                int childIndex = child->name.at(child->name.size() - 1) - '0';
                child->parent->children[childIndex] = nullptr;
            }
        }

        node->isSubsampled = true;
        //node->points = accepted;

        // The chunk root has no parent within this chunk; write it here.
        if(node->parent == nullptr){
            writer->writeNode(node);
        }

        //cout << repeat(" ", level) << node->name << ": " << accepted.size() << endl;
    });
}
shared_ptr<Points> loadChunk(shared_ptr<Chunk> chunk, shared_ptr<Node> targetNode){
	// Loads all points of a chunk file, distributes them into the octree
	// below targetNode, and builds a compact per-node attribute buffer for
	// every node that keeps points in its store.
	// Returns the loaded points so the caller can keep the source attribute
	// buffer alive (e.g. for debug output).
	auto points = loadPoints(chunk->file, chunk->attributes);

	// Distribute the points; Node::add may push them down into child nodes.
	for (Point& point : points->points) {
		targetNode->add(point);
	}

	// Re-pack attributes per node: Point::index currently refers to the
	// chunk-wide attribute buffer; after this traversal it refers to the
	// node-local buffer instead.
	targetNode->traverse([points](Node* node){
		int numPoints = node->store.size();
		if (numPoints == 0) {
			return;
		}

		auto bytesPerPoint = points->attributes.byteSize;
		auto attributeBuffer = make_shared<Buffer>(numPoints * bytesPerPoint);

		for (int i = 0; i < numPoints; i++) {
			auto& point = node->store[i];
			int index = point.index;

			auto source = points->attributeBuffer->dataU8 + index * bytesPerPoint;
			auto target = attributeBuffer->dataU8 + i * bytesPerPoint;
			memcpy(target, source, bytesPerPoint);

			point.index = i;
		}

		node->attributeBuffer = attributeBuffer;
	});

	// (removed: a second debug traversal that counted attribute copies and
	// re-read the rgba bytes of every point — all of its checks were
	// commented out, so it had no observable effect)

	return points;
}
};
// Builds the skeleton hierarchy from the root node "r" down to every node
// named by a chunk id. Only empty Node shells are created; no points yet.
shared_ptr<Node> buildHierarchyToRoot(shared_ptr<Chunks> chunks){

	shared_ptr<Node> root = make_shared<Node>("r", chunks->min, chunks->max);

	for (auto chunk : chunks->list) {
		const string& id = chunk->id;
		shared_ptr<Node> current = root;

		// Walk the id digit by digit ("r064" -> children 0, 6, 4),
		// creating every node on the path that does not exist yet.
		for (int level = 1; level < id.size(); level++) {
			int childIndex = id.at(level) - '0';
			auto existing = current->children[childIndex];

			if (existing == nullptr) {
				auto box = childBoundingBoxOf(current->min, current->max, childIndex);
				auto created = make_shared<Node>(id.substr(0, level + 1), box.min, box.max);
				created->parent = current.get();
				current->children[childIndex] = created;

				current = created;
			} else {
				current = existing;
			}
		}
	}

	return root;
}
// Reads chunks/metadata.json from a previous chunking pass and builds one
// Chunk record per chunk_*.bin file found in the chunk directory.
shared_ptr<Chunks> getChunks(string pathIn) {
	string chunkDirectory = pathIn + "/chunks";

	json js = json::parse(readTextFile(chunkDirectory + "/metadata.json"));

	Vector3<double> min = {
		js["min"][0].get<double>(),
		js["min"][1].get<double>(),
		js["min"][2].get<double>()
	};
	Vector3<double> max = {
		js["max"][0].get<double>(),
		js["max"][1].get<double>(),
		js["max"][2].get<double>()
	};

	// attribute layout exactly as written by the chunker
	Attributes attributes;
	for (auto jsAttribute : js["attributes"]) {
		auto jsEncoding = jsAttribute["encoding"];

		Attribute attribute;
		attribute.name = jsAttribute["name"];
		attribute.type = AttributeTypes::fromName(jsEncoding["type"]);
		attribute.numElements = jsAttribute["numElements"];
		attribute.bytes = jsEncoding["bytes"];

		attributes.add(attribute);
	}

	vector<shared_ptr<Chunk>> chunksToLoad;

	for (const auto& entry : fs::directory_iterator(chunkDirectory)) {
		string filename = entry.path().filename().string();

		if (!iEndsWith(filename, ".bin")) {
			continue;
		}

		// "chunk_r064.bin" -> "r064"
		string chunkID = stringReplace(stringReplace(filename, "chunk_", ""), ".bin", "");

		// derive the chunk's bounding box by descending from the root box
		// along the digits of the id
		BoundingBox box = { min, max };
		for (int i = 1; i < chunkID.size(); i++) {
			int index = chunkID[i] - '0'; // this feels so wrong...
			box = childBoundingBoxOf(box, index);
		}

		auto chunk = make_shared<Chunk>();
		chunk->file = entry.path().string();
		chunk->id = chunkID;
		chunk->attributes = attributes;
		chunk->min = box.min;
		chunk->max = box.max;

		chunksToLoad.push_back(chunk);
	}

	auto chunks = make_shared<Chunks>(chunksToLoad, min, max);
	chunks->attributes = attributes;

	return chunks;
}
int maxChecks = 0;
void doIndexing(string path) {
	// Entry point of the indexing phase: loads the chunks produced by the
	// chunker, builds the octree skeleton, subsamples each chunk's subtree on
	// a thread pool and finally writes the result through the PotreeWriter.

	// create base directory or remove files from previous build
	fs::create_directories(path + "/nodes");
	for (const auto& entry : std::filesystem::directory_iterator(path + "/nodes")) {
		std::filesystem::remove(entry);
	}
	std::filesystem::remove(path + "/octree.bin");

	auto chunks = getChunks(path);

	Indexer indexer;
	indexer.chunks = chunks;
	indexer.root = buildHierarchyToRoot(chunks);
	// spacing: 1/100 of the bounding-box diagonal at the root level
	indexer.spacing = chunks->max.distanceTo(chunks->min) / 100.0;
	indexer.attributes = chunks->attributes;
	//indexer.chunkAttributes = chunks->attributes;

	auto writer = make_shared<PotreeWriter>(path, chunks->min, chunks->max);
	indexer.writer = writer;

	//auto chunk = chunks->list[1];
	//indexChunk(chunk, path, spacing);

	// One task per chunk: load its points, then index/subsample its subtree.
	struct IndexTask {
		shared_ptr<Chunk> chunk;
		shared_ptr<Node> node;
		string path;

		IndexTask(shared_ptr<Chunk> chunk, shared_ptr<Node> node, string path) {
			this->chunk = chunk;
			this->node = node;
			this->path = path;
		}
	};

	auto processor = [&indexer](shared_ptr<IndexTask> task) {
		auto file = task->chunk->file;
		auto node = task->node;

		auto points = indexer.loadChunk(task->chunk, node);

		// NOTE(review): debug LAS dump — this static counter is incremented
		// concurrently by the pool's worker threads without synchronization
		// (data race); make it atomic or remove the debug output.
		static int i = 0;
		string laspath = file + "/../../debug/chunk_" + to_string(i) + ".las";
		writeLAS(laspath, points);
		i++;

		indexer.indexNode(node);
	};
	TaskPool<IndexTask> pool(15, processor);

	for(auto chunk : chunks->list){
		auto node = Node::find(indexer.root, chunk->id);
		shared_ptr<IndexTask> task = make_shared<IndexTask>(chunk, node, path);
		pool.addTask(task);
	}
	pool.close();

	// NOTE(review): unused — see TODO below.
	shared_ptr<Buffer> attributeBuffer;
	// TODO: properly initialize attribute buffer

	//indexer.indexRoot();
	// subsample the root after all chunk subtrees are done
	indexer.indexNode(indexer.root);

	writer->close();
}
}

View File

@@ -0,0 +1,16 @@
#pragma once
#include <string>
//#include "Structures.h"
namespace bluenoise {
using std::string;
void doIndexing(string path);
}

View File

@@ -0,0 +1,238 @@
#include "./modules/index_bluenoise/PotreeWriter.h"
#include <mutex>
#include <queue>
#include <fstream>
#include <sstream>
#include "json.hpp"
#include "converter_utils.h"
using std::mutex;
using std::lock_guard;
using std::deque;
using std::fstream;
using std::ios;
using std::stringstream;
using json = nlohmann::json;
namespace bluenoise {
mutex mtx_findNode;
mutex mtx_writeFile;
PotreeWriter::PotreeWriter(string path, Vector3<double> min, Vector3<double> max) {
	// Writer for the output octree. Opens <path>/octree.bin in binary append
	// mode; the caller is expected to have removed stale files beforehand.
	this->path = path;
	this->min = min;
	this->max = max;

	root = make_shared<WriterNode>("r", min, max);

	string octreeFilePath = path + "/octree.bin";
	fout.open(octreeFilePath, ios::out | ios::binary | ios::app);
}
PotreeWriter::~PotreeWriter(){
	// Ensure metadata/hierarchy are written even without an explicit close().
	close();
}
shared_ptr<WriterNode> PotreeWriter::findOrCreateWriterNode(string name){
	// Thread-safe lookup of the writer-side node with the given name,
	// creating any missing nodes along the path from the root.
	lock_guard<mutex> lock(mtx_findNode);

	auto node = root;

	for (int i = 1; i < name.size(); i++) {
		int childIndex = name.at(i) - '0';
		auto child = node->children[childIndex];

		if (child == nullptr) {
			string childName = name.substr(0, i + 1);
			auto box = childBoundingBoxOf(node->min, node->max, childIndex);

			child = make_shared<WriterNode>(childName, box.min, box.max);
			// fix: wire up the parent pointer; WriterNode::parent was
			// declared but never assigned anywhere.
			child->parent = node.get();

			node->children[childIndex] = child;
		}

		node = child;
	}

	return node;
}
void PotreeWriter::writeNode(Node* node){
	auto writerNode = findOrCreateWriterNode(node->name);

	// Encodes all points of the node (subsampled "points" + buffered "store")
	// into the output format: 12 byte int32 XYZ + 4 byte RGBA = 16 bytes per
	// point, and appends the result to octree.bin.
	// NOTE(review): the source layout (28 bytes per point, RGB at offset 24)
	// is hard-coded and must match the chunker's attribute layout — TODO
	// derive it from the Attributes definition instead.
	auto numPoints = node->points.size() + node->store.size();
	const int64_t sourceBytesPerPoint = 28;
	const int64_t targetBytesPerPoint = 16;
	auto bufferSize = numPoints * targetBytesPerPoint;

	// fix: RAII buffer instead of malloc/free — no leak on early exit, and
	// the unused locals (max, r, g, b) of the old version are gone.
	vector<uint8_t> buffer(bufferSize);
	uint8_t* bufferU8 = buffer.data();

	auto min = this->min;
	auto scale = this->scale;
	auto attributeBuffer = node->attributeBuffer;

	int64_t i = 0;
	auto writePoint = [&](Point& point){
		// position, quantized relative to the octree min
		int32_t x = (point.x - min.x) / scale.x;
		int32_t y = (point.y - min.y) / scale.y;
		int32_t z = (point.z - min.z) / scale.z;

		auto destXYZ = reinterpret_cast<int32_t*>(bufferU8 + i * targetBytesPerPoint);
		destXYZ[0] = x;
		destXYZ[1] = y;
		destXYZ[2] = z;

		// RGB from the node's attribute buffer, alpha fixed to 255
		int sourceIndex = point.index;
		auto source = attributeBuffer->dataU8 + sourceIndex * sourceBytesPerPoint;
		bufferU8[i * targetBytesPerPoint + 12] = source[24];
		bufferU8[i * targetBytesPerPoint + 13] = source[25];
		bufferU8[i * targetBytesPerPoint + 14] = source[26];
		bufferU8[i * targetBytesPerPoint + 15] = 255;

		i++;
	};

	for(Point& point : node->points){
		writePoint(point);
	}
	for(Point& point : node->store){
		writePoint(point);
	}

	{
		// serialize file writes; byteOffset is the append position
		lock_guard<mutex> lock(mtx_writeFile);

		fout.write(reinterpret_cast<const char*>(buffer.data()), bufferSize);

		writerNode->byteOffset = this->bytesWritten;
		writerNode->byteSize = bufferSize;
		writerNode->numPoints = numPoints;

		this->bytesWritten += bufferSize;
	}

	node->isFlushed = true;
}
void PotreeWriter::finishOctree(){
	// Flushes and closes octree.bin.
	fout.close();
}
void PotreeWriter::writeMetadata(){
	// Writes <path>/metadata.json.
	// NOTE(review): name, point count, projection and the attribute
	// scale/offset strings are placeholders — replace with real values.
	json js;

	js["name"] = "abc";
	js["boundingBox"]["min"] = {min.x, min.y, min.z};
	js["boundingBox"]["max"] = {max.x, max.y, max.z};
	js["projection"] = "";
	js["description"] = "";
	js["points"] = 123456;
	js["scale"] = 0.001;

	json jsAttributePosition;
	jsAttributePosition["name"] = "position";
	jsAttributePosition["type"] = "int32";
	jsAttributePosition["scale"] = "0.001";
	jsAttributePosition["offset"] = "3.1415";

	json jsAttributeColor;
	jsAttributeColor["name"] = "color";
	jsAttributeColor["type"] = "uint8";

	js["attributes"] = {jsAttributePosition, jsAttributeColor};

	string str = js.dump(4);

	string metadataPath = path + "/metadata.json";
	writeFile(metadataPath, str);
}
void PotreeWriter::writeHierarchy(){
function<json(shared_ptr<WriterNode>)> traverse = [&traverse](shared_ptr<WriterNode> node) -> json {
vector<json> jsChildren;
for (auto child : node->children) {
if (child == nullptr) {
continue;
}
json jsChild = traverse(child);
jsChildren.push_back(jsChild);
}
// uint64_t numPoints = node->numPoints;
int64_t byteOffset = node->byteOffset;
int64_t byteSize = node->byteSize;
int64_t numPoints = node->numPoints;
json jsNode = {
{"name", node->name},
{"numPoints", numPoints},
{"byteOffset", byteOffset},
{"byteSize", byteSize},
{"children", jsChildren}
};
return jsNode;
};
json js;
js["hierarchy"] = traverse(root);
string str = js.dump(4);
string hierarchyPath = path + "/hierarchy.json";
writeFile(hierarchyPath, str);
}
void PotreeWriter::close(){
	// Finalizes the output: closes octree.bin and writes metadata and
	// hierarchy. Safe to call multiple times (also invoked by the destructor).
	if(closed){
		return;
	}
	// fix: the guard flag was checked but never set, so an explicit close()
	// followed by the destructor ran the whole shutdown sequence twice.
	closed = true;

	finishOctree();
	writeMetadata();
	writeHierarchy();
}
}

View File

@@ -0,0 +1,74 @@
#pragma once
#include <string>
#include <memory>
#include <fstream>
#include "Structures.h"
using std::string;
using std::shared_ptr;
using std::make_shared;
using std::fstream;
namespace bluenoise {
// A node of the writer-side hierarchy. Tracks where this node's encoded
// point data lives inside octree.bin (offset, size, count).
struct WriterNode{
	string name;
	Vector3<double> min;
	Vector3<double> max;

	uint64_t numPoints = 0;
	uint64_t byteOffset = 0;  // start of this node's data in octree.bin
	uint64_t byteSize = 0;    // length of this node's data in octree.bin

	WriterNode* parent = nullptr;
	shared_ptr<WriterNode> children[8] = {
		nullptr, nullptr, nullptr, nullptr,
		nullptr, nullptr, nullptr, nullptr};

	WriterNode(string name, Vector3<double> min, Vector3<double> max)
		: name(name), min(min), max(max) {
	}
};
// Serializes the octree to disk: octree.bin (point data), metadata.json and
// hierarchy.json. Node lookup and file appends are guarded by internal
// mutexes (see PotreeWriter.cpp).
struct PotreeWriter {
	string path;              // output directory
	bool closed = false;      // guards close() against repeated execution
	fstream fout;             // octree.bin output stream (append mode)

	Vector3<double> min;
	Vector3<double> max;
	Vector3<double> scale = {0.001, 0.001, 0.001};  // coordinate quantization

	uint64_t bytesWritten = 0;  // current append offset inside octree.bin

	shared_ptr<WriterNode> root = nullptr;

	PotreeWriter(string path, Vector3<double> min, Vector3<double> max);

	~PotreeWriter();

	shared_ptr<WriterNode> findOrCreateWriterNode(string name);

	void writeNode(Node* node);

	void finishOctree();

	void writeMetadata();

	void writeHierarchy();

	void close();
};
}

View File

@@ -0,0 +1,145 @@
#include "Structures.h"
#include "converter_utils.h"
#include <memory>
using std::to_string;
using std::make_shared;
namespace bluenoise{
// Creates a named node covering the given bounding box.
Node::Node(string name, Vector3<double> min, Vector3<double> max)
	: name(name), min(min), max(max) {
	// pre-allocate the store so the first few thousand points do not trigger
	// incremental reallocations
	store.reserve(10'000);
}
void Node::processStore() {
storeExceeded = true;
for (Point& point : store) {
add(point);
}
store.clear();
store.shrink_to_fit();
}
void Node::add(Point& point) {
	// Adds a point to this subtree. Points are buffered in "store" first;
	// once storeSize points are collected, the store is flushed
	// (processStore) and all further points go straight to child nodes.
	numPoints++;

	if (!storeExceeded) {
		store.push_back(point);

		if (store.size() == storeSize) {
			processStore();
		}

		return;
	}

	int childIndex = childIndexOf(min, max, point);
	auto child = children[childIndex];

	if (child == nullptr) {
		string childName = name + to_string(childIndex);
		auto box = childBoundingBoxOf(min, max, childIndex);

		// fix: removed a leftover debugger-breakpoint stub
		// ("if (childName.size() > 10) { int a = 10; }") that had no effect.
		child = make_shared<Node>(childName, box.min, box.max);
		child->parent = this;

		children[childIndex] = child;
	}

	child->add(point);
}
void Node::traverse(function<void(Node*)> callback) {
	// Pre-order traversal: visit this node, then all existing children.
	callback(this);

	// perf fix: const& — the old range-for copied (and atomically
	// ref-counted) each shared_ptr once per visited child.
	for (const auto& child : children) {
		if (child == nullptr) {
			continue;
		}

		child->traverse(callback);
	}
}
void Node::traverse_postorder(function<void(Node*)> callback){
	// Post-order traversal: visit all children first, then this node.
	// perf fix: const& — avoid copying the shared_ptr per visited child.
	for (const auto& child : children) {
		if (child == nullptr) {
			continue;
		}

		child->traverse_postorder(callback);
	}

	callback(this);
}
// Descends from root along the digits of targetName ("r123" -> children
// 1, 2, 3). Returns nullptr if a node on the path does not exist; exits with
// an error if the found node's name does not match (should be impossible as
// long as node names mirror their path).
shared_ptr<Node> Node::find(shared_ptr<Node> root, string targetName){
	auto current = root;

	for (size_t i = 1; i < targetName.size(); i++) {
		int childIndex = targetName.at(i) - '0';
		current = current->children[childIndex];

		if (current == nullptr) {
			return nullptr;
		}
	}

	if (current->name != targetName) {
		cout << "ERROR: could not find node with name " << targetName << endl;
		exit(1234);

		return nullptr;
	}

	return current;
}
void Node::clear(){
//this->traverse([](Node* node){
this->points.clear();
this->store.clear();
this->attributeBuffer = nullptr;
this->points.shrink_to_fit();
this->store.shrink_to_fit();
//});
}
bool Node::isLeaf(){
for(auto child : children){
if(child != nullptr){
return false;
}
}
return true;
}
}

View File

@@ -0,0 +1,90 @@
#pragma once
#include <string>
#include <vector>
#include <memory>
#include <functional>
#include "convmath.h"
#include "Points.h"
using std::function;
using std::string;
using std::vector;
using std::shared_ptr;
namespace bluenoise {
// A single chunk file produced by the chunking phase.
struct Chunk {
	Vector3<double> min;    // bounding box of this chunk's grid cell
	Vector3<double> max;
	Attributes attributes;  // attribute layout of the points in the file
	string file;            // path to the chunk_*.bin file
	string id;              // octree node name, e.g. "r064"
};
// All chunks of a dataset together with the dataset's cubic bounding box.
struct Chunks {
	vector<shared_ptr<Chunk>> list;
	Vector3<double> min;
	Vector3<double> max;
	Attributes attributes;

	Chunks(vector<shared_ptr<Chunk>> list, Vector3<double> min, Vector3<double> max)
		: list(list), min(min), max(max) {
	}
};
// An octree node used during indexing. Incoming points are buffered in
// "store" until storeSize is reached; after that they are routed down into
// child nodes (see Node::add in Structures.cpp).
struct Node {
	string name;           // encodes the path from the root, e.g. "r064"
	Vector3<double> min;
	Vector3<double> max;

	vector<Point> points;  // subsampled points selected for this node
	vector<Point> store;   // buffered points (leaf payload)
	shared_ptr<Buffer> attributeBuffer = nullptr;  // packed attributes, indexed via Point::index

	int64_t numPoints = 0;      // points added through this node (incl. pushed-down)
	int storeSize = 5'000;      // buffer capacity before the store is flushed
	bool storeExceeded = false; // once true, add() bypasses the store
	bool isFlushed = false;     // set after PotreeWriter::writeNode
	bool isSubsampled = false;

	Node* parent = nullptr;
	shared_ptr<Node> children[8] = {
		nullptr, nullptr, nullptr, nullptr,
		nullptr, nullptr, nullptr, nullptr};

	Node(string name, Vector3<double> min, Vector3<double> max);

	void processStore();

	void add(Point& point);

	void traverse(function<void(Node*)> callback);

	void traverse_postorder(function<void(Node*)> callback);

	static shared_ptr<Node> find(shared_ptr<Node> start, string targetName);

	void clear();

	bool isLeaf();
};
// Pairing of a chunk file with its node in the indexing hierarchy.
struct IndexedChunk {
	shared_ptr<Chunk> chunk;
	shared_ptr<Node> node;
};
}

View File

@@ -1,210 +0,0 @@
#include <fstream>
#include <iostream>
#include <vector>
#include <experimental/filesystem>
#include "BINPointReader.hpp"
#include "stuff.h"
namespace fs = std::experimental::filesystem;
using std::ifstream;
using std::cout;
using std::endl;
using std::vector;
using std::ios;
namespace Potree{
BINPointReader::BINPointReader(string path, AABB aabb, double scale, PointAttributes pointAttributes){
	// Reader for Potree's BIN format. "path" may be a single file or a
	// directory; for a directory, all regular files inside are read in
	// sequence.
	this->path = path;
	this->aabb = aabb;
	this->scale = scale;
	this->attributes = pointAttributes;

	if(fs::is_directory(path)){
		// if directory is specified, find all las and laz files inside directory
		for(fs::directory_iterator it(path); it != fs::directory_iterator(); it++){
			fs::path filepath = it->path();

			if(fs::is_regular_file(filepath)){
				files.push_back(filepath.string());
			}
		}
	}else{
		files.push_back(path);
	}

	currentFile = files.begin();
	// NOTE(review): raw owning pointer; lifetime is managed manually in
	// close() and readNextPoint().
	reader = new ifstream(*currentFile, ios::in | ios::binary);
}
BINPointReader::~BINPointReader(){
	// release the current input stream, if any
	close();
}
// Closes and frees the current input stream. Idempotent: nulling the reader
// makes repeated calls safe.
void BINPointReader::close(){
	if(reader == NULL){
		return;
	}

	reader->close();
	delete reader;
	reader = NULL;
}
long long BINPointReader::numPoints(){
	// Not implemented for the BIN format; always returns 0.
	//TODO
	return 0;
}
bool BINPointReader::readNextPoint(){
bool hasPoints = reader->good();
if(!hasPoints){
// try to open next file, if available
reader->close();
delete reader;
reader = NULL;
currentFile++;
if(currentFile != files.end()){
reader = new ifstream(*currentFile, ios::in | ios::binary);
hasPoints = reader->good();
}
}
if(hasPoints){
point = Point();
char* buffer = new char[attributes.byteSize];
reader->read(buffer, attributes.byteSize);
if(!reader->good()){
delete [] buffer;
return false;
}
int offset = 0;
for(int i = 0; i < attributes.size(); i++){
const PointAttribute attribute = attributes[i];
if(attribute == PointAttribute::POSITION_CARTESIAN){
int* iBuffer = reinterpret_cast<int*>(buffer+offset);
point.position.x = (iBuffer[0] * scale) + aabb.min.x;
point.position.y = (iBuffer[1] * scale) + aabb.min.y;
point.position.z = (iBuffer[2] * scale) + aabb.min.z;
}else if(attribute == PointAttribute::COLOR_PACKED){
unsigned char* ucBuffer = reinterpret_cast<unsigned char*>(buffer+offset);
point.color.x = ucBuffer[0];
point.color.y = ucBuffer[1];
point.color.z = ucBuffer[2];
}else if(attribute == PointAttribute::INTENSITY){
unsigned short* usBuffer = reinterpret_cast<unsigned short*>(buffer+offset);
point.intensity = usBuffer[0];
}else if(attribute == PointAttribute::CLASSIFICATION){
unsigned char* ucBuffer = reinterpret_cast<unsigned char*>(buffer+offset);
point.classification = ucBuffer[0];
} else if (attribute == PointAttribute::RETURN_NUMBER) {
unsigned char* ucBuffer = reinterpret_cast<unsigned char*>(buffer + offset);
point.returnNumber = ucBuffer[0];
} else if (attribute == PointAttribute::NUMBER_OF_RETURNS) {
unsigned char* ucBuffer = reinterpret_cast<unsigned char*>(buffer + offset);
point.numberOfReturns = ucBuffer[0];
} else if (attribute == PointAttribute::SOURCE_ID) {
unsigned short* usBuffer = reinterpret_cast<unsigned short*>(buffer + offset);
point.pointSourceID = usBuffer[0];
} else if (attribute == PointAttribute::GPS_TIME) {
double* dBuffer = reinterpret_cast<double*>(buffer + offset);
point.gpsTime = dBuffer[0];
} else if(attribute == PointAttribute::NORMAL_SPHEREMAPPED){
// see http://aras-p.info/texts/CompactNormalStorage.html
unsigned char* ucBuffer = reinterpret_cast<unsigned char*>(buffer+offset);
unsigned char bx = ucBuffer[0];
unsigned char by = ucBuffer[1];
float ex = (float)bx / 255.0f;
float ey = (float)by / 255.0f;
float nx = ex * 2 - 1;
float ny = ey * 2 - 1;
float nz = 1;
float nw = -1;
float l = (nx * (-nx) + ny * (-ny) + nz * (-nw));
nz = l;
nx = nx * sqrt(l);
ny = ny * sqrt(l);
nx = nx * 2;
ny = ny * 2;
nz = nz * 2 -1;
point.normal.x = nx;
point.normal.y = ny;
point.normal.z = nz;
}else if(attribute == PointAttribute::NORMAL_OCT16){
unsigned char* ucBuffer = reinterpret_cast<unsigned char*>(buffer+offset);
unsigned char bx = ucBuffer[0];
unsigned char by = ucBuffer[1];
float u = (float)((bx / 255.0) * 2.0 - 1.0);
float v = (float)((by / 255.0) * 2.0 - 1.0);
float x = 0.0f;
float y = 0.0f;
float z = 1.0f - abs(u) - abs(v);
if(z >= 0){
x = u;
y = v;
}else{
x = float(-( v / psign(v) - 1.0 ) / psign(u));
y = float(-( u / psign(u) - 1.0 ) / psign(v));
}
float length = sqrt(x*x + y*y + z*z);
x = x / length;
y = y / length;
z = z / length;
point.normal.x = x;
point.normal.y = y;
point.normal.z = z;
}else if(attribute == PointAttribute::NORMAL){
float* fBuffer = reinterpret_cast<float*>(buffer+offset);
point.normal.x = fBuffer[0];
point.normal.y = fBuffer[1];
point.normal.z = fBuffer[2];
} else {
cout << "ERROR: attribute reader not implemented: " << attribute.name << endl;
exit(1);
}
offset += attribute.byteSize;
}
delete [] buffer;
}
return hasPoints;
}
Point BINPointReader::getPoint(){
	// Returns a copy of the most recently read point (see readNextPoint).
	return point;
}
AABB BINPointReader::getAABB(){
	// Not implemented; returns a default-constructed AABB.
	AABB aabb;
	//TODO
	return aabb;
}
}

View File

@@ -0,0 +1,367 @@
#include "Chunker.h"
#include <string>
#include <assert.h>
#include <filesystem>
#include <functional>
#include <fstream>
#include <unordered_map>
#include <memory>
#include "json.hpp"
#include "LASLoader.hpp"
#include "LASWriter.hpp"
#include "TaskPool.h"
mutex mtx_find_mutex;
//unordered_map<int, shared_ptr<mutex>> mutexes;
unordered_map<int, mutex> mutexes;
using std::string;
using std::to_string;
using std::ios;
using std::make_shared;
using json = nlohmann::json;
using std::fstream;
namespace fs = std::filesystem;
void flushProcessor(shared_ptr<ChunkPiece> piece) {
	// Serializes one chunk piece (points + attributes) and appends it to its
	// chunk file. Runs on the flush thread pool.
	auto points = piece->points;
	auto attributes = points->attributes;
	uint8_t* attBuffer = points->attributeBuffer->dataU8;

	uint64_t numPoints = points->points.size();
	uint64_t bytesPerPoint = attributes.byteSize;
	uint64_t fileDataSize = numPoints * bytesPerPoint;

	// fix: RAII buffer instead of malloc/free
	vector<uint8_t> fileData(fileDataSize);
	uint8_t* fileDataU8 = fileData.data();

	// perf fix: const& — the Point was copied on every iteration although it
	// is only read here.
	int i = 0;
	for (const Point& point : points->points) {
		int fileDataOffset = i * bytesPerPoint;

		// first 24 bytes are taken straight from the Point
		// (presumably x/y/z doubles — layout-dependent, TODO confirm)
		memcpy(fileDataU8 + fileDataOffset, &point, 24);

		// remaining attribute bytes; the piece's attribute buffer is packed
		// in point order, so it is indexed by i as well
		uint8_t* source = attBuffer + i * attributes.byteSize + 24;
		uint8_t* target = fileDataU8 + i * attributes.byteSize + 24;
		memcpy(target, source, attributes.byteSize - 24);

		i++;
	}

	string filepath = piece->path;

	// avoid writing to the same file by multiple threads, using one mutex per file
	mtx_find_mutex.lock();
	mutex& mtx_file = mutexes[piece->index];
	mtx_find_mutex.unlock();

	{
		double tLockStart = now();
		lock_guard<mutex> lock(mtx_file);
		double dLocked = now() - tLockStart;

		if (dLocked > 0.2) {
			string strDuration = to_string(dLocked);
			string msg = "long lock duration ( " + strDuration
				+ "s) while waiting to write to " + piece->name + "\n";
			cout << msg;
		}

		fstream file;
		file.open(filepath, ios::out | ios::binary | ios::app);
		file.write(reinterpret_cast<const char*>(fileData.data()), fileDataSize);
		file.close();
	}
}
shared_ptr< TaskPool<ChunkPiece>> flushPool;
void writeMetadata(string path, Vector3<double> min, Vector3<double> max, Attributes attributes) {
	// Writes the chunker's metadata.json: bounding cube + attribute layout.
	// The "encoding" block mirrors what the indexer's getChunks() reads back.
	json js;

	js["min"] = { min.x, min.y, min.z };
	js["max"] = { max.x, max.y, max.z };

	js["attributes"] = {};
	for (auto attribute : attributes.list) {
		json jsAttribute;
		jsAttribute["name"] = attribute.name;
		jsAttribute["type"] = attribute.type.name;
		jsAttribute["numElements"] = attribute.numElements;
		jsAttribute["description"] = "";

		json jsEncoding;
		jsEncoding["type"] = attribute.type.name;
		// NOTE(review): scale/offset are written as zeros — confirm no
		// reader relies on them yet.
		jsEncoding["scale"] = { 0.0, 0.0, 0.0 };
		jsEncoding["offset"] = { 0.0, 0.0, 0.0 };
		jsEncoding["bytes"] = attribute.type.bytes * attribute.numElements;
		jsAttribute["encoding"] = jsEncoding;

		//jsAttribute["byteOffset"] = attribute.byteOffset;
		//jsAttribute["byteSize"] = attribute.bytes;
		//jsAttribute["description"] = attribute.description;

		js["attributes"].push_back(jsAttribute);
	}

	string content = js.dump(4);

	writeFile(path, content);
}
Chunker::Chunker(string path, Attributes attributes, Vector3<double> min, Vector3<double> max, int gridSize) {
	// Distributes incoming point batches into a gridSize^3 cell grid; each
	// cell is eventually flushed to its own chunk file under "path".
	this->path = path;
	this->min = min;
	this->max = max;
	this->attributes = attributes;
	this->gridSize = gridSize;

	// double copies of the grid resolution / extent, used for binning math
	double gridSizeD = double(gridSize);
	cellsD = Vector3<double>(gridSizeD, gridSizeD, gridSizeD);
	size = max - min;

	//grid.resize(gridSize * gridSize * gridSize);
}
void Chunker::close() {
	// Shuts down the global flush pool (see TaskPool for close semantics).
	flushPool->close();
}
string Chunker::getName(int index) {
	// Converts a flat grid-cell index into the octree node name of that cell,
	// e.g. "r" followed by one child digit per subdivision level.

	// unpack the flat index into x/y/z cell coordinates
	int ix = index % gridSize;
	int iy = ((index - ix) / gridSize) % gridSize;
	int iz = (index - ix - iy * gridSize) / (gridSize * gridSize);

	string name = "r";

	// one octree level per power of two of the grid resolution
	int levels = std::log2(gridSize);
	int div = gridSize;

	for (int j = 0; j < levels; j++) {
		// child index bits: x -> 0b100, y -> 0b010, z -> 0b001
		int lIndex = 0;

		if (ix >= (div / 2)) {
			lIndex = lIndex + 0b100;
			ix = ix - div / 2;
		}
		if (iy >= (div / 2)) {
			lIndex = lIndex + 0b010;
			iy = iy - div / 2;
		}
		if (iz >= (div / 2)) {
			lIndex = lIndex + 0b001;
			iz = iz - div / 2;
		}

		name += to_string(lIndex);
		div = div / 2;
	}

	return name;
}
void Chunker::add(shared_ptr<Points> batch) {
	// Bins one batch of points into the gridSize^3 chunk grid and schedules
	// one flush task per non-empty bin.
	int64_t gridSizeM1 = gridSize - 1;
	double scaleX = cellsD.x / size.x;
	double scaleY = cellsD.y / size.y;
	double scaleZ = cellsD.z / size.z;
	// local int64 copy keeps the index arithmetic in 64 bit
	int64_t gridSize = this->gridSize;
	int64_t gridSizeGridSize = gridSize * gridSize;

	// making toIndex a lambda with necessary captures here seems to be faster than
	// making it a member function of this class?!
	// perf fix: take the point by const& instead of by value
	auto toIndex = [=](const Point& point) {
		int64_t ix = (point.x - min.x) * scaleX;
		int64_t iy = (point.y - min.y) * scaleY;
		int64_t iz = (point.z - min.z) * scaleZ;

		ix = std::min(ix, gridSizeM1);
		iy = std::min(iy, gridSizeM1);
		iz = std::min(iz, gridSizeM1);

		int64_t index = ix + gridSize * iy + gridSizeGridSize * iz;

		return index;
	};

	// compute number of points per bin
	// perf fix: const& — the Point was copied on every iteration
	vector<int> binCounts(gridSize * gridSize * gridSize, 0);
	for (const Point& point : batch->points) {
		int64_t index = toIndex(point);
		binCounts[index]++;
	}

	// create new bin-pieces and add them to bin-grid
	vector<shared_ptr<Points>> bins(gridSize * gridSize * gridSize, nullptr);
	for (int i = 0; i < binCounts.size(); i++) {
		int binCount = binCounts[i];
		if (binCount == 0) {
			continue;
		}

		shared_ptr<Points> bin = make_shared<Points>();
		bin->points.reserve(binCount);
		bin->attributes = batch->attributes;

		int attributeBufferSize = attributes.byteSize * binCount;
		bin->attributeBuffer = make_shared<Buffer>(attributeBufferSize);

		bins[i] = bin;
	}

	// fill bins; the Point is copied on purpose because its index is
	// rewritten to the bin-local position before storing it
	for (Point point : batch->points) {
		int64_t index = toIndex(point);
		// perf fix: reference — don't copy (and ref-count) the shared_ptr
		// once per point
		auto& bin = bins[index];

		int i = bin->points.size();

		auto source = batch->attributeBuffer->dataU8 + point.index * attributes.byteSize;
		auto target = bin->attributeBuffer->dataU8 + i * attributes.byteSize;
		memcpy(target, source, attributes.byteSize);

		point.index = i;
		bin->points.push_back(point);
	}

	// create flush tasks
	for (int i = 0; i < binCounts.size(); i++) {
		auto points = bins[i];
		if (points == nullptr) {
			continue;
		}

		int index = i;
		string name = getName(index);
		string filepath = this->path + name + ".bin";

		auto piece = make_shared<ChunkPiece>(index, name, filepath, points);
		flushPool->addTask(piece);
	}
}
void doChunking(string pathIn, string pathOut) {
	// Entry point of the chunking phase: streams batches out of the LAS
	// loader and distributes them into chunk files under <pathOut>/chunks.
	double tStart = now();

	flushPool = make_shared<TaskPool<ChunkPiece>>(16, flushProcessor);

	// fix: loader and chunker were raw-new'd and never deleted (leak);
	// unique_ptr releases them when chunking is done.
	auto loader = std::make_unique<LASLoader>(pathIn, 32);
	Attributes attributes = loader->getAttributes();

	string path = pathOut + "/chunks/";
	fs::create_directories(path);
	for (const auto& entry : std::filesystem::directory_iterator(path)) {
		std::filesystem::remove(entry);
	}

	// chunk inside the cube around the data so that all cells are cubic
	Vector3<double> size = loader->max - loader->min;
	double cubeSize = std::max(std::max(size.x, size.y), size.z);
	Vector3<double> cubeMin = loader->min;
	Vector3<double> cubeMax = cubeMin + cubeSize;

	int gridSize = 4;
	auto chunker = std::make_unique<Chunker>(path, attributes, cubeMin, cubeMax, gridSize);

	double sum = 0.0;
	int batchNumber = 0;

	auto promise = loader->nextBatch();
	promise.wait();
	auto batch = promise.get();

	while (batch != nullptr) {
		if ((batchNumber % 10) == 0) {
			cout << "batch loaded: " << batchNumber << endl;
		}

		// fix: inner timer no longer shadows the outer tStart
		auto tBatchStart = now();
		chunker->add(batch);
		auto duration = now() - tBatchStart;
		sum += duration;

		promise = loader->nextBatch();
		promise.wait();
		batch = promise.get();

		batchNumber++;
	}

	cout << "raw batch add time: " << sum << endl;

	string metadataPath = pathOut + "/chunks/metadata.json";
	writeMetadata(metadataPath, cubeMin, cubeMax, attributes);

	printElapsedTime("chunking duration", tStart);

	flushPool->waitTillEmpty();
	//chunker->close();

	printElapsedTime("chunking duration + close", tStart);
}

View File

@@ -1,60 +0,0 @@
#include "GridCell.h"
#include "SparseGrid.h"
#include <iostream>
using std::cout;
using std::endl;
using std::min;
using std::max;
namespace Potree{
#define MAX_FLOAT std::numeric_limits<float>::max()
GridCell::GridCell(){
	// default: no grid, no neighbours
}
GridCell::GridCell(SparseGrid *grid, GridIndex &index){
	// Creates a cell at the given grid index and symmetrically links it with
	// every already-existing cell in its 26-neighbourhood.
	this->grid = grid;
	neighbours.reserve(26);

	// scan the 3x3x3 neighbourhood, clamped to the grid bounds
	for(int i = max(index.i -1, 0); i <= min(grid->width-1, index.i + 1); i++){
		for(int j = max(index.j -1, 0); j <= min(grid->height-1, index.j + 1); j++){
			for(int k = max(index.k -1, 0); k <= min(grid->depth-1, index.k + 1); k++){
				// pack (i, j, k) into one lookup key, 20 bits per axis
				long long key = ((long long)k << 40) | ((long long)j << 20) | i;
				SparseGrid::iterator it = grid->find(key);

				if(it != grid->end()){
					GridCell *neighbour = it->second;

					if(neighbour != this){
						// link both directions
						neighbours.push_back(neighbour);
						neighbour->neighbours.push_back(this);
					}
				}
			}
		}
	}
}
void GridCell::add(Vector3<double> p){
	// Registers an accepted point in this cell.
	points.push_back(p);
}
bool GridCell::isDistant(const Vector3<double> &p, const double &squaredSpacing) const {
for(const Vector3<double> &point : points){
if(p.squaredDistanceTo(point) < squaredSpacing){
return false;
}
}
return true;
}
}

View File

@@ -0,0 +1,398 @@
#include "Indexer_Centered.h"
#include <vector>
#include <filesystem>
#include <memory>
#include <thread>
#include <unordered_map>
#include "json.hpp"
#include "convmath.h"
#include "converter_utils.h"
#include "stuff.h"
#include "LASWriter.hpp"
#include "TaskPool.h"
using std::vector;
using std::thread;
using std::unordered_map;
using std::string;
using std::to_string;
using std::make_shared;
using std::shared_ptr;
using json = nlohmann::json;
namespace fs = std::filesystem;
namespace centered{
// A chunk file from the chunking phase (centered-subsampling experiment).
struct Chunk {
	Vector3<double> min;  // bounding box of the chunk's cell
	Vector3<double> max;
	string file;          // path to the chunk file
	string id;            // octree node name, e.g. "r064"
};
int pointsAdded = 0;
struct Node {
string name;
int level;
int gridSize;
double dGridSize;
vector<Point> store;
int storeSize = 1'000;
bool storeBroken = false;
int maxDepth = 0;
unordered_map<int, Point> grid;
BoundingBox box;
vector<shared_ptr<Node>> children;
Node(string name, int gridSize, BoundingBox box, int maxDepth) {
	// gridSize: resolution of this node's occupancy grid;
	// maxDepth: level at/after which points are stored verbatim (see add()).
	this->name = name;
	this->level = name.size() - 1;  // "r" is level 0
	this->gridSize = gridSize;
	this->box = box;
	this->maxDepth = maxDepth;

	children.resize(8, nullptr);
	//store.reserve(storeSize);

	// double copy of the resolution, avoids int->double casts in add()
	dGridSize = gridSize;
}
void breakStore() {
storeBroken = true;
for (Point& point : store) {
this->add(point);
}
store.clear();
store = vector<Point>();
}
void add(Point& point) {
//if (!storeBroken) {
// store.push_back(point);
// if (store.size() > storeSize) {
// breakStore();
// }
// return;
//}
if (level >= maxDepth) {
store.push_back(point);
return;
}
auto min = this->box.min;
auto max = this->box.max;
auto size = max - min;
int64_t ix = std::min(dGridSize * (point.x - min.x) / size.x, dGridSize - 1.0);
int64_t iy = std::min(dGridSize * (point.y - min.y) / size.y, dGridSize - 1.0);
int64_t iz = std::min(dGridSize * (point.z - min.z) / size.z, dGridSize - 1.0);
int64_t index = ix + gridSize * iy + gridSize * gridSize * iz;
//Point prev = grid[index];
if (grid.find(index) == grid.end()) {
//grid[index] = point;
grid.insert(std::make_pair(index, point));
pointsAdded++;
} else {
Point prev = grid[index];
double p1x = (dGridSize * (point.x - min.x) / size.x) - double(ix) - 0.5;
double p1y = (dGridSize * (point.y - min.y) / size.y) - double(iy) - 0.5;
double p1z = (dGridSize * (point.z - min.z) / size.z) - double(iz) - 0.5;
double p2x = (dGridSize * (prev.x - min.x) / size.x) - double(ix) - 0.5;
double p2y = (dGridSize * (prev.y - min.y) / size.y) - double(iy) - 0.5;
double p2z = (dGridSize * (prev.z - min.z) / size.z) - double(iz) - 0.5;
double d1 = p1x * p1x + p1y * p1y + p1z * p1z;
double d2 = p2x * p2x + p2y * p2y + p2z * p2z;
// d1 is closer to center
// 1: swap
// 2: move previous point down to child
if (d1 < d2) {
grid[index] = point;
int childIndex = computeChildIndex(box, prev);
if (children[childIndex] == nullptr) {
string childName = name + to_string(childIndex);
auto childBox = childBoundingBoxOf(box, childIndex);
auto child = make_shared<Node>(childName, gridSize, childBox, maxDepth);
children[childIndex] = child;
}
children[childIndex]->add(prev);
} else {
int childIndex = computeChildIndex(box, point);
if (children[childIndex] == nullptr) {
string childName = name + to_string(childIndex);
auto childBox = childBoundingBoxOf(box, childIndex);
auto child = make_shared<Node>(childName, gridSize, childBox, maxDepth);
children[childIndex] = child;
}
children[childIndex]->add(point);
}
}
}
vector<Point> getAccepted() {
vector<Point> accepted;
//for (Point point : grid) {
for (auto it : grid){
Point point = it.second;
if (point.index == 0) {
continue;
}
accepted.push_back(point);
}
return accepted;
}
void traverse(std::function<void(Node*)> callback) {
callback(this);
for (auto child : children) {
if (child == nullptr) {
continue;
}
child->traverse(callback);
}
}
};
// Enumerates all chunk_*.bin files below pathIn/chunks and computes each
// chunk's bounding box by descending from the root box along the chunk id.
vector<shared_ptr<Chunk>> getListOfChunks(string pathIn) {
	string chunkDirectory = pathIn + "/chunks";

	// "chunk_r012.bin" -> "r012"
	auto toID = [](string filename) -> string {
		string strID = stringReplace(filename, "chunk_", "");
		strID = stringReplace(strID, ".bin", "");
		return strID;
	};

	// metadata.json holds the bounding box of the whole point cloud and is
	// shared by all chunks — read and parse it ONCE instead of once per
	// chunk file, as the previous revision did.
	string metadataText = readTextFile(chunkDirectory + "/metadata.json");
	json js = json::parse(metadataText);

	Vector3<double> min = {
		js["min"][0].get<double>(),
		js["min"][1].get<double>(),
		js["min"][2].get<double>()
	};
	Vector3<double> max = {
		js["max"][0].get<double>(),
		js["max"][1].get<double>(),
		js["max"][2].get<double>()
	};

	vector<shared_ptr<Chunk>> chunksToLoad;
	for (const auto& entry : fs::directory_iterator(chunkDirectory)) {
		string filename = entry.path().filename().string();

		if (!iEndsWith(filename, ".bin")) {
			continue;
		}

		string chunkID = toID(filename);

		shared_ptr<Chunk> chunk = make_shared<Chunk>();
		chunk->file = entry.path().string();
		chunk->id = chunkID;

		// Descend from the root box along the id digits ("r012" -> children
		// 0, 1, 2) to obtain this chunk's own bounding box.
		BoundingBox box = { min, max };
		for (size_t i = 1; i < chunkID.size(); i++) {
			int index = chunkID[i] - '0';
			box = childBoundingBoxOf(box, index);
		}

		chunk->min = box.min;
		chunk->max = box.max;

		chunksToLoad.push_back(chunk);
	}

	return chunksToLoad;
}
// Total number of points indexed so far.
// Atomic: indexChunk() runs concurrently on the TaskPool's worker threads,
// so the previous plain int "+=" was a data race.
std::atomic<int> pointsProcessed = 0;
// Builds the sampling hierarchy for a single chunk and returns its root.
shared_ptr<Node> indexChunk(shared_ptr<Chunk> chunk, string path) {
	auto points = loadPoints(chunk->file);

	auto tStartIndexing = now();

	int gridSize = 128;
	BoundingBox box = { chunk->min, chunk->max };
	shared_ptr<Node> root = make_shared<Node>(chunk->id, gridSize, box, 5);

	// Insert every point of the chunk into the hierarchy.
	for (Point point : points) {
		root->add(point);
	}

	// Tree statistics — currently only gathered, kept for debugging.
	int numNodes = 0;
	int highestLevel = 0;
	root->traverse([&numNodes, &highestLevel](Node* node) {
		numNodes++;
		highestLevel = std::max(highestLevel, node->level);
	});

	printElapsedTime("indexing " + chunk->id, tStartIndexing);

	pointsProcessed += points.size();

	return root;
}
// Work item for the indexing TaskPool: one chunk plus the output path.
struct IndexTask {

	shared_ptr<Chunk> chunk;
	string path;

	IndexTask(shared_ptr<Chunk> chunk, string path) {
		this->chunk = chunk;
		this->path = path;
	}
};
// TaskPool worker callback: indexes one chunk. The resulting tree is
// discarded here — only the global counters/timings remain.
auto processor = [](shared_ptr<IndexTask> task) {
	auto root = indexChunk(task->chunk, task->path);
};
// Indexes all chunks below path concurrently on a pool of 16 workers.
void doIndexing(string path) {
	fs::create_directories(path + "/nodes");

	auto chunks = getListOfChunks(path);

	TaskPool<IndexTask> pool(16, processor);

	for (const auto& chunk : chunks) {
		shared_ptr<IndexTask> task = make_shared<IndexTask>(chunk, path);
		pool.addTask(task);
	}

	pool.close();

	cout << "pointsProcessed: " << pointsProcessed << endl;
}
}

View File

@@ -0,0 +1,280 @@
#include "Indexer_Centered_countsort.h"
#include <vector>
#include <filesystem>
#include <memory>
#include <thread>
#include <unordered_map>
#include "json.hpp"
#include "convmath.h"
#include "converter_utils.h"
#include "stuff.h"
#include "LASWriter.hpp"
#include <bitset>
using std::vector;
using std::thread;
using std::unordered_map;
using std::string;
using std::to_string;
using std::make_shared;
using std::shared_ptr;
using json = nlohmann::json;
namespace fs = std::filesystem;
namespace countsort{
// One chunk of the input point cloud, produced by the chunking phase.
struct Chunk {
	Vector3<double> min;   // chunk bounding box (derived from root box + id)
	Vector3<double> max;
	string file;           // path to the chunk's binary point file
	string id;             // octree path of the chunk, e.g. "r012"
};
// Enumerates all chunk_*.bin files below pathIn/chunks and computes each
// chunk's bounding box by descending from the root box along the chunk id.
vector<shared_ptr<Chunk>> getListOfChunks(string pathIn) {
	string chunkDirectory = pathIn + "/chunks";

	// "chunk_r012.bin" -> "r012"
	auto toID = [](string filename) -> string {
		string strID = stringReplace(filename, "chunk_", "");
		strID = stringReplace(strID, ".bin", "");
		return strID;
	};

	// metadata.json holds the bounding box of the whole point cloud and is
	// shared by all chunks — read and parse it ONCE instead of once per
	// chunk file, as the previous revision did.
	string metadataText = readTextFile(chunkDirectory + "/metadata.json");
	json js = json::parse(metadataText);

	Vector3<double> min = {
		js["min"][0].get<double>(),
		js["min"][1].get<double>(),
		js["min"][2].get<double>()
	};
	Vector3<double> max = {
		js["max"][0].get<double>(),
		js["max"][1].get<double>(),
		js["max"][2].get<double>()
	};

	vector<shared_ptr<Chunk>> chunksToLoad;
	for (const auto& entry : fs::directory_iterator(chunkDirectory)) {
		string filename = entry.path().filename().string();

		if (!iEndsWith(filename, ".bin")) {
			continue;
		}

		string chunkID = toID(filename);

		shared_ptr<Chunk> chunk = make_shared<Chunk>();
		chunk->file = entry.path().string();
		chunk->id = chunkID;

		// Descend from the root box along the id digits ("r012" -> children
		// 0, 1, 2) to obtain this chunk's own bounding box.
		BoundingBox box = { min, max };
		for (size_t i = 1; i < chunkID.size(); i++) {
			int index = chunkID[i] - '0';
			box = childBoundingBoxOf(box, index);
		}

		chunk->min = box.min;
		chunk->max = box.max;

		chunksToLoad.push_back(chunk);
	}

	return chunksToLoad;
}
//struct Bin {
// uint64_t index;
// uint64_t start;
// uint64_t size;
//};
//vector<Bin> binsort(vector<Point>& points, int gridSize,
// Vector3<double> min, Vector3<double> max) {
// double dGridSize = gridSize;
// auto size = max - min;
// vector<int> counts(gridSize * gridSize * gridSize, 0);
// vector<Bin> bins;
// auto toIndex = [dGridSize, min, size, gridSize](Point& point){
// int64_t ix = std::min(dGridSize * (point.x - min.x) / size.x, dGridSize - 1.0);
// int64_t iy = std::min(dGridSize * (point.y - min.y) / size.y, dGridSize - 1.0);
// int64_t iz = std::min(dGridSize * (point.z - min.z) / size.z, dGridSize - 1.0);
// int64_t index = ix + iy * gridSize + iz * gridSize * gridSize;
// return index;
// };
// //================
// //== COUNT
// //================
// for (Point point : points) {
// // int64_t ix = std::min(dGridSize * (point.x - min.x) / size.x, dGridSize - 1.0);
// // int64_t iy = std::min(dGridSize * (point.y - min.y) / size.y, dGridSize - 1.0);
// // int64_t iz = std::min(dGridSize * (point.z - min.z) / size.z, dGridSize - 1.0);
// // int64_t index = ix + iy * gridSize + iz * gridSize * gridSize;
// int64_t index = toIndex(point);
// int count = counts[index]++;
// if(count == 0){
// Bin bin = {index, 0, 0};
// bins.push_back(bin);
// }
// }
// int sum = 0;
// for(Bin& bin : bins){
// bin.start = sum;
// bin.size = counts[bin.index];
// sum += bin.size;
// }
// //================
// //== SORT
// //================
// return bins;
//}
// see https://www.forceflow.be/2013/10/07/morton-encodingdecoding-through-bit-interleaving-implementations/
// Spreads the lower 21 bits of a so that input bit i ends up at output
// bit 3*i ("magic bits" method). The previous comments claimed every step
// shifts by 32 bits — they were copy-paste leftovers; the shift halves at
// each step (32, 16, 8, 4, 2).
inline uint64_t splitBy3(uint32_t a){
	uint64_t x = a & 0x1fffff;              // keep only the lower 21 bits
	x = (x | x << 32) & 0x1f00000000ffff;   // shift left 32, OR with self, mask
	x = (x | x << 16) & 0x1f0000ff0000ff;   // shift left 16, OR with self, mask
	x = (x | x << 8) & 0x100f00f00f00f00f;  // shift left 8, OR with self, mask
	x = (x | x << 4) & 0x10c30c30c30c30c3;  // shift left 4, OR with self, mask
	x = (x | x << 2) & 0x1249249249249249;  // shift left 2, OR with self, mask
	return x;
}
// see https://www.forceflow.be/2013/10/07/morton-encodingdecoding-through-bit-interleaving-implementations/
// Interleaves the lower 21 bits of x, y and z into a 63 bit Morton code:
// x occupies bits 0, 3, 6, ...; y bits 1, 4, 7, ...; z bits 2, 5, 8, ...
inline uint64_t mortonEncode_magicbits(uint32_t x, uint32_t y, uint32_t z){
	return splitBy3(x) | (splitBy3(y) << 1) | (splitBy3(z) << 2);
}
// Debug accumulator printed in doIndexing; only written by commented-out code.
double sum = 0;
// see https://www.forceflow.be/2013/10/07/morton-encodingdecoding-through-bit-interleaving-implementations/
// Assigns every point a Morton code based on its quantized grid coordinates
// inside [min, max] (2^levels cells per axis) and sorts the points in
// z-order.
//
// Fixes: points is now taken by reference — previously a by-value copy was
// sorted, so the caller never saw the result. The comparator now returns
// "a.index < b.index"; the old "a.index - b.index" converts to bool and is
// not a strict weak ordering, which is undefined behavior in std::sort.
void mortonSort(vector<Point>& points, Vector3<double> min, Vector3<double> max, int levels){
	auto size = max - min;

	int gridSize = 1 << levels;   // 2^levels cells per axis, exact integer
	double dGridSize = gridSize;

	for(Point& point : points){
		uint32_t x = std::min((point.x - min.x) * (dGridSize / size.x), dGridSize - 1.0);
		uint32_t y = std::min((point.y - min.y) * (dGridSize / size.y), dGridSize - 1.0);
		uint32_t z = std::min((point.z - min.z) * (dGridSize / size.z), dGridSize - 1.0);

		point.index = mortonEncode_magicbits(x, y, z);
	}

	std::sort(points.begin(), points.end(), [](const Point& a, const Point& b){
		return a.index < b.index;
	});
}
// Experiment driver: Morton-sorts the points of the first chunk and reports
// timing.
void doIndexing(string path) {
	fs::create_directories(path + "/nodes");

	auto chunks = getListOfChunks(path);
	auto firstChunk = chunks[0];
	auto points = loadPoints(firstChunk->file);

	auto tStart = now();

	int gridSize = 512;
	int levels = 9;

	mortonSort(points, firstChunk->min, firstChunk->max, levels);

	printElapsedTime("binsort", tStart);

	cout << "#points: " << points.size() << endl;
	cout << "sum: " << sum << endl;
}
}

View File

@@ -0,0 +1,480 @@
#include "Indexer_Centered.h"
#include <vector>
#include <filesystem>
#include <memory>
#include <thread>
#include <unordered_map>
#include "json.hpp"
#include "convmath.h"
#include "converter_utils.h"
#include "stuff.h"
#include "LASWriter.hpp"
#include "TaskPool.h"
#include "LASLoader.hpp"
using std::vector;
using std::thread;
using std::unordered_map;
using std::string;
using std::to_string;
using std::make_shared;
using std::shared_ptr;
using json = nlohmann::json;
namespace fs = std::filesystem;
namespace centered_nochunks{
// Sampling grid resolution per node (cells per axis), plus the same value as
// double for coordinate arithmetic.
int gridSize = 128;
double dGridSize = gridSize;
// One occupied cell of a node's sampling grid.
struct Cell {
	Point point;                  // current representative point of the cell
	double distance = Infinity;   // squared distance of point to the cell center, in cell units
	// Indices into the corresponding child node's cells vector, one per
	// octant of this cell; -1 while unoccupied.
	int childCellPointers[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };
};
// Global statistics, updated in the Node constructor and Node::add and
// printed per batch in doIndexing. Plain ints are fine here because the
// indexing loop below processes batches sequentially on one thread.
int numNodes = 0;
int numAdds = 0;
int maxDepth = 0;
struct Node{
string name;
int level = 0;
Vector3<double> min;
Vector3<double> max;
Vector3<double> size;
vector<shared_ptr<Node>> children;
vector<Cell> cells;
Node(string name, Vector3<double> min, Vector3<double> max){
this->name = name;
this->min = min;
this->max = max;
this->size = max - min;
this->children.resize(8, nullptr);
this->level = name.size() - 1;
numNodes++;
}
int add(Point point, int cellPointer, int childIndex) {
if (level >= 7) {
return cellPointer;
}
numAdds++;
maxDepth = std::max(maxDepth, level);
double dix = dGridSize * (point.x - min.x) / size.x;
double diy = dGridSize * (point.y - min.y) / size.y;
double diz = dGridSize * (point.z - min.z) / size.z;
int64_t ix = std::min(dix, dGridSize - 1.0);
int64_t iy = std::min(diy, dGridSize - 1.0);
int64_t iz = std::min(diz, dGridSize - 1.0);
double dx = dix - double(ix) - 0.5;
double dy = diy - double(iy) - 0.5;
double dz = diz - double(iz) - 0.5;
double distance = dx * dx + dy * dy + dz * dz;
if (cellPointer == -1) {
Cell cell;
cell.point = point;
cell.distance = distance;
cells.push_back(cell);
int newCellPointer = cells.size() - 1;
return newCellPointer;
} else {
Cell& cell = cells[cellPointer];
Point pointForChild;
if (distance < cell.distance) {
// new one is closer to center
pointForChild = cell.point;
cell.point = point;
cell.distance = distance;
}else{
// old one is closer to center
pointForChild = point;
}
auto child = children[childIndex];
if (child == nullptr) {
auto childBox = childBoundingBoxOf(min, max, childIndex);
string childName = name + to_string(childIndex);
child = make_shared<Node>(childName, childBox.min, childBox.max);
children[childIndex] = child;
}
auto nextChildIndex = childIndexOf(child->min, child->max, pointForChild);
int childCellIndex = 0;
if (dx > 0.0) {
childCellIndex = childCellIndex | 0b100;
}
if (dy > 0.0) {
childCellIndex = childCellIndex | 0b010;
}
if (dz > 0.0) {
childCellIndex = childCellIndex | 0b001;
}
auto childCellPointer = cell.childCellPointers[childCellIndex];
int newChildPointer = child->add(pointForChild, childCellPointer, nextChildIndex);
if (newChildPointer == 383) {
int a = 10;
}
cell.childCellPointers[childCellIndex] = newChildPointer;
int a = newChildPointer;
int b = child->cells.size();
if (a > b) {
int a = 10;
exit(1234);
}
return cellPointer;
}
}
void traverse(function<void(Node*)> callback) {
callback(this);
for (auto child : children) {
if (child == nullptr) {
continue;
}
child->traverse(callback);
}
}
};
// Total time spent inside Indexer::add, in the unit returned by now()
// (presumably seconds — see printElapsedTime usage).
double addDuration = 0.0;
// Streams batches of points into the center-sampling octree rooted at root.
struct Indexer{

	Vector3<double> min;
	Vector3<double> max;

	// cellPointers[gridIndex] is the index of the cell inside root->cells
	// currently occupying that grid cell, or -1 while it is empty.
	vector<int> cellPointers;
	shared_ptr<Node> root = nullptr;
	int pointsProcessed = 0;

	Indexer(Vector3<double> min, Vector3<double> max){
		this->min = min;
		this->max = max;

		cellPointers = vector<int>(gridSize * gridSize * gridSize, -1);

		root = make_shared<Node>("r", min, max);
	}

	void add(shared_ptr<Points> points){
		auto tStart = now();

		auto min = this->min;
		auto max = this->max;
		auto size = max - min;

		// Per-point classification: target grid cell, squared distance to
		// that cell's center (in cell units), and the root child octant.
		struct CellInfo {
			int64_t index;
			double distance;
			int childIndex;
		};

		auto classify = [min, max, size](Point& point) -> CellInfo {
			double dix = dGridSize * (point.x - min.x) / size.x;
			double diy = dGridSize * (point.y - min.y) / size.y;
			double diz = dGridSize * (point.z - min.z) / size.z;

			int64_t ix = std::min(dix, dGridSize - 1.0);
			int64_t iy = std::min(diy, dGridSize - 1.0);
			int64_t iz = std::min(diz, dGridSize - 1.0);

			double dx = dix - double(ix) - 0.5;
			double dy = diy - double(iy) - 0.5;
			double dz = diz - double(iz) - 0.5;

			int64_t index = ix + iy * gridSize + iz * gridSize * gridSize;
			double distance = dx * dx + dy * dy + dz * dz;

			int childIndex = 0;
			if (ix > (gridSize / 2)) {
				childIndex = childIndex | 0b100;
			}
			if (iy > (gridSize / 2)) {
				childIndex = childIndex | 0b010;
			}
			if (iz > (gridSize / 2)) {
				childIndex = childIndex | 0b001;
			}

			return { index, distance, childIndex };
		};

		for (Point& point : points->points) {
			CellInfo info = classify(point);

			int occupant = cellPointers[info.index];
			int newOccupant = root->add(point, occupant, info.childIndex);
			if (occupant == -1) {
				cellPointers[info.index] = newOccupant;
			}

			pointsProcessed++;
		}

		auto duration = now() - tStart;
		addDuration += duration;

		cout << "#root.cells: " << root->cells.size() << endl;
	}
};
// Builds a synthetic test data set: a dense n x n grid of points in the
// z = 0 plane spanning [0, 128] in x and y.
shared_ptr<Points> createTestData() {
	auto points = make_shared<Points>();

	int n = 2048;
	for (int i = 0; i < n; i++) {
		double x = 128.0 * double(i) / double(n - 1);

		for (int j = 0; j < n; j++) {
			double y = 128.0 * double(j) / double(n - 1);
			double z = 0;

			points->points.emplace_back(x, y, z, 0);
		}
	}

	return points;
}
// Indexes the synthetic test data set and dumps every node that holds at
// least one point to its own LAS file.
void doIndexingTest() {
	auto data = createTestData();

	Vector3<double> min = { 0.0, 0.0, 0.0 };
	Vector3<double> max = { 128.0, 128.0, 128.0 };

	Indexer indexer(min, max);
	indexer.add(data);

	indexer.root->traverse([](Node* node) {
		vector<Point> accepted;
		for (auto& cell : node->cells) {
			accepted.push_back(cell.point);
		}

		if (accepted.empty()) {
			return;
		}

		LASHeader header;
		header.min = node->min;
		header.max = node->max;
		header.numPoints = accepted.size();

		string filename = "D:/temp/test/" + node->name + ".las";
		writeLAS(filename, header, accepted);
	});
}
// Indexes the LAS/LAZ input at pathIn batch by batch and writes one LAS file
// per resulting octree node.
void doIndexing(string pathIn, string pathOut) {
	fs::create_directories(pathOut);

	// FIX: own the loader via unique_ptr — the previous raw "new LASLoader"
	// was never deleted (leak).
	auto loader = std::make_unique<LASLoader>(pathIn, 1);
	Attributes attributes = loader->getAttributes();

	// Use a cube around the data so all octree cells are cubic.
	Vector3<double> size = loader->max - loader->min;
	double cubeSize = std::max(std::max(size.x, size.y), size.z);
	Vector3<double> cubeMin = loader->min;
	Vector3<double> cubeMax = cubeMin + cubeSize;

	Indexer indexer(cubeMin, cubeMax);

	double sum = 0;
	int batchNumber = 0;

	auto promise = loader->nextBatch();
	promise.wait();
	auto batch = promise.get();

	while (batch != nullptr) {
		if ((batchNumber % 10) == 0) {
			cout << "batch loaded: " << batchNumber << endl;
		}

		auto tStart = now();
		indexer.add(batch);
		auto duration = now() - tStart;
		sum += duration;

		promise = loader->nextBatch();
		promise.wait();
		batch = promise.get();
		batchNumber++;

		cout << "#nodes: " << numNodes << endl;
		cout << "#adds: " << numAdds << endl;
		cout << "#depth: " << maxDepth << endl;
	}

	cout << "indexing duration: " << sum << endl;
	cout << "raw add duration: " << addDuration << endl;

	// NOTE(review): output goes to the hard-coded "D:/temp/test/" below, not
	// to pathOut — looks like leftover experiment wiring; confirm intent.
	indexer.root->traverse([](Node* node){
		vector<Point> accepted;
		for (auto& cell : node->cells) {
			accepted.push_back(cell.point);
		}

		if (accepted.size() == 0) {
			return;
		}

		LASHeader header;
		header.min = node->min;
		header.max = node->max;
		header.numPoints = accepted.size();

		string filename = "D:/temp/test/" + node->name + ".las";
		writeLAS(filename, header, accepted);
	});
}
}

View File

@@ -1,133 +0,0 @@
#include <fstream>
#include <iostream>
#include <vector>
#include <experimental/filesystem>
#include "laszip_api.h"
#include "LASPointReader.h"
#include "stuff.h"
namespace fs = std::experimental::filesystem;
using std::ifstream;
using std::cout;
using std::endl;
using std::vector;
using std::ios;
namespace Potree{
// Bounding box from the LAS header min/max, passed through transform().
AABB LIBLASReader::getAABB(){
	Point pmin = transform(header->min_x, header->min_y, header->min_z);
	Point pmax = transform(header->max_x, header->max_y, header->max_z);

	AABB aabb;
	aabb.update(pmin.position);
	aabb.update(pmax.position);

	return aabb;
}
// Collects the input files (a single file, or all .las/.laz files inside a
// directory), merges their header bounding boxes into aabb, and opens the
// first file for reading.
LASPointReader::LASPointReader(string path){
	this->path = path;

	if(fs::is_directory(path)){
		// if directory is specified, find all las and laz files inside directory
		for(fs::directory_iterator it(path); it != fs::directory_iterator(); it++){
			fs::path filepath = it->path();
			if(fs::is_regular_file(filepath)){
				if(icompare(fs::path(filepath).extension().string(), ".las") || icompare(fs::path(filepath).extension().string(), ".laz")){
					files.push_back(filepath.string());
				}
			}
		}
	}else{
		files.push_back(path);
	}

	// read bounding box: each file's header min/max is merged into aabb
	for (const auto &file : files) {
		LIBLASReader aabbReader(file);

		AABB lAABB = aabbReader.getAABB();

		aabb.update(lAABB.min);
		aabb.update(lAABB.max);

		aabbReader.close();
	}

	// open first file
	// NOTE(review): assumes at least one input file — files.begin() is
	// dereferenced unconditionally.
	currentFile = files.begin();
	reader = new LIBLASReader(*currentFile);

	// cout << "let's go..." << endl;
}
// Releases the currently open reader (if any) via close().
LASPointReader::~LASPointReader(){
	close();
}
// Closes and frees the active file reader. Safe to call repeatedly.
void LASPointReader::close(){
	if(reader == NULL){
		return;
	}

	reader->close();
	delete reader;
	reader = NULL;
}
// Number of points in the CURRENT file.
// LAS 1.4 moved the full 64 bit point count into
// extended_number_of_point_records; older versions only have the legacy
// 32 bit field. The previous check (major >= 1 && minor >= 4) would
// misclassify any version with major > 1 and minor < 4.
long long LASPointReader::numPoints(){
	int major = reader->header->version_major;
	int minor = reader->header->version_minor;

	bool is14orNewer = (major > 1) || (major == 1 && minor >= 4);

	if (is14orNewer) {
		return reader->header->extended_number_of_point_records;
	} else {
		return reader->header->number_of_point_records;
	}
}
// Reads the next point, transparently advancing through the list of input
// files. Returns false once all files are exhausted.
// FIX: the previous version only tried the single next file, so an empty
// file in the middle of the list prematurely ended the iteration; this loops
// until a file yields a point or no files remain.
bool LASPointReader::readNextPoint(){
	bool hasPoints = reader->readPoint();

	while(!hasPoints && currentFile != files.end()){
		// current file exhausted -> close it and try the next one, if any
		reader->close();
		delete reader;
		reader = NULL;

		currentFile++;
		if(currentFile == files.end()){
			break;
		}

		reader = new LIBLASReader(*currentFile);
		hasPoints = reader->readPoint();
	}

	return hasPoints;
}
// Returns the most recently read point.
Point LASPointReader::getPoint(){
	return reader->GetPoint();
}
// Combined bounding box over all input files, computed in the constructor.
AABB LASPointReader::getAABB(){
	return aabb;
}
// Coordinate scale factors from the current file's LAS header.
Vector3<double> LASPointReader::getScale(){
	Vector3<double> scale = {
		reader->header->x_scale_factor,
		reader->header->y_scale_factor,
		reader->header->z_scale_factor
	};

	return scale;
}
}

View File

@@ -1,35 +0,0 @@
#include <vector>
#include "LASPointWriter.hpp"
using std::vector;
namespace Potree{
// Writes a single point through laszip, copying position, color (scaled from
// 8 to 16 bit), and the remaining attributes into the laszip point record.
void LASPointWriter::write(Point &point){
	coordinates[0] = point.position.x;
	coordinates[1] = point.position.y;
	coordinates[2] = point.position.z;
	laszip_set_coordinates(writer, coordinates);

	this->point->rgb[0] = point.color.x * 256;
	this->point->rgb[1] = point.color.y * 256;
	this->point->rgb[2] = point.color.z * 256;

	this->point->intensity = point.intensity;
	this->point->classification = point.classification;
	this->point->return_number = point.returnNumber;
	this->point->number_of_returns = point.numberOfReturns;
	this->point->point_source_ID = point.pointSourceID;

	// FIX: data() is well-defined for an empty vector, unlike the previous
	// &point.extraBytes[0], which is UB when there are no extra bytes.
	this->point->extra_bytes = reinterpret_cast<laszip_U8*>(point.extraBytes.data());
	this->point->num_extra_bytes = point.extraBytes.size();

	laszip_set_point(writer, this->point);
	laszip_write_point(writer);

	numPoints++;
}
}

View File

@@ -0,0 +1,189 @@
#include "LASWriter.hpp"
#include <fstream>
#include <cstdint>
#include <filesystem>
using namespace std;
namespace fs = std::filesystem;
// Builds a LAS 1.4 header (point data format 2, 26 byte records) in a raw
// byte buffer. Field offsets follow the ASPRS LAS 1.4 specification.
// NOTE(review): the reinterpret_casts store to unaligned offsets — fine on
// x86, technically undefined elsewhere; memcpy would be the portable form.
vector<uint8_t> makeHeaderBuffer(LASHeader header) {
	int headerSize = header.headerSize;

	vector<uint8_t> buffer(headerSize, 0);
	uint8_t* data = buffer.data();

	// file signature
	data[0] = 'L';
	data[1] = 'A';
	data[2] = 'S';
	data[3] = 'F';

	// version major & minor -> 1.4
	data[24] = 1;
	data[25] = 4;

	// header size
	reinterpret_cast<uint16_t*>(data + 94)[0] = headerSize;

	// offset to point data (points start directly after the header)
	reinterpret_cast<uint32_t*>(data + 96)[0] = uint32_t(headerSize);

	// point data format
	data[104] = 2;

	// bytes per point
	reinterpret_cast<uint16_t*>(data + 105)[0] = 26;

	// #points: 64 bit field introduced with LAS 1.4 ...
	uint64_t numPoints = header.numPoints;
	reinterpret_cast<uint64_t*>(data + 247)[0] = numPoints;

	// ... plus the legacy 32 bit count (offset 107), which pre-1.4 readers
	// look at. Previously left at 0, so such readers saw an empty file.
	// The spec requires 0 if the real count does not fit in 32 bits.
	if (numPoints <= 0xFFFFFFFFull) {
		reinterpret_cast<uint32_t*>(data + 107)[0] = uint32_t(numPoints);
	}

	// scale
	reinterpret_cast<double*>(data + 131)[0] = header.scale.x;
	reinterpret_cast<double*>(data + 139)[0] = header.scale.y;
	reinterpret_cast<double*>(data + 147)[0] = header.scale.z;

	// offset (min keeps the stored integer coordinates small and positive)
	reinterpret_cast<double*>(data + 155)[0] = header.min.x;
	reinterpret_cast<double*>(data + 163)[0] = header.min.y;
	reinterpret_cast<double*>(data + 171)[0] = header.min.z;

	// max/min extents, interleaved per axis: max x, min x, max y, ...
	reinterpret_cast<double*>(data + 179)[0] = header.max.x;
	reinterpret_cast<double*>(data + 187)[0] = header.min.x;
	reinterpret_cast<double*>(data + 195)[0] = header.max.y;
	reinterpret_cast<double*>(data + 203)[0] = header.min.y;
	reinterpret_cast<double*>(data + 211)[0] = header.max.z;
	reinterpret_cast<double*>(data + 219)[0] = header.min.z;

	return buffer;
}
// Mirrors the on-disk layout of a LAS point data record, format 2: the first
// 26 bytes of this struct are written verbatim via file.write (no padding
// occurs between the members up to and including b).
// NOTE(review): the LAS spec defines the scan angle rank as signed; it is
// unsigned here, which only matters if the field is ever filled in.
struct LASPointF2 {
	int32_t x;
	int32_t y;
	int32_t z;
	uint16_t intensity;
	uint8_t returnNumber;
	uint8_t classification;
	uint8_t scanAngleRank;
	uint8_t userData;
	uint16_t pointSourceID;
	uint16_t r;
	uint16_t g;
	uint16_t b;
};
// Writes points to path as LAS 1.4, point format 2. All points are colored
// red; the remaining record fields are written as zero.
void writeLAS(string path, LASHeader header, vector<Point> points) {
	vector<uint8_t> headerBuffer = makeHeaderBuffer(header);

	fstream file(path, ios::out | ios::binary);
	file.write(reinterpret_cast<const char*>(headerBuffer.data()), header.headerSize);

	// FIX: zero-initialize the record — previously the unset fields
	// (intensity, classification, ...) contained indeterminate stack bytes
	// that ended up in the output file.
	LASPointF2 laspoint = {};

	for (Point& point : points) {
		int32_t ix = int32_t((point.x - header.min.x) / header.scale.x);
		int32_t iy = int32_t((point.y - header.min.y) / header.scale.y);
		int32_t iz = int32_t((point.z - header.min.z) / header.scale.z);

		laspoint.x = ix;
		laspoint.y = iy;
		laspoint.z = iz;

		laspoint.r = 255;
		laspoint.g = 0;
		laspoint.b = 0;

		file.write(reinterpret_cast<const char*>(&laspoint), 26);
	}

	file.close();
}
// Writes sample to path as LAS; colors are looked up in points' attribute
// buffer, addressed by each sample point's index (4 bytes per point).
void writeLAS(string path, LASHeader header, vector<Point> sample, Points* points) {
	vector<uint8_t> headerBuffer = makeHeaderBuffer(header);

	fstream file(path, ios::out | ios::binary);
	file.write(reinterpret_cast<const char*>(headerBuffer.data()), header.headerSize);

	// FIX: zero-initialize so unset record fields don't leak indeterminate
	// stack bytes into the file.
	LASPointF2 laspoint = {};

	auto attributeBuffer = points->attributeBuffer;
	int bytesPerPointAttribute = 4;

	for (Point& point : sample) {
		int32_t ix = int32_t((point.x - header.min.x) / header.scale.x);
		int32_t iy = int32_t((point.y - header.min.y) / header.scale.y);
		int32_t iz = int32_t((point.z - header.min.z) / header.scale.z);

		laspoint.x = ix;
		laspoint.y = iy;
		laspoint.z = iz;

		uint8_t* pointAttributeData = attributeBuffer->dataU8 + (point.index * bytesPerPointAttribute);
		laspoint.r = pointAttributeData[0];
		laspoint.g = pointAttributeData[1];
		laspoint.b = pointAttributeData[2];

		file.write(reinterpret_cast<const char*>(&laspoint), 26);
	}

	file.close();
}
// Writes all of points to path as LAS, pulling colors from the attribute
// buffer (first attribute of each record).
// NOTE(review): header.min/max/scale stay at LASHeader's defaults here —
// coordinates are quantized against those defaults; confirm that is intended.
void writeLAS(string path, shared_ptr<Points> points) {
	fs::create_directories(fs::path(path).parent_path());

	LASHeader header;
	header.numPoints = points->points.size();

	vector<uint8_t> headerBuffer = makeHeaderBuffer(header);

	fstream file(path, ios::out | ios::binary);
	file.write(reinterpret_cast<const char*>(headerBuffer.data()), header.headerSize);

	// FIX: zero-initialize so unset record fields don't leak indeterminate
	// stack bytes into the file.
	LASPointF2 laspoint = {};

	auto attributeBuffer = points->attributeBuffer;
	int bytesPerPoint = points->attributes.byteSize;
	int offsetToFirstAttribute = points->attributes.list[0].bytes;

	for (Point& point : points->points) {
		int32_t ix = int32_t((point.x - header.min.x) / header.scale.x);
		int32_t iy = int32_t((point.y - header.min.y) / header.scale.y);
		int32_t iz = int32_t((point.z - header.min.z) / header.scale.z);

		laspoint.x = ix;
		laspoint.y = iy;
		laspoint.z = iz;

		uint8_t* pointAttributeData = attributeBuffer->dataU8 + (point.index * bytesPerPoint + offsetToFirstAttribute);
		laspoint.r = pointAttributeData[0];
		laspoint.g = pointAttributeData[1];
		laspoint.b = pointAttributeData[2];

		file.write(reinterpret_cast<const char*>(&laspoint), 26);
	}

	file.close();
}

View File

@@ -1,265 +0,0 @@
#include <fstream>
#include <sstream>
#include "PTXPointReader.h"
#include "stuff.h"
using std::cout;
using std::endl;
using std::vector;
using std::ios;
using std::string;
namespace Potree{
// Sentinel for "no intensity value" in PTX data.
static const int INVALID_INTENSITY = 32767;

// Caches keyed by file path, shared across reader instances, so the
// expensive full-file scan only has to run once per file.
std::map<string, AABB> PTXPointReader::aabbs = std::map<string, AABB>();
std::map<string, long> PTXPointReader::counts = std::map<string, long>();
//inline void split(vector<double> &v, char (&str)[512]) {
// // vector<std::pair<string::const_iterator, string::const_iterator> > sp;
// if (strlen(str) > 200) return;
//
// //string strstr(str);
// //split(sp, strstr, is_space(), token_compress_on);
// vector<string>
// for (auto beg = sp.begin(); beg != sp.end(); ++beg) {
// string token(beg->first, beg->second);
// if (!token.empty()) {
// v.push_back(atof(token.c_str()));
// }
// }
//}
// Reads one line from stream and parses its space separated fields into
// result (cleared first). Lines longer than 511 characters are truncated.
// NOTE(review): std::stod throws std::invalid_argument for a non-numeric
// token, so one malformed PTX line aborts the conversion — consider
// validating tokens or catching here.
inline void getlined(fstream &stream, vector<double> &result) {
	char str[512];
	result.clear();
	stream.getline(str, 512);
	//split(result, str);
	vector<string> tokens = split(str, ' ');
	for (auto &token : tokens) {
		result.push_back(std::stod(token));
	}
}
// Consumes and discards one full line from the stream.
inline void skipline(fstream &stream) {
	string discarded;
	getline(stream, discarded);
}
bool assertd(fstream &stream, size_t i) {
	// Consume one line and report whether it held exactly `i` numeric tokens.
	vector<double> parsed;
	getlined(stream, parsed);
	return parsed.size() == i;
}
/**
* The constructor needs to scan the whole PTX file to find out the bounding box. Unluckily.
* TODO: during the scan all the points are read and transformed. Afterwards, during loading
* the points are read again and transformed again. It should be nice to save the
* transformed points in a temporary file. That would mean a LAS file or something similar.
* Unuseful. It's better to convert the PTX to LAS files.
* TODO: it seems theat the PTXPointReader is asked to produce the bounding box more than once.
* Maybe it should be saved somewhere. Chez moi, scanning 14m points needs 90 secs. The
* process speed of the PTX file is about 1m points every 50 secs.
*/
/**
 * Collects the input file list (a single file, or every .ptx file found in a
 * directory), then opens the first file and parses its first chunk header.
 */
PTXPointReader::PTXPointReader(string path) {
	this->path = path;
	if (fs::is_directory(path)) {
		// if directory is specified, find all ptx files inside directory
		for (fs::directory_iterator it(path); it != fs::directory_iterator(); it++) {
			fs::path filepath = it->path();
			if (fs::is_regular_file(filepath)) {
				if (icompare(fs::path(filepath).extension().string(), ".ptx")) {
					files.push_back(filepath.string());
				}
			}
		}
	} else {
		files.push_back(path);
	}
	// open first file
	// NOTE(review): assumes `files` is non-empty; a directory containing no
	// .ptx files would dereference begin() of an empty vector — confirm callers
	// guarantee at least one input.
	this->currentFile = files.begin();
	// Owned raw pointer; presumably released when advancing to the next file
	// or in the destructor — TODO confirm.
	this->stream = new fstream(*(this->currentFile), ios::in);
	this->currentChunk = 0;
	// Skip the first header line; the remaining chunk-header lines
	// (counts + 4x4 transformation) are parsed by loadChunk.
	skipline(*this->stream);
	loadChunk(this->stream, this->currentChunk, this->tr);
}
/**
 * Scans every input file once to compute the overall bounding box and total
 * point count, cached in the static `aabbs` / `counts` maps keyed by `path`.
 * Points whose intensity is exactly 0.5 are skipped (presumably the PTX
 * marker for unmeasured points — confirm against the scanner's export).
 */
void PTXPointReader::scanForAABB() {
	// read bounding box
	double x(0), y(0), z(0);
	// Start min/max at the extreme double range; firstPoint overwrites them
	// with the first real point anyway. (Previously float limits were used.)
	double minx = std::numeric_limits<double>::max();
	double miny = std::numeric_limits<double>::max();
	double minz = std::numeric_limits<double>::max();
	double maxx = -std::numeric_limits<double>::max();
	double maxy = -std::numeric_limits<double>::max();
	double maxz = -std::numeric_limits<double>::max();
	double intensity(0);
	bool firstPoint = true;
	long currentChunk = 0;
	long count = 0;
	double tr[16];
	vector<double> split;
	for (const auto &file : files) {
		fstream stream(file, ios::in);
		currentChunk = 0;
		// BUGFIX: the stop flag must be reset per file. Previously it was
		// declared outside this loop and stayed true after the first file hit
		// EOF, so every subsequent file was skipped entirely.
		bool pleaseStop = false;
		getlined(stream, split);
		while (!pleaseStop) {
			// a single-token line marks the start of the next chunk header
			if (1 == split.size()) {
				if (!loadChunk(&stream, currentChunk, tr)) {
					break;
				}
			}
			while (true) {
				getlined(stream, split);
				if (4 == split.size() || 7 == split.size()) {
					x = split[0];
					y = split[1];
					z = split[2];
					intensity = split[3];
					if (0.5 != intensity) {
						// apply the chunk's transformation before measuring
						Point p = transform(tr, x, y, z);
						if (firstPoint) {
							maxx = minx = p.position.x;
							maxy = miny = p.position.y;
							maxz = minz = p.position.z;
							firstPoint = false;
						} else {
							minx = p.position.x < minx ? p.position.x : minx;
							maxx = p.position.x > maxx ? p.position.x : maxx;
							miny = p.position.y < miny ? p.position.y : miny;
							maxy = p.position.y > maxy ? p.position.y : maxy;
							minz = p.position.z < minz ? p.position.z : minz;
							maxz = p.position.z > maxz ? p.position.z : maxz;
						}
						count++;
						if (0 == count % 1000000)
							cout << "AABB-SCANNING: " << count << " points; " << currentChunk << " chunks" << endl;
					}
				} else {
					break;
				}
			}
			if (stream.eof()) {
				pleaseStop = true;
				break;
			}
			currentChunk++;
		}
		stream.close();
	}
	counts[path] = count;
	AABB lAABB(Vector3<double>(minx, miny, minz), Vector3<double>(maxx, maxy, maxz));
	PTXPointReader::aabbs[path] = lAABB;
}
/**
 * Parses one PTX chunk header: five count lines holding 1, 3, 3, 3, 3 numbers,
 * followed by a 4x4 transformation matrix stored row-major into `tr`.
 * Returns false if the stream does not contain a well-formed header (on early
 * failure `tr` may be partially filled, matching the previous behavior).
 * Side effect: `origin` is set from the matrix translation row.
 */
bool PTXPointReader::loadChunk(fstream *stream, long currentChunk, double tr[16]) {
	vector<double> split;
	// The first 5 lines should have respectively 1, 3, 3, 3, 3 numbers each.
	if (!assertd(*stream, 1) || !assertd(*stream, 3) || !assertd(*stream, 3) || !assertd(*stream, 3) || !assertd(*stream, 3))
		return false;
	// Read the four matrix rows; each must hold exactly 4 numbers.
	for (int row = 0; row < 4; row++) {
		getlined(*stream, split);
		if (4 != split.size()) {
			return false;
		}
		for (int col = 0; col < 4; col++) {
			tr[4 * row + col] = split[col];
		}
	}
	// The last row carries the translation part (scanner position).
	origin = Vector3<double>(tr[12], tr[13], tr[14]);
	return true;
}
bool PTXPointReader::readNextPoint() {
	// Keep pulling raw lines until a valid point shows up or input runs out;
	// doReadNextPoint marks unusable lines with INVALID_INTENSITY.
	for (;;) {
		if (!doReadNextPoint()) {
			return false;
		}
		if (p.intensity != INVALID_INTENSITY) {
			return true;
		}
	}
}
/**
 * Reads the next raw line and fills the current point `p`.
 * A single-token line starts the next chunk header; a line with fewer than 4
 * tokens marks `p` invalid (INVALID_INTENSITY) so readNextPoint retries.
 * Returns false only when all input files are exhausted.
 */
bool PTXPointReader::doReadNextPoint() {
	if (this->stream->eof()) {
		this->stream->close();
		// BUGFIX: release the finished stream before moving on. Previously the
		// fstream object was leaked every time the reader advanced to the next
		// file. Deleting here is safe: the pointer is immediately replaced or
		// nulled, so a later delete (e.g. in the destructor) is a no-op.
		delete this->stream;
		this->stream = nullptr;
		this->currentFile++;
		if (this->currentFile != files.end()) {
			this->stream = new fstream(*(this->currentFile), ios::in);
			this->currentChunk = 0;
			skipline(*stream);
			loadChunk(stream, currentChunk, tr);
		} else {
			return false;
		}
	}
	vector<double> split;
	getlined(*stream, split);
	if (1 == split.size()) {
		// a lone number introduces the next chunk's header
		this->currentChunk++;
		loadChunk(stream, currentChunk, tr);
		getlined(*stream, split);
	}
	auto size1 = split.size();
	if (size1 > 3) {
		this->p = transform(tr, split[0], split[1], split[2]);
		double intensity = split[3];
		this->p.intensity = (unsigned short)(65535.0 * intensity);
		if (4 == size1) {
			// "x y z i" format: derive a gray color from the intensity
			this->p.color.x = (unsigned char)(intensity * 255.0);
			this->p.color.y = (unsigned char)(intensity * 255.0);
			this->p.color.z = (unsigned char)(intensity * 255.0);
		} else if (7 == size1) {
			// "x y z i r g b" format
			this->p.color.x = (unsigned char)(split[4]);
			this->p.color.y = (unsigned char)(split[5]);
			this->p.color.z = (unsigned char)(split[6]);
		}
	} else {
		this->p.intensity = INVALID_INTENSITY;
	}
	return true;
}
}

View File

@@ -1,49 +0,0 @@
#include "PointAttributes.hpp"
#include "PotreeException.h"
namespace Potree{
// Catalogue of the predefined point attributes.
// Constructor arguments: (ordinal, name, element type, numElements, byteSize).
// The ordinal is the identity used by operator== below.
const PointAttribute PointAttribute::POSITION_CARTESIAN = PointAttribute(0, "POSITION_CARTESIAN", ATTRIBUTE_TYPE_INT32, 3, 12);
const PointAttribute PointAttribute::COLOR_PACKED = PointAttribute(1, "RGBA", ATTRIBUTE_TYPE_UINT8, 4, 4);
const PointAttribute PointAttribute::INTENSITY = PointAttribute(2, "intensity", ATTRIBUTE_TYPE_UINT16, 1, 2);
const PointAttribute PointAttribute::CLASSIFICATION = PointAttribute(3, "classification", ATTRIBUTE_TYPE_UINT8, 1, 1);
const PointAttribute PointAttribute::RETURN_NUMBER = PointAttribute(4, "return number", ATTRIBUTE_TYPE_UINT8, 1, 1);
const PointAttribute PointAttribute::NUMBER_OF_RETURNS = PointAttribute(5, "number of returns", ATTRIBUTE_TYPE_UINT8, 1, 1);
const PointAttribute PointAttribute::SOURCE_ID = PointAttribute(6, "source id", ATTRIBUTE_TYPE_UINT16, 1, 2);
const PointAttribute PointAttribute::GPS_TIME = PointAttribute(7, "gps-time", ATTRIBUTE_TYPE_DOUBLE, 1, 8);
const PointAttribute PointAttribute::NORMAL_SPHEREMAPPED = PointAttribute(8, "NORMAL_SPHEREMAPPED", ATTRIBUTE_TYPE_INT8, 2, 2);
const PointAttribute PointAttribute::NORMAL_OCT16 = PointAttribute(9, "NORMAL_OCT16", ATTRIBUTE_TYPE_INT8, 2, 2);
const PointAttribute PointAttribute::NORMAL = PointAttribute(10, "NORMAL", ATTRIBUTE_TYPE_FLOAT, 3, 12);
/**
 * Maps an attribute name to its predefined PointAttribute constant.
 * @throws PotreeException for unknown names.
 */
PointAttribute PointAttribute::fromString(string name){
	if(name == "POSITION_CARTESIAN"){
		return PointAttribute::POSITION_CARTESIAN;
	}else if(name == "COLOR_PACKED"){
		return PointAttribute::COLOR_PACKED;
	}else if(name == "INTENSITY"){
		return PointAttribute::INTENSITY;
	}else if(name == "CLASSIFICATION"){
		return PointAttribute::CLASSIFICATION;
	}else if(name == "RETURN_NUMBER"){
		return PointAttribute::RETURN_NUMBER;
	}else if(name == "NUMBER_OF_RETURNS"){
		return PointAttribute::NUMBER_OF_RETURNS;
	}else if(name == "SOURCE_ID"){
		return PointAttribute::SOURCE_ID;
	}else if(name == "GPS_TIME"){
		return PointAttribute::GPS_TIME;
	}else if(name == "NORMAL_SPHEREMAPPED"){
		// FIX: this constant is declared above but previously had no parse
		// branch, so its own name could never be mapped back.
		return PointAttribute::NORMAL_SPHEREMAPPED;
	}else if(name == "NORMAL_OCT16"){
		return PointAttribute::NORMAL_OCT16;
	}else if(name == "NORMAL"){
		return PointAttribute::NORMAL;
	}
	throw PotreeException("Invalid PointAttribute name: '" + name + "'");
}
bool operator==(const PointAttribute& lhs, const PointAttribute& rhs){
	// Attributes are identified purely by their ordinal.
	const bool sameOrdinal = (lhs.ordinal == rhs.ordinal);
	return sameOrdinal;
}
}

View File

@@ -1,705 +0,0 @@
#include <experimental/filesystem>
#include "rapidjson/document.h"
#include "rapidjson/prettywriter.h"
#include "rapidjson/stringbuffer.h"
#include "PotreeConverter.h"
#include "stuff.h"
#include "LASPointReader.h"
#include "PTXPointReader.h"
#include "PotreeException.h"
#include "PotreeWriter.h"
#include "LASPointWriter.hpp"
#include "BINPointWriter.hpp"
#include "BINPointReader.hpp"
#include "PlyPointReader.h"
#include "XYZPointReader.hpp"
#include "ExtraBytes.hpp"
#include <chrono>
#include <sstream>
#include <string>
#include <map>
#include <vector>
#include <math.h>
#include <fstream>
using rapidjson::Document;
using rapidjson::StringBuffer;
using rapidjson::Writer;
using rapidjson::PrettyWriter;
using rapidjson::Value;
using std::stringstream;
using std::map;
using std::string;
using std::vector;
using std::find;
using std::chrono::high_resolution_clock;
using std::chrono::milliseconds;
using std::chrono::duration_cast;
using std::fstream;
namespace fs = std::experimental::filesystem;
namespace Potree{
/**
 * Instantiates the PointReader matching a file's extension.
 * Returns NULL for unsupported extensions; the caller owns the reader.
 */
PointReader *PotreeConverter::createPointReader(string path, PointAttributes pointAttributes){
	PointReader *reader = NULL;
	if(iEndsWith(path, ".las") || iEndsWith(path, ".laz")){
		reader = new LASPointReader(path);
	}else if(iEndsWith(path, ".ptx")){
		reader = new PTXPointReader(path);
	}else if(iEndsWith(path, ".ply")){
		reader = new PlyPointReader(path);
	}else if(iEndsWith(path, ".xyz") || iEndsWith(path, ".txt")){
		reader = new XYZPointReader(path, format, colorRange, intensityRange);
	}else if(iEndsWith(path, ".pts")){
		// FIX: start from the user-provided range. Previously the local
		// shadowed the member and stayed empty whenever a range WAS supplied,
		// silently discarding the user's setting. Fall back to the .pts
		// default of [-2048, 2047] when nothing was specified.
		vector<double> intensityRange = this->intensityRange;
		if(intensityRange.size() == 0){
			intensityRange.push_back(-2048);
			intensityRange.push_back(+2047);
		}
		reader = new XYZPointReader(path, format, colorRange, intensityRange);
	}else if(iEndsWith(path, ".bin")){
		reader = new BINPointReader(path, aabb, scale, pointAttributes);
	}
	return reader;
}
PotreeConverter::PotreeConverter(string executablePath, string workDir, vector<string> sources){
	// Remember what to convert, where the output goes and where we run from;
	// all real work happens later in prepare()/convert().
	this->sources = sources;
	this->workDir = workDir;
	this->executablePath = executablePath;
}
/**
 * Probes a LAS/LAZ file for standard attributes that actually carry data.
 * Samples up to the first 1,000,000 points and reports an attribute as
 * available if any sampled point has a non-zero value for it.
 * Returns an empty list for non-LAS inputs.
 */
vector<PointAttribute> checkAvailableStandardAttributes(string file) {
	vector<PointAttribute> attributes;
	bool isLas = iEndsWith(file, ".las") || iEndsWith(file, ".laz");
	if (!isLas) {
		return attributes;
	}
	// laszip C API: create reader, enable compatibility mode, open, read.
	laszip_POINTER laszip_reader;
	laszip_header* header;
	laszip_create(&laszip_reader);
	laszip_BOOL request_reader = 1;
	laszip_request_compatibility_mode(laszip_reader, request_reader);
	bool hasClassification = false;
	bool hasGpsTime = false;
	bool hasIntensity = false;
	bool hasNumberOfReturns = false;
	bool hasReturnNumber = false;
	bool hasPointSourceId = false;
	{
		laszip_BOOL is_compressed = iEndsWith(file, ".laz") ? 1 : 0;
		laszip_open_reader(laszip_reader, file.c_str(), &is_compressed);
		laszip_get_header_pointer(laszip_reader, &header);
		// LAS 1.4 moved the count into extended_number_of_point_records.
		long long npoints = (header->number_of_point_records ? header->number_of_point_records : header->extended_number_of_point_records);
		laszip_point* point;
		laszip_get_point_pointer(laszip_reader, &point);
		// NOTE(review): an attribute that is zero for the first 1M points but
		// populated later is not detected — intentional sampling trade-off.
		for (int i = 0; i < 1'000'000 && i < npoints; i++) {
			laszip_read_point(laszip_reader);
			hasClassification |= point->classification != 0;
			hasGpsTime |= point->gps_time != 0;
			hasIntensity |= point->intensity != 0;
			hasNumberOfReturns |= point->number_of_returns != 0;
			hasReturnNumber |= point->return_number != 0;
			hasPointSourceId |= point->point_source_ID != 0;
		}
	}
	laszip_close_reader(laszip_reader);
	laszip_destroy(laszip_reader);
	if (hasClassification) {
		attributes.push_back(PointAttribute::CLASSIFICATION);
	}
	if (hasGpsTime) {
		attributes.push_back(PointAttribute::GPS_TIME);
	}
	if (hasIntensity) {
		attributes.push_back(PointAttribute::INTENSITY);
	}
	if (hasNumberOfReturns) {
		attributes.push_back(PointAttribute::NUMBER_OF_RETURNS);
	}
	if (hasReturnNumber) {
		attributes.push_back(PointAttribute::RETURN_NUMBER);
	}
	if (hasPointSourceId) {
		attributes.push_back(PointAttribute::SOURCE_ID);
	}
	return attributes;
}
/**
 * Reads the "extra bytes" VLR (record id 4) from a LAS/LAZ header and turns
 * each declared extra attribute into a PointAttribute.
 * Returns an empty list for non-LAS inputs or files without that VLR.
 */
vector<PointAttribute> parseExtraAttributes(string file) {
	vector<PointAttribute> attributes;
	bool isLas = iEndsWith(file, ".las") || iEndsWith(file, ".laz");
	if(!isLas) {
		return attributes;
	}
	laszip_POINTER laszip_reader;
	laszip_header* header;
	laszip_create(&laszip_reader);
	laszip_BOOL request_reader = 1;
	laszip_request_compatibility_mode(laszip_reader, request_reader);
	laszip_BOOL is_compressed = iEndsWith(file, ".laz") ? 1 : 0;
	laszip_open_reader(laszip_reader, file.c_str(), &is_compressed);
	laszip_get_header_pointer(laszip_reader, &header);
	{ // read extra bytes
		for (int i = 0; i < header->number_of_variable_length_records; i++) {
			laszip_vlr_struct vlr = header->vlrs[i];
			// record id 4 is the LAS "Extra Bytes" specification record
			if (vlr.record_id != 4) {
				continue;
			}
			cout << "record id: " << vlr.record_id << endl;
			cout << "record_length_after_header: " << vlr.record_length_after_header << endl;
			// the VLR payload is an array of fixed-size ExtraBytesRecord entries
			int numExtraBytes = vlr.record_length_after_header / sizeof(ExtraBytesRecord);
			ExtraBytesRecord* extraBytes = reinterpret_cast<ExtraBytesRecord*>(vlr.data);
			for (int j = 0; j < numExtraBytes; j++) {
				ExtraBytesRecord extraAttribute = extraBytes[j];
				string name = string(extraAttribute.name);
				cout << "name: " << name << endl;
				//ExtraType type = extraTypeFromID(extraAttribute.data_type);
				// NOTE(review): .at() throws for unknown data_type ids — confirm
				// that is the intended failure mode for malformed files.
				ExtraType type = typeToExtraType.at(extraAttribute.data_type);
				int byteSize = type.size;
				// ordinal 123 marks "extra" attributes — presumably any value
				// outside the predefined ordinals works; verify against operator==.
				PointAttribute attribute(123, name, type.type, type.numElements, byteSize);
				attributes.push_back(attribute);
			}
		}
	}
	laszip_close_reader(laszip_reader);
	laszip_destroy(laszip_reader);
	return attributes;
}
/**
 * Expands directory sources into the individual point-cloud files they contain
 * and builds the output attribute layout: either from the user's explicit
 * attribute list, or by probing the first input file for populated standard
 * and extra attributes.
 */
void PotreeConverter::prepare(){
	// if sources contains directories, use files inside the directory instead
	vector<string> sourceFiles;
	for (const auto &source : sources) {
		fs::path pSource(source);
		if(fs::is_directory(pSource)){
			fs::directory_iterator it(pSource);
			for(;it != fs::directory_iterator(); it++){
				fs::path pDirectoryEntry = it->path();
				if(fs::is_regular_file(pDirectoryEntry)){
					string filepath = pDirectoryEntry.string();
					if(iEndsWith(filepath, ".las")
						|| iEndsWith(filepath, ".laz")
						|| iEndsWith(filepath, ".xyz")
						|| iEndsWith(filepath, ".pts")
						|| iEndsWith(filepath, ".ptx")
						|| iEndsWith(filepath, ".ply")){
						sourceFiles.push_back(filepath);
					}
				}
			}
		}else if(fs::is_regular_file(pSource)){
			sourceFiles.push_back(source);
		}
	}
	this->sources = sourceFiles;
	pointAttributes = PointAttributes();
	pointAttributes.add(PointAttribute::POSITION_CARTESIAN);
	bool addExtraAttributes = false;
	if(outputAttributes.size() > 0){
		// the user explicitly listed the attributes to store
		for(const auto &attribute : outputAttributes){
			if(attribute == "RGB"){
				pointAttributes.add(PointAttribute::COLOR_PACKED);
			}else if(attribute == "INTENSITY"){
				pointAttributes.add(PointAttribute::INTENSITY);
			} else if (attribute == "CLASSIFICATION") {
				pointAttributes.add(PointAttribute::CLASSIFICATION);
			} else if (attribute == "RETURN_NUMBER") {
				pointAttributes.add(PointAttribute::RETURN_NUMBER);
			} else if (attribute == "NUMBER_OF_RETURNS") {
				pointAttributes.add(PointAttribute::NUMBER_OF_RETURNS);
			} else if (attribute == "SOURCE_ID") {
				pointAttributes.add(PointAttribute::SOURCE_ID);
			} else if (attribute == "GPS_TIME") {
				pointAttributes.add(PointAttribute::GPS_TIME);
			} else if(attribute == "NORMAL"){
				pointAttributes.add(PointAttribute::NORMAL_OCT16);
			} else if (attribute == "EXTRA") {
				addExtraAttributes = true;
			}
		}
	} else if (!sourceFiles.empty()) {
		// FIX: guard against an empty source list before probing
		// sourceFiles[0] (previously crashed when no valid inputs were found).
		string file = sourceFiles[0];
		// always add colors?
		pointAttributes.add(PointAttribute::COLOR_PACKED);
		vector<PointAttribute> attributes = checkAvailableStandardAttributes(file);
		for (PointAttribute attribute : attributes) {
			pointAttributes.add(attribute);
			//cout << attribute.name << ", " << attribute.byteSize << endl;
		}
		addExtraAttributes = true;
	}
	if(addExtraAttributes && !sourceFiles.empty()){
		string file = sourceFiles[0];
		vector<PointAttribute> extraAttributes = parseExtraAttributes(file);
		for (PointAttribute attribute : extraAttributes) {
			pointAttributes.add(attribute);
			//cout << attribute.name << ", " << attribute.byteSize << endl;
		}
	}
	cout << "processing following attributes: " << endl;
	for (PointAttribute& attribute : pointAttributes.attributes) {
		cout << attribute.name << endl;
	}
	cout << endl;
}
FileInfos PotreeConverter::computeInfos(){
	// Either trust a user-specified bounding box (six --aabb values) or derive
	// one by scanning every input file; point counts are summed either way.
	AABB aabb;
	uint64_t numPoints = 0;
	if(aabbValues.size() == 6){
		Vector3<double> userMin(aabbValues[0], aabbValues[1], aabbValues[2]);
		Vector3<double> userMax(aabbValues[3], aabbValues[4], aabbValues[5]);
		aabb = AABB(userMin, userMax);
	}else{
		for(const auto &source : sources){
			PointReader *reader = createPointReader(source, pointAttributes);
			numPoints += reader->numPoints();
			AABB readerBox = reader->getAABB();
			aabb.update(readerBox.min);
			aabb.update(readerBox.max);
			reader->close();
			delete reader;
		}
	}
	return {aabb, numPoints};
}
/**
 * Copies the HTML page template into the work directory and specializes it:
 * injects the point-cloud load call and viewer settings into the viewer
 * template, and the sources path into the map template (when a projection is
 * set). Template placeholder lines are matched literally.
 */
void PotreeConverter::generatePage(string name){
	string pagedir = this->workDir;
	string templateSourcePath = this->executablePath + "/resources/page_template/viewer_template.html";
	string mapTemplateSourcePath = this->executablePath + "/resources/page_template/lasmap_template.html";
	string templateDir = this->executablePath + "/resources/page_template";
	// a user-supplied template directory overrides the bundled one
	if(!this->pageTemplatePath.empty()) {
		templateSourcePath = this->pageTemplatePath + "/viewer_template.html";
		mapTemplateSourcePath = this->pageTemplatePath + "/lasmap_template.html";
		templateDir = this->pageTemplatePath;
	}
	string templateTargetPath = pagedir + "/" + name + ".html";
	string mapTemplateTargetPath = pagedir + "/lasmap_" + name + ".html";
	Potree::copyDir(fs::path(templateDir), fs::path(pagedir));
	// the raw templates were copied along; remove them from the output
	fs::remove(pagedir + "/viewer_template.html");
	fs::remove(pagedir + "/lasmap_template.html");
	if(!this->sourceListingOnly){ // change viewer template
		ifstream in( templateSourcePath );
		ofstream out( templateTargetPath );
		string line;
		while(getline(in, line)){
			if(line.find("<!-- INCLUDE POINTCLOUD -->") != string::npos){
				// emit the JavaScript that loads and displays this cloud
				out << "\t\tPotree.loadPointCloud(\"pointclouds/" << name << "/cloud.js\", \"" << name << "\", e => {" << endl;
				out << "\t\t\tlet pointcloud = e.pointcloud;\n";
				out << "\t\t\tlet material = pointcloud.material;\n";
				out << "\t\t\tviewer.scene.addPointCloud(pointcloud);" << endl;
				out << "\t\t\t" << "material.pointColorType = Potree.PointColorType." << material << "; // any Potree.PointColorType.XXXX \n";
				out << "\t\t\tmaterial.size = 1;\n";
				out << "\t\t\tmaterial.pointSizeType = Potree.PointSizeType.ADAPTIVE;\n";
				out << "\t\t\tmaterial.shape = Potree.PointShape.SQUARE;\n";
				out << "\t\t\tviewer.fitToScreen();" << endl;
				out << "\t\t});" << endl;
			}else if(line.find("<!-- INCLUDE SETTINGS HERE -->") != string::npos){
				out << std::boolalpha;
				out << "\t\t" << "document.title = \"" << title << "\";\n";
				out << "\t\t" << "viewer.setEDLEnabled(" << edlEnabled << ");\n";
				if(showSkybox){
					out << "\t\t" << "viewer.setBackground(\"skybox\"); // [\"skybox\", \"gradient\", \"black\", \"white\"];\n";
				}else{
					out << "\t\t" << "viewer.setBackground(\"gradient\"); // [\"skybox\", \"gradient\", \"black\", \"white\"];\n";
				}
				// backticks would terminate the JS template literal; soften them
				string descriptionEscaped = string(description);
				std::replace(descriptionEscaped.begin(), descriptionEscaped.end(), '`', '\'');
				out << "\t\t" << "viewer.setDescription(`" << descriptionEscaped << "`);\n";
			}else{
				out << line << endl;
			}
		}
		in.close();
		out.close();
	}
	// change lasmap template
	if(!this->projection.empty()){
		ifstream in( mapTemplateSourcePath );
		ofstream out( mapTemplateTargetPath );
		string line;
		while(getline(in, line)){
			if(line.find("<!-- INCLUDE SOURCE -->") != string::npos){
				out << "\tvar source = \"" << "pointclouds/" << name << "/sources.json" << "\";";
			}else{
				out << line << endl;
			}
		}
		in.close();
		out.close();
	}
	//{ // write settings
	//	stringstream ssSettings;
	//
	//	ssSettings << "var sceneProperties = {" << endl;
	//	ssSettings << "\tpath: \"" << "../resources/pointclouds/" << name << "/cloud.js\"," << endl;
	//	ssSettings << "\tcameraPosition: null, // other options: cameraPosition: [10,10,10]," << endl;
	//	ssSettings << "\tcameraTarget: null, // other options: cameraTarget: [0,0,0]," << endl;
	//	ssSettings << "\tfov: 60, // field of view in degrees," << endl;
	//	ssSettings << "\tsizeType: \"Adaptive\", // other options: \"Fixed\", \"Attenuated\"" << endl;
	//	ssSettings << "\tquality: null, // other options: \"Circles\", \"Interpolation\", \"Splats\"" << endl;
	//	ssSettings << "\tmaterial: \"RGB\", // other options: \"Height\", \"Intensity\", \"Classification\"" << endl;
	//	ssSettings << "\tpointLimit: 1, // max number of points in millions" << endl;
	//	ssSettings << "\tpointSize: 1, // " << endl;
	//	ssSettings << "\tnavigation: \"Orbit\", // other options: \"Orbit\", \"Flight\"" << endl;
	//	ssSettings << "\tuseEDL: false, " << endl;
	//	ssSettings << "};" << endl;
	//
	//
	//	ofstream fSettings;
	//	fSettings.open(pagedir + "/examples/" + name + ".js", ios::out);
	//	fSettings << ssSettings.str();
	//	fSettings.close();
	//}
}
/**
 * Serializes per-source metadata (name, point count, bounding box) plus the
 * combined bounding box and projection into <path>/sources.json.
 * The three parallel vectors must have equal length.
 */
void writeSources(string path, vector<string> sourceFilenames, vector<int> numPoints, vector<AABB> boundingBoxes, string projection){
	Document d(rapidjson::kObjectType);
	AABB bb;
	Value jProjection(projection.c_str(), (rapidjson::SizeType)projection.size());
	Value jSources(rapidjson::kObjectType);
	jSources.SetArray();
	// FIX: size_t index avoids the signed/unsigned comparison warning.
	for(size_t i = 0; i < sourceFilenames.size(); i++){
		string &source = sourceFilenames[i];
		int points = numPoints[i];
		AABB boundingBox = boundingBoxes[i];
		// grow the combined box while we serialize the individual ones
		bb.update(boundingBox);
		Value jSource(rapidjson::kObjectType);
		Value jName(source.c_str(), (rapidjson::SizeType)source.size());
		Value jPoints(points);
		Value jBounds(rapidjson::kObjectType);
		{
			Value bbMin(rapidjson::kObjectType);
			Value bbMax(rapidjson::kObjectType);
			bbMin.SetArray();
			bbMin.PushBack(boundingBox.min.x, d.GetAllocator());
			bbMin.PushBack(boundingBox.min.y, d.GetAllocator());
			bbMin.PushBack(boundingBox.min.z, d.GetAllocator());
			bbMax.SetArray();
			bbMax.PushBack(boundingBox.max.x, d.GetAllocator());
			bbMax.PushBack(boundingBox.max.y, d.GetAllocator());
			bbMax.PushBack(boundingBox.max.z, d.GetAllocator());
			jBounds.AddMember("min", bbMin, d.GetAllocator());
			jBounds.AddMember("max", bbMax, d.GetAllocator());
		}
		jSource.AddMember("name", jName, d.GetAllocator());
		jSource.AddMember("points", jPoints, d.GetAllocator());
		jSource.AddMember("bounds", jBounds, d.GetAllocator());
		jSources.PushBack(jSource, d.GetAllocator());
	}
	Value jBoundingBox(rapidjson::kObjectType);
	{
		Value bbMin(rapidjson::kObjectType);
		Value bbMax(rapidjson::kObjectType);
		bbMin.SetArray();
		bbMin.PushBack(bb.min.x, d.GetAllocator());
		bbMin.PushBack(bb.min.y, d.GetAllocator());
		bbMin.PushBack(bb.min.z, d.GetAllocator());
		bbMax.SetArray();
		bbMax.PushBack(bb.max.x, d.GetAllocator());
		bbMax.PushBack(bb.max.y, d.GetAllocator());
		bbMax.PushBack(bb.max.z, d.GetAllocator());
		jBoundingBox.AddMember("min", bbMin, d.GetAllocator());
		jBoundingBox.AddMember("max", bbMax, d.GetAllocator());
	}
	d.AddMember("bounds", jBoundingBox, d.GetAllocator());
	d.AddMember("projection", jProjection, d.GetAllocator());
	d.AddMember("sources", jSources, d.GetAllocator());
	StringBuffer buffer;
	//PrettyWriter<StringBuffer> writer(buffer);
	Writer<StringBuffer> writer(buffer);
	d.Accept(writer);
	if(!fs::exists(fs::path(path))){
		fs::path pcdir(path);
		fs::create_directories(pcdir);
	}
	ofstream sourcesOut(path + "/sources.json", ios::out);
	sourcesOut << buffer.GetString();
	sourcesOut.close();
}
void PotreeConverter::convert(){
auto start = high_resolution_clock::now();
prepare();
long long pointsProcessed = 0;
FileInfos infos = computeInfos();
AABB aabb = infos.aabb;
{
cout << "AABB: {" << endl;
cout << "\t\"min\": " << aabb.min << "," << endl;
cout << "\t\"max\": " << aabb.max << "," << endl;
cout << "\t\"size\": " << aabb.size << endl;
cout << "}" << endl << endl;
aabb.makeCubic();
cout << "cubicAABB: {" << endl;
cout << "\t\"min\": " << aabb.min << "," << endl;
cout << "\t\"max\": " << aabb.max << "," << endl;
cout << "\t\"size\": " << aabb.size << endl;
cout << "}" << endl << endl;
}
cout << "total number of points: " << infos.numPoints << endl;
if (diagonalFraction != 0) {
spacing = (float)(aabb.size.length() / diagonalFraction);
cout << "spacing calculated from diagonal: " << spacing << endl;
}
if(pageName.size() > 0){
generatePage(pageName);
workDir = workDir + "/pointclouds/" + pageName;
}
PotreeWriter *writer = NULL;
if(fs::exists(fs::path(this->workDir + "/cloud.js"))){
if(storeOption == StoreOption::ABORT_IF_EXISTS){
cout << "ABORTING CONVERSION: target already exists: " << this->workDir << "/cloud.js" << endl;
cout << "If you want to overwrite the existing conversion, specify --overwrite" << endl;
cout << "If you want add new points to the existing conversion, make sure the new points ";
cout << "are contained within the bounding box of the existing conversion and then specify --incremental" << endl;
return;
}else if(storeOption == StoreOption::OVERWRITE){
fs::remove_all(workDir + "/data");
fs::remove_all(workDir + "/temp");
fs::remove(workDir + "/cloud.js");
writer = new PotreeWriter(this->workDir, aabb, spacing, maxDepth, scale, outputFormat, pointAttributes, quality);
writer->setProjection(this->projection);
}else if(storeOption == StoreOption::INCREMENTAL){
writer = new PotreeWriter(this->workDir, quality);
writer->loadStateFromDisk();
}
}else{
writer = new PotreeWriter(this->workDir, aabb, spacing, maxDepth, scale, outputFormat, pointAttributes, quality);
writer->setProjection(this->projection);
}
if(writer == NULL){
return;
}
writer->storeSize = storeSize;
vector<AABB> boundingBoxes;
vector<int> numPoints;
vector<string> sourceFilenames;
for (const auto &source : sources) {
cout << "READING: " << source << endl;
PointReader *reader = createPointReader(source, pointAttributes);
boundingBoxes.push_back(reader->getAABB());
numPoints.push_back(reader->numPoints());
sourceFilenames.push_back(fs::path(source).filename().string());
writeSources(this->workDir, sourceFilenames, numPoints, boundingBoxes, this->projection);
if(this->sourceListingOnly){
reader->close();
delete reader;
continue;
}
while(reader->readNextPoint()){
pointsProcessed++;
Point p = reader->getPoint();
writer->add(p);
if((pointsProcessed % (1'000'000)) == 0){
writer->processStore();
writer->waitUntilProcessed();
auto end = high_resolution_clock::now();
long long duration = duration_cast<milliseconds>(end-start).count();
float seconds = duration / 1'000.0f;
stringstream ssMessage;
ssMessage.imbue(std::locale(""));
//ssMessage << "INDEXING: ";
//ssMessage << pointsProcessed << " points processed; ";
//ssMessage << writer->numAccepted << " points written; ";
//ssMessage << seconds << " seconds passed";
int percent = 100.0f * float(pointsProcessed) / float(infos.numPoints);
ssMessage << "INDEXING: ";
ssMessage << pointsProcessed << " of " << infos.numPoints << " processed (" << percent << "%); ";
ssMessage << writer->numAccepted << " written; ";
ssMessage << seconds << " seconds passed";
cout << ssMessage.str() << endl;
}
if((pointsProcessed % (flushLimit)) == 0){
cout << "FLUSHING: ";
auto start = high_resolution_clock::now();
writer->flush();
auto end = high_resolution_clock::now();
long long duration = duration_cast<milliseconds>(end-start).count();
float seconds = duration / 1'000.0f;
cout << seconds << "s" << endl;
}
//if(pointsProcessed >= 10'000'000){
// break;
//}
}
reader->close();
delete reader;
}
cout << "closing writer" << endl;
writer->flush();
writer->close();
writeSources(this->workDir + "/sources.json", sourceFilenames, numPoints, boundingBoxes, this->projection);
float percent = (float)writer->numAccepted / (float)pointsProcessed;
percent = percent * 100;
auto end = high_resolution_clock::now();
long long duration = duration_cast<milliseconds>(end-start).count();
cout << endl;
cout << "conversion finished" << endl;
cout << pointsProcessed << " points were processed and " << writer->numAccepted << " points ( " << percent << "% ) were written to the output. " << endl;
cout << "duration: " << (duration / 1000.0f) << "s" << endl;
}
}

View File

@@ -1,765 +0,0 @@
#include <cmath>
#include <sstream>
#include <stack>
#include <chrono>
#include <fstream>
#include <iomanip>
#include <experimental/filesystem>
#include "AABB.h"
#include "SparseGrid.h"
#include "stuff.h"
#include "CloudJS.hpp"
#include "PointAttributes.hpp"
#include "PointReader.h"
#include "PointWriter.hpp"
#include "LASPointReader.h"
#include "BINPointReader.hpp"
#include "LASPointWriter.hpp"
#include "BINPointWriter.hpp"
#include "PotreeException.h"
#include "PotreeWriter.h"
using std::ifstream;
using std::stack;
using std::stringstream;
using std::chrono::high_resolution_clock;
using std::chrono::milliseconds;
using std::chrono::duration_cast;
namespace fs = std::experimental::filesystem;
namespace Potree{
// Root-node constructor. Members are assigned in the body rather than an
// initializer list because spacing() reads potreeWriter (and level — assumed
// to default to the root level in the header; TODO confirm), which must be
// set before the grid is built.
PWNode::PWNode(PotreeWriter* potreeWriter, AABB aabb){
	this->potreeWriter = potreeWriter;
	this->aabb = aabb;
	this->grid = new SparseGrid(aabb, spacing());
}
// Child-node constructor: index is the octant position within the parent,
// level the depth below the root. potreeWriter and level are assigned before
// the grid because spacing() depends on both — keep this order.
PWNode::PWNode(PotreeWriter* potreeWriter, int index, AABB aabb, int level){
	this->index = index;
	this->aabb = aabb;
	this->level = level;
	this->potreeWriter = potreeWriter;
	this->grid = new SparseGrid(aabb, spacing());
}
PWNode::~PWNode(){
	// Recursively release the subtree, then this node's occupancy grid.
	for(PWNode *child : children){
		delete child;   // deleting a NULL slot is a no-op
	}
	delete grid;
}
string PWNode::name() const {
	// Names are built recursively: "r" for the root, then one child-index
	// digit appended per level (e.g. "r062").
	if(parent != NULL){
		return parent->name() + std::to_string(index);
	}
	return "r";
}
float PWNode::spacing(){
	// The writer's base spacing halves at every level below the root.
	double levelDivisor = pow(2.0, float(level));
	return float(potreeWriter->spacing / levelDivisor);
}
string PWNode::workDir(){
	// All nodes write into the directory owned by the writer.
	string dir = potreeWriter->workDir;
	return dir;
}
string PWNode::hierarchyPath(){
	// Group the node's child-index digits into directory levels of
	// hierarchyStepSize digits each, e.g. "r0123" with step 2 -> "r/01/".
	const int step = potreeWriter->hierarchyStepSize;
	const string digits = name().substr(1);
	const int levels = (int)floor((float)digits.size() / (float)step);
	string result = "r/";
	for(int part = 0; part < levels; part++){
		result += digits.substr(part * step, step) + "/";
	}
	return result;
}
string PWNode::path(){
	// File location relative to the data directory:
	// hierarchy directories + node name + format-specific extension.
	return hierarchyPath() + name() + potreeWriter->getExtension();
}
PointReader *PWNode::createReader(string path){
	// Pick the reader matching the writer's serialization format;
	// unknown formats yield NULL, as before.
	PointReader *reader = NULL;
	switch(this->potreeWriter->outputFormat){
		case OutputFormat::LAS:
		case OutputFormat::LAZ:
			reader = new LASPointReader(path);
			break;
		case OutputFormat::BINARY:
			reader = new BINPointReader(path, aabb, potreeWriter->scale, this->potreeWriter->pointAttributes);
			break;
		default:
			break;
	}
	return reader;
}
PointWriter *PWNode::createWriter(string path){
	// Pick the writer matching the writer's serialization format;
	// unknown formats yield NULL, as before.
	PointWriter *writer = NULL;
	switch(this->potreeWriter->outputFormat){
		case OutputFormat::LAS:
		case OutputFormat::LAZ:
			writer = new LASPointWriter(path, aabb, potreeWriter->scale);
			break;
		case OutputFormat::BINARY:
			writer = new BINPointWriter(path, aabb, potreeWriter->scale, this->potreeWriter->pointAttributes);
			break;
		default:
			break;
	}
	return writer;
}
void PWNode::loadFromDisk(){
PointReader *reader = createReader(workDir() + "/data/" + path());
while(reader->readNextPoint()){
Point p = reader->getPoint();
if(isLeafNode()){
store.push_back(p);
}else{
grid->addWithoutCheck(p.position);
}
}
grid->numAccepted = numAccepted;
reader->close();
delete reader;
isInMemory = true;
}
PWNode *PWNode::createChild(int childIndex ){
	// Allocate the child covering the requested octant, one level deeper,
	// and link it into the tree.
	AABB childBox = childAABB(aabb, childIndex);
	PWNode *child = new PWNode(potreeWriter, childIndex, childBox, level + 1);
	child->parent = this;
	children[childIndex] = child;
	return child;
}
void PWNode ::split(){
children.resize(8, NULL);
string filepath = workDir() + "/data/" + path();
if(fs::exists(filepath)){
fs::remove(filepath);
}
for(Point &point : store){
add(point);
}
store = vector<Point>();
}
/**
 * Inserts a point into this subtree using Poisson-disk style subsampling.
 * Leaf nodes buffer points until storeSize triggers a split; inner nodes try
 * the spacing grid and, on rejection, recurse into the matching child.
 * Returns the node that accepted the point, or NULL if it was dropped
 * (max depth reached or outside every child octant).
 */
PWNode *PWNode::add(Point &point){
	addCalledSinceLastFlush = true;
	// nodes evicted by flush() are reloaded lazily on first touch
	if(!isInMemory){
		loadFromDisk();
	}
	if(isLeafNode()){
		store.push_back(point);
		// beyond storeSize the leaf is split and its buffer redistributed
		if(int(store.size()) >= potreeWriter->storeSize){
			split();
		}
		return this;
	}else{
		bool accepted = false;
		// the commented-out variants below are experiments with stricter
		// multi-level acceptance checks (see quality settings); kept for reference
		//if(potreeWriter->quality == ConversionQuality::FAST){
			accepted = grid->add(point.position);
		//}else/* if(potreeWriter->quality == ConversionQuality::DEFAULT)*/{
		//	PWNode *node = this;
		//	accepted = true;
		//	while(accepted && node != NULL){
		//		accepted = accepted && node->grid->willBeAccepted(point.position, grid->squaredSpacing);
		//		node = node->parent;
		//	}
		//
		//	//node = this;
		//	//while(accepted && node != NULL && node->children.size() > 0){
		//	//	int childIndex = nodeIndex(node->aabb, point);
		//	//
		//	//	if(childIndex == -1){
		//	//		break;
		//	//	}
		//	//
		//	//	node = node->children[childIndex];
		//	//
		//	//	if(node == NULL){
		//	//		break;
		//	//	}
		//	//
		//	//	accepted = accepted && node->grid->willBeAccepted(point.position, grid->squaredSpacing);
		//	//}
		//
		//	if(accepted){
		//		grid->addWithoutCheck(point.position);
		//	}
		//}/*else if(potreeWriter->quality == ConversionQuality::NICE){
		//	PWNode *node = this;
		//	accepted = true;
		//	while(accepted && node != NULL){
		//		accepted = accepted && node->grid->willBeAccepted(point.position, grid->squaredSpacing);
		//		node = node->parent;
		//	}
		//
		//	node = this;
		//	while(accepted && node != NULL && node->children.size() > 0){
		//		int childIndex = nodeIndex(node->aabb, point);
		//
		//		if(childIndex == -1){
		//			break;
		//		}
		//
		//		node = node->children[childIndex];
		//
		//		if(node == NULL){
		//			break;
		//		}
		//
		//		accepted = accepted && node->grid->willBeAccepted(point.position, grid->squaredSpacing);
		//	}
		//
		//
		//	if(accepted){
		//		grid->addWithoutCheck(point.position);
		//	}
		//}*/
		if(accepted){
			// point satisfies this level's spacing: keep it here
			cache.push_back(point);
			acceptedAABB.update(point.position);
			numAccepted++;
			return this;
		}else{
			// try adding point to higher level
			if(potreeWriter->maxDepth != -1 && level >= potreeWriter->maxDepth){
				return NULL;
			}
			int childIndex = nodeIndex(aabb, point);
			if(childIndex >= 0){
				if(isLeafNode()){
					children.resize(8, NULL);
				}
				PWNode *child = children[childIndex];
				// create child node if not existent
				if(child == NULL){
					child = createChild(childIndex);
				}
				return child->add(point);
				//child->add(point, targetLevel);
			} else {
				// point lies outside every child octant (e.g. on the boundary)
				return NULL;
			}
		}
		return NULL;
	}
}
// Writes this node's pending points to disk and recurses into the children.
// Nodes that were not touched since the previous flush drop their in-memory
// data (store or grid) instead, keeping overall memory usage bounded.
void PWNode::flush(){

	// Writes the given points to this node's file. With append == true the
	// old file is first moved aside and copied into the fresh file, so the
	// node's file always contains all of its accepted points.
	std::function<void(vector<Point> &points, bool append)> writeToDisk = [&](vector<Point> &points, bool append){
		string filepath = workDir() + "/data/" + path();

		PointWriter *writer = NULL;

		if(!fs::exists(workDir() + "/data/" + hierarchyPath())){
			fs::create_directories(workDir() + "/data/" + hierarchyPath());
		}

		if(append){
			string temppath = workDir() + "/temp/prepend" + potreeWriter->getExtension();
			if(fs::exists(filepath)){
				fs::rename(fs::path(filepath), fs::path(temppath));
			}

			writer = createWriter(filepath);
			if(fs::exists(temppath)){
				// copy the previously written points into the new file
				PointReader *reader = createReader(temppath);
				while(reader->readNextPoint()){
					writer->write(reader->getPoint());
				}
				reader->close();
				delete reader;
				fs::remove(temppath);
			}
		}else{
			// full rewrite: discard the old file
			if(fs::exists(filepath)){
				fs::remove(filepath);
			}
			writer = createWriter(filepath);
		}

		for(auto &e_c : points){
			writer->write(e_c);
		}

		// sanity check: an appended file must hold exactly numAccepted points
		if(append && (writer->numPoints != this->numAccepted)){
			cout << "writeToDisk " << writer->numPoints << " != " << this->numAccepted << endl;
			exit(1);
		}

		writer->close();
		delete writer;
	};

	if(isLeafNode()){
		if(addCalledSinceLastFlush){
			// leaves rewrite their full store
			writeToDisk(store, false);
			//if(store.size() != this->numAccepted){
			//	cout << "store " << store.size() << " != " << this->numAccepted << " - " << this->name() << endl;
			//}
		}else if(!addCalledSinceLastFlush && isInMemory){
			// untouched since last flush: file is current, free the memory
			store = vector<Point>();
			isInMemory = false;
		}
	}else{
		if(addCalledSinceLastFlush){
			// inner nodes append only the points accepted since the last flush
			writeToDisk(cache, true);
			//if(cache.size() != this->numAccepted){
			//	cout << "cache " << cache.size() << " != " << this->numAccepted << " - " << this->name() << endl;
			//
			//	exit(1);
			//}
			cache = vector<Point>();
		}else if(!addCalledSinceLastFlush && isInMemory){
			// untouched: drop the grid; loadFromDisk() rebuilds it on demand
			delete grid;
			grid = new SparseGrid(aabb, spacing());
			isInMemory = false;
		}
	}

	addCalledSinceLastFlush = false;

	for(PWNode *child : children){
		if(child != NULL){
			child->flush();
		}
	}
}
vector<PWNode*> PWNode::getHierarchy(int levels){
	// Breadth-first collection of this node and its descendants, limited to
	// the given number of levels relative to this node.
	vector<PWNode*> hierarchy;

	list<PWNode*> toVisit;
	toVisit.push_back(this);
	while(!toVisit.empty()){
		PWNode *node = toVisit.front();
		toVisit.pop_front();

		// BFS visits nodes in level order, so the first node at the cutoff
		// level implies every remaining queued node is at the cutoff too.
		if(node->level >= this->level + levels){
			break;
		}

		hierarchy.push_back(node);
		for(PWNode *child : node->children){
			if(child != NULL){
				toVisit.push_back(child);
			}
		}
	}

	return hierarchy;
}
void PWNode::traverse(std::function<void(PWNode*)> callback){
	// Pre-order depth-first traversal: visit this node first, then every
	// existing child recursively.
	callback(this);

	for(PWNode *child : children){
		if(child == NULL){
			continue;
		}
		child->traverse(callback);
	}
}
// Level-by-level traversal via iterative deepening: one depth-limited DFS
// pass per level, invoking the callback only for nodes at the current
// target level, until a pass visits nothing.
// https://en.wikipedia.org/wiki/Iterative_deepening_depth-first_search
//
// Fix: the per-level visit counter was declared once outside the do/while
// and never reset, so after the first pass visited any node the loop
// condition stayed true forever (infinite loop with ever deeper passes).
// The counter now restarts at zero for every level.
void PWNode::traverseBreadthFirst(std::function<void(PWNode*)> callback){
	int currentLevel = 0;
	while(true){
		int visitedAtLevel = 0;

		// depth-first search down to node->level == currentLevel
		stack<PWNode*> st;
		st.push(this);
		while(!st.empty()){
			PWNode *node = st.top();
			st.pop();

			if(node->level == currentLevel){
				callback(node);
				visitedAtLevel++;
			}else if(node->level < currentLevel){
				for(PWNode *child : node->children){
					if(child != NULL){
						st.push(child);
					}
				}
			}
		}

		if(visitedAtLevel == 0){
			break;
		}
		currentLevel++;
	}
}
PWNode* PWNode::findNode(string name){
	// Walks down the octree following the digits of the node name
	// (e.g. "r042"); returns NULL if no such node exists.
	string ownName = this->name();

	if(name.size() < ownName.size()){
		return NULL;
	}
	if(name.size() == ownName.size()){
		return (name == ownName) ? this : NULL;
	}

	// the next digit after our own name selects the child octant
	int childIndex = stoi(string(1, name[ownName.size()]));
	if(isLeafNode() || children[childIndex] == NULL){
		return NULL;
	}
	return children[childIndex]->findNode(name);
}
PotreeWriter::PotreeWriter(string workDir, ConversionQuality quality)
	: workDir(workDir), quality(quality){
	// Minimal constructor; the remaining state is filled in later, e.g. by
	// loadStateFromDisk() in incremental mode.
}
PotreeWriter::PotreeWriter(string workDir, AABB aabb, float spacing, int maxDepth, double scale, OutputFormat outputFormat, PointAttributes pointAttributes, ConversionQuality quality){
	this->workDir = workDir;
	this->aabb = aabb;
	this->spacing = spacing;
	this->scale = scale;
	this->maxDepth = maxDepth;
	this->outputFormat = outputFormat;
	this->quality = quality;
	this->pointAttributes = pointAttributes;

	// Derive the coordinate precision from the cloud's extent unless an
	// explicit scale was requested. The >100'000 and >1 ranges of the
	// original branch chain used the same value and are merged here.
	if(this->scale == 0){
		double diagonal = aabb.size.length();
		if(diagonal > 1'000'000){
			this->scale = 0.01;
		}else if(diagonal > 1){
			this->scale = 0.001;
		}else{
			this->scale = 0.0001;
		}
	}

	cloudjs.outputFormat = outputFormat;
	cloudjs.boundingBox = aabb;
	cloudjs.octreeDir = "data";
	cloudjs.spacing = spacing;
	cloudjs.version = "1.8";
	cloudjs.scale = this->scale;
	cloudjs.pointAttributes = pointAttributes;

	root = new PWNode(this, aabb);
}
string PotreeWriter::getExtension(){
	// File extension matching the configured output format; empty string
	// for anything unrecognized.
	switch(outputFormat){
		case OutputFormat::LAS:
			return ".las";
		case OutputFormat::LAZ:
			return ".laz";
		case OutputFormat::BINARY:
			return ".bin";
		default:
			return "";
	}
}
// Blocks until the background indexing thread started by processStore()
// has finished; no-op if no thread is running.
void PotreeWriter::waitUntilProcessed(){
	if(storeThread.joinable()){
		storeThread.join();
	}
}
void PotreeWriter::add(Point &p){
	// Lazily create the output directory layout on the very first point.
	if(numAdded == 0){
		fs::create_directories(fs::path(workDir + "/data"));
		fs::create_directories(fs::path(workDir + "/temp"));
	}

	store.push_back(p);
	numAdded++;

	// Hand the batch to the background indexing thread once large enough.
	if(store.size() > 10'000){
		processStore();
	}
}
void PotreeWriter::processStore(){
vector<Point> st = store;
store = vector<Point>();
waitUntilProcessed();
storeThread = thread([this, st]{
for(Point p : st){
PWNode *acceptedBy = root->add(p);
if(acceptedBy != NULL){
tightAABB.update(p.position);
pointsInMemory++;
numAccepted++;
}
}
});
}
// Flushes all pending state to disk: the remaining store batch, every
// node's point file, cloud.js, and the .hrc hierarchy files.
void PotreeWriter::flush(){
	processStore();

	if(storeThread.joinable()){
		storeThread.join();
	}

	//auto start = high_resolution_clock::now();
	root->flush();
	//auto end = high_resolution_clock::now();
	//long long duration = duration_cast<milliseconds>(end-start).count();
	//float seconds = duration / 1'000.0f;
	//cout << "flush nodes: " << seconds << "s" << endl;

	{// update cloud.js
		cloudjs.hierarchy = vector<CloudJS::Node>();
		cloudjs.hierarchyStepSize = hierarchyStepSize;
		cloudjs.tightBoundingBox = tightAABB;
		cloudjs.numAccepted = numAccepted;
		cloudjs.projection = projection;

		ofstream cloudOut(workDir + "/cloud.js", ios::out);
		cloudOut << cloudjs.getString();
		cloudOut.close();
	}

	{// write hierarchy
		//auto start = high_resolution_clock::now();

		int hrcTotal = 0;
		int hrcFlushed = 0;

		// one .hrc file per hierarchyStepSize levels; each covers the next
		// hierarchyStepSize + 1 levels of its subtree
		list<PWNode*> stack;
		stack.push_back(root);
		while(!stack.empty()){
			PWNode *node = stack.front();
			stack.pop_front();
			hrcTotal++;

			vector<PWNode*> hierarchy = node->getHierarchy(hierarchyStepSize + 1);
			bool needsFlush = false;
			for(const auto &descendant : hierarchy){
				// descendants at the step boundary become roots of their
				// own .hrc chunk
				if(descendant->level == node->level + hierarchyStepSize ){
					stack.push_back(descendant);
				}

				needsFlush = needsFlush || descendant->addedSinceLastFlush;
			}

			// only rewrite .hrc files whose nodes changed since last flush
			if(needsFlush){
				string dest = workDir + "/data/" + node->hierarchyPath() + "/" + node->name() + ".hrc";
				ofstream fout;
				fout.open(dest, ios::out | ios::binary);

				// record format: 1 byte child mask + 4 byte point count
				for(const auto &descendant : hierarchy){
					char children = 0;
					for(int j = 0; j < (int)descendant->children.size(); j++){
						if(descendant->children[j] != NULL){
							children = children | (1 << j);
						}
					}

					fout.write(reinterpret_cast<const char*>(&children), 1);
					fout.write(reinterpret_cast<const char*>(&(descendant->numAccepted)), 4);
				}
				fout.close();
				hrcFlushed++;
			}
		}

		root->traverse([](PWNode* node){
			node->addedSinceLastFlush = false;
		});

		//cout << "hrcTotal: " << hrcTotal << "; " << "hrcFlushed: " << hrcFlushed << endl;

		//auto end = high_resolution_clock::now();
		//long long duration = duration_cast<milliseconds>(end-start).count();
		//float seconds = duration / 1'000.0f;
		//cout << "writing hierarchy: " << seconds << "s" << endl;
	}
}
// Stores the proj4 projection string; it is written to cloud.js on the
// next flush().
void PotreeWriter::setProjection(string projection){
	this->projection = projection;
}
// Restores a PotreeWriter from an existing conversion on disk (incremental
// mode): reads cloud.js and rebuilds the node tree from the .hrc files.
void PotreeWriter::loadStateFromDisk(){
	{// cloudjs
		string cloudJSPath = workDir + "/cloud.js";
		ifstream file(cloudJSPath);
		string line;
		string content;
		while (std::getline(file, line)){
			content += line + "\n";
		}
		cloudjs = CloudJS(content);
	}

	{// adopt the settings of the existing conversion
		this->outputFormat = cloudjs.outputFormat;
		this->pointAttributes = cloudjs.pointAttributes;
		this->hierarchyStepSize = cloudjs.hierarchyStepSize;
		this->spacing = cloudjs.spacing;
		this->scale = cloudjs.scale;
		this->aabb = cloudjs.boundingBox;
		this->numAccepted = cloudjs.numAccepted;
	}

	{// tree
		// collect all .hrc files below the root node directory
		vector<string> hrcPaths;
		fs::path rootDir(workDir + "/data/r");
		for (fs::recursive_directory_iterator iter(rootDir), end; iter != end; ++iter){
			fs::path path = iter->path();

			if(fs::is_regular_file(path)){
				if(iEndsWith(path.extension().string(), ".hrc")){
					hrcPaths.push_back(path.string());
				}else{
				}
			}else if(fs::is_directory(path)){
			}
		}
		// shorter names first, so a chunk's root node already exists when
		// its .hrc file is processed
		std::sort(hrcPaths.begin(), hrcPaths.end(), [](string &a, string &b){
			return a.size() < b.size();
		});

		PWNode *root = new PWNode(this, cloudjs.boundingBox);
		for(string hrcPath : hrcPaths){
			fs::path pHrcPath(hrcPath);
			string hrcName = pHrcPath.stem().string();
			PWNode *hrcRoot = root->findNode(hrcName);

			PWNode *current = hrcRoot;
			current->addedSinceLastFlush = false;
			current->isInMemory = false;
			vector<PWNode*> nodes;
			nodes.push_back(hrcRoot);

			ifstream fin(hrcPath, ios::in | ios::binary);
			std::vector<char> buffer((std::istreambuf_iterator<char>(fin)), (std::istreambuf_iterator<char>()));

			// each 5-byte record = 1 byte child mask + 4 byte point count,
			// in the breadth-first order that flush() writes; nodes[]
			// grows in that same order
			for(int i = 0; 5*i < (int)buffer.size(); i++){
				// NOTE(review): this declaration shadows the 'current'
				// defined above for the hrc root
				PWNode *current = nodes[i];

				char children = buffer[i*5];
				char *p = &buffer[i*5+1];
				unsigned int* ip = reinterpret_cast<unsigned int*>(p);
				unsigned int numPoints = *ip;

				//std::bitset<8> bs(children);
				//cout << i << "\t: " << "children: " << bs << "; " << "numPoints: " << numPoints << endl;

				current->numAccepted = numPoints;

				if(children != 0){
					current->children.resize(8, NULL);
					for(int j = 0; j < 8; j++){
						if((children & (1 << j)) != 0){
							AABB cAABB = childAABB(current->aabb, j);
							PWNode *child = new PWNode(this, j, cAABB, current->level + 1);
							child->parent = current;
							child->addedSinceLastFlush = false;
							child->isInMemory = false;
							current->children[j] = child;
							nodes.push_back(child);
						}
					}
				}
			}
		}
		this->root = root;

		// TODO set it to actual number
		this->numAdded = 1;

		//int numNodes = 0;
		//root->traverse([&](PWNode *node){
		//	if(numNodes < 50){
		//		cout << std::left << std::setw(10) << node->name();
		//		cout << std::right << std::setw(10) << node->numAccepted << "; ";
		//		cout << node->aabb.min << " - " << node->aabb.max << endl;
		//	}
		//
		//	numNodes++;
		//
		//});
	}
}
}

View File

@@ -1,195 +0,0 @@
#include <iostream>
#include <math.h>
#include "SparseGrid.h"
#include "GridIndex.h"
using std::min;
namespace Potree{
const double cellSizeFactor = 5.0;
SparseGrid::SparseGrid(AABB aabb, float spacing){
	// A cell's edge length is a multiple (cellSizeFactor) of the sampling
	// spacing, so a point's spacing neighbourhood is covered by its cell
	// plus the direct neighbours.
	this->aabb = aabb;

	const double cellSize = spacing * cellSizeFactor;
	this->width = (int)(aabb.size.x / cellSize);
	this->height = (int)(aabb.size.y / cellSize);
	this->depth = (int)(aabb.size.z / cellSize);

	this->squaredSpacing = spacing * spacing;
}
SparseGrid::~SparseGrid(){
	// The grid owns the GridCell instances it allocates lazily on insert.
	for(auto &entry : *this){
		delete entry.second;
	}
}
bool SparseGrid::isDistant(const Vector3<double> &p, GridCell *cell){
	// Same test as the three-argument overload, using the grid's own spacing.
	return isDistant(p, cell, squaredSpacing);
}
bool SparseGrid::isDistant(const Vector3<double> &p, GridCell *cell, float &squaredSpacing){
	// p is distant only if it keeps the minimum distance to the points in
	// this cell and in every neighbouring cell.
	bool distant = cell->isDistant(p, squaredSpacing);

	for(auto it = cell->neighbours.begin(); distant && it != cell->neighbours.end(); ++it){
		distant = (*it)->isDistant(p, squaredSpacing);
	}

	return distant;
}
bool SparseGrid::willBeAccepted(const Vector3<double> &p, float &squaredSpacing){
int nx = (int)(width*(p.x - aabb.min.x) / aabb.size.x);
int ny = (int)(height*(p.y - aabb.min.y) / aabb.size.y);
int nz = (int)(depth*(p.z - aabb.min.z) / aabb.size.z);
int i = min(nx, width-1);
int j = min(ny, height-1);
int k = min(nz, depth-1);
GridIndex index(i,j,k);
long long key = ((long long)k << 40) | ((long long)j << 20) | (long long)i;
SparseGrid::iterator it = find(key);
if(it == end()){
it = this->insert(value_type(key, new GridCell(this, index))).first;
}
if(isDistant(p, it->second, squaredSpacing)){
return true;
}else{
return false;
}
}
//bool SparseGrid::willBeAccepted(const Vector3<double> &p, float &squaredSpacing){
// float spacing = sqrt(squaredSpacing);
// float cellSize = sqrt(this->squaredSpacing) * cellSizeFactor;
//
// float fx = (width*(p.x - aabb.min.x) / aabb.size.x);
// float fy = (height*(p.y - aabb.min.y) / aabb.size.y);
// float fz = (depth*(p.z - aabb.min.z) / aabb.size.z);
//
// float cx = fmod(fx, cellSize);
// float cy = fmod(fy, cellSize);
// float cz = fmod(fz, cellSize);
//
// bool inner = cx < spacing || cx > (cellSize - spacing);
// inner = inner && (cy < spacing || cy > (cellSize - spacing));
// inner = inner && (cz < spacing || cz > (cellSize - spacing));
//
// int nx = (int)fx;
// int ny = (int)fy;
// int nz = (int)fz;
//
// int i = min(nx, width-1);
// int j = min(ny, height-1);
// int k = min(nz, depth-1);
//
// GridIndex index(i,j,k);
// long long key = ((long long)k << 40) | ((long long)j << 20) | (long long)i;
// SparseGrid::iterator it = find(key);
// if(it == end()){
// it = this->insert(value_type(key, new GridCell(this, index))).first;
// }
//
// if(!it->second->isDistant(p, squaredSpacing)){
// return false;
// }
//
// if(!inner){
// for(const auto &neighbour : it->second->neighbours) {
// if(!neighbour->isDistant(p, squaredSpacing)){
// return false;
// }
// }
// }
//
// return true;
//}
bool SparseGrid::willBeAccepted(const Vector3<double> &p){
int nx = (int)(width*(p.x - aabb.min.x) / aabb.size.x);
int ny = (int)(height*(p.y - aabb.min.y) / aabb.size.y);
int nz = (int)(depth*(p.z - aabb.min.z) / aabb.size.z);
int i = min(nx, width-1);
int j = min(ny, height-1);
int k = min(nz, depth-1);
GridIndex index(i,j,k);
long long key = ((long long)k << 40) | ((long long)j << 20) | (long long)i;
SparseGrid::iterator it = find(key);
if(it == end()){
it = this->insert(value_type(key, new GridCell(this, index))).first;
}
if(isDistant(p, it->second)){
return true;
}else{
return false;
}
}
// Attempts to insert p. Returns true (and stores p) only if p keeps the
// minimum distance to all previously accepted points; false otherwise.
bool SparseGrid::add(Vector3<double> &p){
	int nx = (int)(width*(p.x - aabb.min.x) / aabb.size.x);
	int ny = (int)(height*(p.y - aabb.min.y) / aabb.size.y);
	int nz = (int)(depth*(p.z - aabb.min.z) / aabb.size.z);

	int i = min(nx, width-1);
	int j = min(ny, height-1);
	int k = min(nz, depth-1);

	GridIndex index(i,j,k);
	long long key = ((long long)k << 40) | ((long long)j << 20) | (long long)i;
	SparseGrid::iterator it = find(key);
	if(it == end()){
		it = this->insert(value_type(key, new GridCell(this, index))).first;
	}

	if(isDistant(p, it->second)){
		// reuse the iterator instead of a second hash lookup via operator[]
		it->second->add(p);
		numAccepted++;
		return true;
	}else{
		return false;
	}
}
void SparseGrid::addWithoutCheck(Vector3<double> &p){
	// Inserts p into its cell without a distance test; used e.g. when
	// rebuilding a grid from points that were already accepted earlier.
	int nx = (int)(width*(p.x - aabb.min.x) / aabb.size.x);
	int ny = (int)(height*(p.y - aabb.min.y) / aabb.size.y);
	int nz = (int)(depth*(p.z - aabb.min.z) / aabb.size.z);

	int i = min(nx, width-1);
	int j = min(ny, height-1);
	int k = min(nz, depth-1);

	long long key = ((long long)k << 40) | ((long long)j << 20) | (long long)i;

	auto it = find(key);
	if(it == end()){
		GridIndex index(i,j,k);
		it = insert(value_type(key, new GridCell(this, index))).first;
	}

	it->second->add(p);
}
}

View File

@@ -1,333 +1,49 @@
#include <chrono>
#include <vector>
#include <map>
#include <string>
#include <exception>
#include <fstream>
#include "AABB.h"
#include "PotreeConverter.h"
#include "PotreeException.h"
//#include <filesystem>
#include "arguments.hpp"
#include <filesystem>
namespace fs = std::experimental::filesystem;
#include "stuff.h"
using std::string;
using std::cout;
using std::cerr;
using std::endl;
using std::vector;
using std::binary_function;
using std::map;
using std::chrono::high_resolution_clock;
using std::chrono::milliseconds;
using std::chrono::duration_cast;
using std::exception;
using Potree::PotreeConverter;
using Potree::StoreOption;
using Potree::ConversionQuality;
#define MAX_FLOAT std::numeric_limits<float>::max()
//using std::string;
class SparseGrid;
// Parsed command line options for the converter.
//
// Fix: the scalar members (spacing, levels, scale, diagonalFraction,
// outFormat, generatePage, pageTemplate, storeSize, flushLimit) were
// previously uninitialized; reading them before parseArguments() assigned
// them yielded indeterminate values. All members now have defaults that
// match the defaults applied in parseArguments().
struct PotreeArguments {
	bool help = false;
	StoreOption storeOption = StoreOption::ABORT_IF_EXISTS;
	vector<string> source;
	string outdir;
	float spacing = 0.0f;
	int levels = -1;
	string format;
	double scale = 0.0;
	int diagonalFraction = 0;
	Potree::OutputFormat outFormat = Potree::OutputFormat::BINARY;
	vector<double> colorRange;
	vector<double> intensityRange;
	vector<string> outputAttributes;
	bool generatePage = false;
	bool pageTemplate = false;
	string pageTemplatePath = "";
	vector<double> aabbValues;
	string pageName = "";
	string projection = "";
	bool sourceListingOnly = false;
	string listOfFiles = "";
	ConversionQuality conversionQuality = ConversionQuality::DEFAULT;
	string conversionQualityString = "";
	string title = "PotreeViewer";
	string description = "";
	bool edlEnabled = false;
	bool showSkybox = false;
	string material = "RGB";
	string executablePath;
	int storeSize = 20'000;
	int flushLimit = 10'000'000;
};
// Parses the command line into a PotreeArguments struct. Prints usage and
// exits directly on --help, on missing input, or on invalid combinations.
PotreeArguments parseArguments(int argc, char **argv){
	Arguments args(argc, argv);

	args.addArgument("source,i,", "input files");
	args.addArgument("help,h", "prints usage");
	args.addArgument("generate-page,p", "Generates a ready to use web page with the given name.");
	args.addArgument("page-template", "directory where the web page template is located.");
	args.addArgument("outdir,o", "output directory");
	args.addArgument("spacing,s", "Distance between points at root level. Distance halves each level.");
	args.addArgument("spacing-by-diagonal-fraction,d", "Maximum number of points on the diagonal in the first level (sets spacing). spacing = diagonal / value");
	args.addArgument("levels,l", "Number of levels that will be generated. 0: only root, 1: root and its children, ...");
	args.addArgument("input-format,f", "Input format. xyz: cartesian coordinates as floats, rgb: colors as numbers, i: intensity as number");
	args.addArgument("color-range", "");
	args.addArgument("intensity-range", "");
	args.addArgument("output-format", "Output format can be BINARY, LAS or LAZ. Default is BINARY");
	args.addArgument("output-attributes,a", "can be any combination of RGB, INTENSITY and CLASSIFICATION. Default is RGB.");
	args.addArgument("scale", "Scale of the X, Y, Z coordinate in LAS and LAZ files.");
	args.addArgument("aabb", "Bounding cube as \"minX minY minZ maxX maxY maxZ\". If not provided it is automatically computed");
	args.addArgument("incremental", "Add new points to existing conversion");
	args.addArgument("overwrite", "Replace existing conversion at target directory");
	args.addArgument("source-listing-only", "Create a sources.json but no octree.");
	args.addArgument("projection", "Specify projection in proj4 format.");
	args.addArgument("list-of-files", "A text file containing a list of files to be converted.");
	args.addArgument("source", "Source file. Can be LAS, LAZ, PTX or PLY");
	args.addArgument("title", "Page title");
	args.addArgument("description", "Description to be shown in the page.");
	args.addArgument("edl-enabled", "Enable Eye-Dome-Lighting.");
	args.addArgument("show-skybox", "");
	args.addArgument("material", "RGB, ELEVATION, INTENSITY, INTENSITY_GRADIENT, CLASSIFICATION, RETURN_NUMBER, SOURCE, LEVEL_OF_DETAIL");
	args.addArgument("store-size", "A node is split once more than store-size points are added. Reduce for better results at cost of performance. Default is 20000");
	args.addArgument("flush-limit", "Flush after X points. Default is 10000000");

	PotreeArguments a;

	// usage / mandatory-input checks; these exit the process
	if (args.has("help")){
		cout << args.usage() << endl;
		exit(0);
	} else if (!args.has("source") && !args.has("list-of-files")){
		cout << args.usage() << endl;
		exit(1);
	} else if (argc == 1) {
		cout << args.usage() << endl;
		exit(0);
	}

	if (args.has("incremental") && args.has("overwrite")) {
		cout << "cannot have --incremental and --overwrite at the same time";
		exit(1);
	}

	///a.source = args.get("source").as<vector<string>>();

	a.generatePage = args.has("generate-page");
	if (a.generatePage) {
		a.pageName = args.get("generate-page").as<string>();
	}
	a.pageTemplate = args.has("page-template");
	if (a.pageTemplate) {
		a.pageTemplatePath = args.get("page-template").as<string>();
	}

	// scalar options with their defaults
	a.outdir = args.get("outdir").as<string>();
	a.spacing = args.get("spacing").as<double>(0.0);
	a.storeSize = args.get("store-size").as<int>(20'000);
	a.flushLimit = args.get("flush-limit").as<int>(10'000'000);
	a.diagonalFraction = args.get("d").as<double>(0.0);
	a.levels = args.get("levels").as<int>(-1);
	a.format = args.get("input-format").as<string>();
	a.colorRange = args.get("color-range").as<vector<double>>();
	a.intensityRange = args.get("intensity-range").as<vector<double>>();

	// unknown format strings silently fall back to BINARY
	if (args.has("output-format")) {
		string of = args.get("output-format").as<string>("BINARY");

		if (of == "BINARY") {
			a.outFormat = Potree::OutputFormat::BINARY;
		} else if (of == "LAS") {
			a.outFormat = Potree::OutputFormat::LAS;
		} else if (of == "LAZ") {
			a.outFormat = Potree::OutputFormat::LAZ;
		} else {
			a.outFormat = Potree::OutputFormat::BINARY;
		}
	} else {
		a.outFormat = Potree::OutputFormat::BINARY;
	}

	if (args.has("output-attributes")) {
		a.outputAttributes = args.get("output-attributes").as<vector<string>>();
	} else {
		//a.outputAttributes = { "RGB" };
	}

	a.scale = args.get("scale").as<double>(0.0);

	if (args.has("aabb")) {
		// split the space-separated "minX minY minZ maxX maxY maxZ" string
		string strAABB = args.get("aabb").as<string>();
		vector<double> aabbValues;
		char sep = ' ';
		for (size_t p = 0, q = 0; p != strAABB.npos; p = q)
			aabbValues.push_back(atof(strAABB.substr(p + (p != 0), (q = strAABB.find(sep, p + 1)) - p - (p != 0)).c_str()));

		if (aabbValues.size() != 6) {
			cerr << "AABB requires 6 arguments" << endl;
			exit(1);
		}

		a.aabbValues = aabbValues;
	}

	if(args.has("incremental")){
		a.storeOption = StoreOption::INCREMENTAL;
	}else if(args.has("overwrite")){
		a.storeOption = StoreOption::OVERWRITE;
	}else{
		a.storeOption = StoreOption::ABORT_IF_EXISTS;
	}

	a.sourceListingOnly = args.has("source-listing-only");
	a.projection = args.get("projection").as<string>();

	if (args.has("source")) {
		a.source = args.get("source").as<vector<string>>();
	}

	// alternatively, read the input files from a list file; relative
	// entries are resolved against the list file's directory
	if (a.source.size() == 0 && args.has("list-of-files")) {
		string lof = args.get("list-of-files").as<string>();
		a.listOfFiles = lof;

		if (fs::exists(fs::path(a.listOfFiles))) {
			std::ifstream in(a.listOfFiles);
			string line;
			while (std::getline(in, line)) {
				string path;
				if (fs::path(line).is_absolute()) {
					path = line;
				} else {
					fs::path absPath = fs::canonical(fs::path(a.listOfFiles));
					fs::path lofDir = absPath.parent_path();
					path = lofDir.string() + "/" + line;
				}

				if (fs::exists(fs::path(path))) {
					a.source.push_back(path);
				} else {
					cerr << "ERROR: file not found: " << path << endl;
					exit(1);
				}
			}
			in.close();
		} else {
			cerr << "ERROR: specified list of files not found: '" << a.listOfFiles << "'" << endl;
			exit(1);
		}
	}

	a.title = args.get("title").as<string>();
	a.description = args.get("description").as<string>();
	a.edlEnabled = args.has("edl-enabled");
	a.showSkybox = args.has("show-skybox");
	a.material = args.get("material").as<string>("RGB");

	vector<string> validMaterialNames = {"RGB", "ELEVATION", "INTENSITY", "INTENSITY_GRADIENT", "CLASSIFICATION", "RETURN_NUMBER", "SOURCE", "LEVEL_OF_DETAIL"};
	if(std::find(validMaterialNames.begin(), validMaterialNames.end(), a.material) == validMaterialNames.end()){
		cout << args.usage();
		cout << endl;
		cout << "ERROR: " << "invalid material name specified" << endl;
		exit(1);
	}

	// set default parameters
	fs::path pSource(a.source[0]);
	if (args.has("outdir")) {
		a.outdir = args.get("outdir").as<string>();
	} else {
		// default output directory is derived from the first input file
		string name = fs::canonical(pSource).filename().string();
		a.outdir = name + "_converted";
	}

	// spacing and diagonal fraction are mutually exclusive
	if (a.diagonalFraction != 0) {
		a.spacing = 0;
	}else if(a.spacing == 0){
		a.diagonalFraction = 200;
	}

	try {
		auto absolutePath = fs::canonical(fs::system_complete(argv[0]));
		a.executablePath = absolutePath.parent_path().string();
	} catch (const fs::filesystem_error &e) {
		// do nothing
	}

	return a;
}
// Dumps the effective conversion parameters to stdout.
// Fix: the range-for variable `s` was declared but the body re-indexed
// `a.source[i]` instead of using it; `s` is now used directly (same output).
void printArguments(PotreeArguments &a){
	try{
		cout << "== params ==" << endl;

		int i = 0;
		for(const auto &s : a.source) {
			cout << "source[" << i << "]: \t" << s << endl;
			++i;
		}
		cout << "outdir: \t" << a.outdir << endl;
		cout << "spacing: \t" << a.spacing << endl;
		cout << "diagonal-fraction: \t" << a.diagonalFraction << endl;
		cout << "levels: \t" << a.levels << endl;
		cout << "format: \t" << a.format << endl;
		cout << "scale: \t" << a.scale << endl;
		cout << "pageName: \t" << a.pageName << endl;
		cout << "projection: \t" << a.projection << endl;
		cout << endl;
	}catch(exception &e){
		cout << "ERROR: " << e.what() << endl;
		exit(1);
	}
}
#include "Vector3.h"
#include <random>
#include "Chunker.h"
#include "Indexer_Centered.h"
#include "./modules/index_bluenoise/Indexer.h"
#include "Indexer_Centered_countsort.h"
#include "Indexer_Centered_nochunks.h"
int main(int argc, char **argv){
	// use the system locale so large point counts print with separators
	cout.imbue(std::locale(""));

	double tStart = now();

	//string path = "D:/dev/pointclouds/Riegl/Retz_Airborne_Terrestrial_Combined_1cm.las";
	//string path = "D:/dev/pointclouds/Riegl/niederweiden.las";
	//string path = "D:/dev/pointclouds/Riegl/Retz_Airborne_Terrestrial_Combined_1cm.las";
	//string path = "D:/dev/pointclouds/open_topography/ca13/morro_rock/merged.las";

	try{
		PotreeArguments a = parseArguments(argc, argv);
		printArguments(a);

		// NOTE(review): input and output paths for the chunking/indexing
		// experiment below are hard-coded and override the parsed
		// arguments — looks like work-in-progress code; confirm before use.
		string pathIn = "D:/dev/pointclouds/pix4d/eclepens.las";
		//string pathIn = "D:/dev/pointclouds/archpro/heidentor.las";
		//string pathIn = "D:/dev/pointclouds/mschuetz/lion.las";
		//string pathIn = "D:/dev/pointclouds/mschuetz/plane.las";
		//string pathIn = "D:/dev/pointclouds/mschuetz/plane_small.las";

		PotreeConverter pc(a.executablePath, a.outdir, a.source);

		string pathOut = "C:/dev/workspaces/potree/develop/test/converter1/pointcloud";
		doChunking(pathIn, pathOut);

		//countsort::doIndexing(pathOut);
		//centered::doIndexing(pathOut);
		//centered_nochunks::doIndexing(pathIn, pathOut);
		bluenoise::doIndexing(pathOut);

		printElapsedTime("total time", tStart);

		// forward the parsed options to the converter
		pc.spacing = a.spacing;
		pc.diagonalFraction = a.diagonalFraction;
		pc.maxDepth = a.levels;
		pc.format = a.format;
		pc.colorRange = a.colorRange;
		pc.intensityRange = a.intensityRange;
		pc.scale = a.scale;
		pc.outputFormat = a.outFormat;
		pc.outputAttributes = a.outputAttributes;
		pc.aabbValues = a.aabbValues;
		pc.pageName = a.pageName;
		pc.pageTemplatePath = a.pageTemplatePath;
		pc.storeOption = a.storeOption;
		pc.projection = a.projection;
		pc.sourceListingOnly = a.sourceListingOnly;
		pc.quality = a.conversionQuality;
		pc.title = a.title;
		pc.description = a.description;
		pc.edlEnabled = a.edlEnabled;
		pc.material = a.material;
		pc.showSkybox = a.showSkybox;
		pc.storeSize = a.storeSize;
		pc.flushLimit = a.flushLimit;

		pc.convert();
	}catch(exception &e){
		cout << "ERROR: " << e.what() << endl;
		return 1;
	}

	return 0;
}

View File

@@ -1,300 +1,200 @@
#include "stuff.h"
#include "stuff.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <sstream>
#include <string>
using std::stringstream;
using std::string;
using std::to_string;
using std::cout;
using std::endl;
using std::ofstream;
using std::ifstream;
using std::vector;
// Returns str concatenated with itself count times; an empty string for
// count <= 0.
std::string repeat(std::string str, int count) {
	std::string result;
	if (count > 0) {
		result.reserve(str.size() * static_cast<size_t>(count));
	}
	for (int i = 0; i < count; i++) {
		result += str;
	}
	return result;
}
// Writes text to the file at path, replacing any existing contents.
// The stream closes automatically when it goes out of scope (RAII).
void writeFile(std::string path, std::string text) {
	std::ofstream out(path);
	out << text;
}
// taken from: https://stackoverflow.com/questions/18816126/c-read-the-whole-file-in-buffer
// Reads the whole file at path into a byte buffer.
// Fix: when the file could not be opened, tellg() returned -1 and the
// buffer was constructed with that value (implementation-defined blow-up);
// the read result was also never checked. An unreadable file now yields an
// empty vector.
// based on: https://stackoverflow.com/questions/18816126/c-read-the-whole-file-in-buffer
std::vector<char> readBinaryFile(std::string path) {
	std::ifstream file(path, std::ios::binary | std::ios::ate);
	if (!file) {
		return {};
	}

	std::streamsize size = file.tellg();
	file.seekg(0, std::ios::beg);

	std::vector<char> buffer(static_cast<size_t>(size));
	if (size > 0 && !file.read(buffer.data(), size)) {
		// partial or failed read: don't hand back garbage
		buffer.clear();
	}

	return buffer;
}
// taken from: https://stackoverflow.com/questions/2602013/read-whole-ascii-file-into-c-stdstring/2602060
// Reads the entire file at path into a string.
// Fix: when the file could not be opened, tellg() returned -1 and
// reserve() was called with that value converted to size_t, throwing
// std::length_error. An unreadable file now yields an empty string.
// based on: https://stackoverflow.com/questions/2602013/read-whole-ascii-file-into-c-stdstring/2602060
std::string readTextFile(std::string path) {
	std::ifstream t(path);
	if (!t) {
		return "";
	}

	std::string str;

	t.seekg(0, std::ios::end);
	str.reserve(t.tellg());
	t.seekg(0, std::ios::beg);

	str.assign((std::istreambuf_iterator<char>(t)),
		std::istreambuf_iterator<char>());

	return str;
}
// Returns a copy of str with the first occurrence of search replaced by
// replacement; str is returned unchanged if search does not occur.
std::string stringReplace(std::string str, std::string search, std::string replacement) {
	const auto pos = str.find(search);
	if (pos != std::string::npos) {
		// str is already a by-value copy, so replace in place
		str.replace(pos, search.length(), replacement);
	}
	return str;
}
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
// Case-insensitive equality of two characters.
bool icompare_pred(unsigned char a, unsigned char b) {
	const int lowerA = std::tolower(a);
	const int lowerB = std::tolower(b);
	return lowerA == lowerB;
}
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
// Case-insensitive string equality.
bool icompare(std::string const& a, std::string const& b) {
	// strings of different length can never match
	if (a.length() != b.length()) {
		return false;
	}
	return std::equal(a.begin(), a.end(), b.begin(),
		[](unsigned char lhs, unsigned char rhs) {
			return std::tolower(lhs) == std::tolower(rhs);
		});
}
// True if str ends with suffix (case-sensitive).
bool endsWith(const std::string& str, const std::string& suffix) {
	// a string cannot end with a suffix longer than itself
	if (str.size() < suffix.size()) {
		return false;
	}
	// compare the tail in place instead of materializing a substring
	return str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
}
// True if str ends with suffix, ignoring case.
bool iEndsWith(const std::string& str, const std::string& suffix) {
	if (str.size() < suffix.size()) {
		return false;
	}
	// compare suffix against the tail of str character by character
	return std::equal(suffix.begin(), suffix.end(),
		str.end() - static_cast<std::string::difference_type>(suffix.size()),
		[](unsigned char lhs, unsigned char rhs) {
			return std::tolower(lhs) == std::tolower(rhs);
		});
}
// Clock reading captured at static initialization; now() reports time
// relative to this point.
static long long unsuc_start_time = std::chrono::high_resolution_clock::now().time_since_epoch().count();

// Seconds elapsed since this translation unit was initialized.
// NOTE: assumes the clock's epoch count is in nanoseconds, as the original
// implementation did.
double now() {
	const auto current = std::chrono::high_resolution_clock::now();
	const long long nanosSinceStart = current.time_since_epoch().count() - unsuc_start_time;

	return double(nanosSinceStart) / 1'000'000'000;
}
// Prints "<label>: <seconds>s" for the time elapsed since startTime.
// The line is composed first and written with a single insertion.
void printElapsedTime(string label, double startTime) {
	double elapsed = now() - startTime;

	string msg = label;
	msg += ": ";
	msg += to_string(elapsed);
	msg += "s\n";
	cout << msg;
	//cout << label << ": " << elapsed << "s" << endl;
}
// Each overload concatenates its arguments plus a newline into one string
// and writes it with a single stream insertion, so the characters of one
// call are not split up by concurrent calls.
void printThreadsafe(std::string str) {
	std::cout << (str + "\n");
}

void printThreadsafe(std::string str1, std::string str2) {
	std::cout << (str1 + str2 + "\n");
}

void printThreadsafe(std::string str1, std::string str2, std::string str3) {
	std::cout << (str1 + str2 + str3 + "\n");
}

void printThreadsafe(std::string str1, std::string str2, std::string str3, std::string str4) {
	std::cout << (str1 + str2 + str3 + str4 + "\n");
}
#include <Windows.h>
#include "psapi.h"
#include <vector>
#include <map>
#include <iostream>
#include <math.h>
#include <string>
#include <fstream>
MemoryUsage getMemoryUsage() {
//#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
MemoryUsage usage;
#include "Vector3.h"
#include "AABB.h"
#include "Point.h"
#include "GridIndex.h"
#include "SparseGrid.h"
#include "GridCell.h"
{
MEMORYSTATUSEX memInfo;
memInfo.dwLength = sizeof(MEMORYSTATUSEX);
GlobalMemoryStatusEx(&memInfo);
using std::ifstream;
using std::ofstream;
using std::ios;
using std::string;
using std::min;
using std::max;
using std::ostream;
using std::cout;
using std::cin;
using std::endl;
using std::vector;
using std::binary_function;
using std::map;
usage.totalMemory = memInfo.ullTotalPhys;
namespace Potree{
/**
* y
* |-z
* |/
* O----x
*
* 3----7
* /| /|
* 2----6 |
* | 1--|-5
* |/ |/
* 0----4
*
*/
/**
 * Returns the axis-aligned bounding box of child octant <index> (0-7) of
 * <aabb>. Bit 2 selects the upper x half, bit 1 the upper y half, bit 0 the
 * upper z half (matching the octant diagram above this function).
 *
 * NOTE(review): in the original text, lines belonging to getMemoryUsage()
 * (PROCESS_MEMORY_COUNTERS_EX / GetProcessMemoryInfo) had been spliced into
 * the middle of this function, truncating the y-axis else branch; that
 * branch is restored here and the foreign fragment removed.
 */
AABB childAABB(const AABB &aabb, const int &index){
	Vector3<double> min = aabb.min;
	Vector3<double> max = aabb.max;

	// bit 0: lower/upper z half
	if((index & 0b0001) > 0){
		min.z += aabb.size.z / 2;
	}else{
		max.z -= aabb.size.z / 2;
	}

	// bit 1: lower/upper y half
	if((index & 0b0010) > 0){
		min.y += aabb.size.y / 2;
	}else{
		max.y -= aabb.size.y / 2;
	}

	// bit 2: lower/upper x half
	if((index & 0b0100) > 0){
		min.x += aabb.size.x / 2;
	}else{
		max.x -= aabb.size.x / 2;
	}

	return AABB(min, max);
}
/**
* y
* |-z
* |/
* O----x
*
* 3----7
* /| /|
* 2----6 |
* | 1--|-5
* |/ |/
* 0----4
*
*/
/**
 * Returns which of the 8 child octants of <aabb> contains <point>, encoded
 * as (x_half << 2) | (y_half << 1) | z_half — consistent with childAABB's
 * indexing (see the octant diagram above).
 *
 * Fix: the original clamped each axis only from above (min(m, 1)), so a
 * point below aabb.min produced a negative octant index. Clamp from below
 * as well; in-range points are unaffected, out-of-range points now map to
 * the nearest octant on every axis.
 */
int nodeIndex(const AABB &aabb, const Point &point){
	int mx = (int)(2.0 * (point.position.x - aabb.min.x) / aabb.size.x);
	int my = (int)(2.0 * (point.position.y - aabb.min.y) / aabb.size.y);
	int mz = (int)(2.0 * (point.position.z - aabb.min.z) / aabb.size.z);

	mx = min(max(mx, 0), 1);
	my = min(max(my, 0), 1);
	mz = min(max(mz, 0), 1);

	return (mx << 2) | (my << 1) | mz;
}
/**
* from http://stackoverflow.com/questions/5840148/how-can-i-get-a-files-size-in-c
*/
/**
 * Size of <filename> in bytes, or -1 if the file cannot be stat'ed
 * (e.g. it does not exist).
 * (from http://stackoverflow.com/questions/5840148/how-can-i-get-a-files-size-in-c)
 */
long filesize(string filename){
	struct stat info;
	if (stat(filename.c_str(), &info) != 0) {
		return -1;
	}
	return info.st_size;
}
///**
// * from http://stackoverflow.com/questions/874134/find-if-string-endswith-another-string-in-c
// */
//bool endsWith (std::string const &fullString, std::string const &ending)
//{
// if (fullString.length() >= ending.length()) {
// return (0 == fullString.compare (fullString.length() - ending.length(), ending.length(), ending));
// } else {
// return false;
// }
//}
/**
* see http://stackoverflow.com/questions/735204/convert-a-string-in-c-to-upper-case
*/
/**
 * Returns an upper-cased copy of <str>.
 *
 * Fix: calling ::toupper on a plain char is undefined behavior when the
 * value is negative (any non-ASCII byte where char is signed); the <cctype>
 * contract requires values representable as unsigned char. Route through an
 * unsigned char lambda.
 * (see http://stackoverflow.com/questions/735204/convert-a-string-in-c-to-upper-case)
 */
string toUpper(string str){
	string tmp = str;
	std::transform(tmp.begin(), tmp.end(), tmp.begin(),
		[](unsigned char ch) { return (char)std::toupper(ch); });

	return tmp;
}
// http://stackoverflow.com/questions/8593608/how-can-i-copy-a-directory-using-boost-filesystem
// Recursively copies directory <source> into directory <destination>.
// - Returns false if <source> does not exist or is not a directory.
// - Creates <destination> if missing; an already-existing destination is
//   deliberately tolerated (see the commented-out check below).
// - Existing files are overwritten (fs::copy_options::overwrite_existing).
// - A failure while copying an individual entry is only logged and the walk
//   continues — the function can still return true in that case; only setup
//   errors and failing sub-directory recursions abort with false.
// http://stackoverflow.com/questions/8593608/how-can-i-copy-a-directory-using-boost-filesystem
bool copyDir(fs::path source, fs::path destination){
	try{
		// Check whether the function call is valid
		if(!fs::exists(source) || !fs::is_directory(source) ) {
			std::cerr << "Source directory " << source.string() << " does not exist or is not a directory." << '\n';
			return false;
		}

		//if(fs::exists(destination)){
		//	std::cerr << "Destination directory " << destination.string()
		//		<< " already exists." << '\n';
		//	return false;
		//}

		// Create the destination directory
		if(!fs::exists(destination)){
			if(!fs::create_directory(destination)){
				std::cerr << "Unable to create destination directory" << destination.string() << '\n';
				return false;
			}
		}
	}catch(fs::filesystem_error const & e){
		std::cerr << e.what() << '\n';
		return false;
	}

	// Iterate through the source directory
	for( fs::directory_iterator file(source); file != fs::directory_iterator(); ++file){
		try{
			fs::path current(file->path());
			if(fs::is_directory(current)) {
				// Found directory: Recursion
				if(!copyDir(current, destination / current.filename())){
					return false;
				}
			}else{
				// Found file: Copy
				fs::copy_file(current, destination / current.filename(), fs::copy_options::overwrite_existing);
			}
		}catch(fs::filesystem_error const & e){
			// Per-entry errors are non-fatal by design: log and keep copying.
			std:: cerr << e.what() << '\n';
		}
	}

	return true;
}
// Sign of <value>: -1 for negatives, 0 for (positive or negative) zero,
// and +1 otherwise — note that NaN fails both comparisons and yields +1,
// exactly as before.
float psign(float value){
	if(value < 0.0){
		return -1.0;
	}
	if(value == 0.0){
		return 0.0;
	}
	return 1.0;
}
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
// Case-insensitive equality of two bytes.
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
bool icompare_pred(unsigned char a, unsigned char b) {
	const int lowerA = std::tolower(a);
	const int lowerB = std::tolower(b);
	return lowerA == lowerB;
}
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
// Case-insensitive string equality; strings of different length never match.
// see https://stackoverflow.com/questions/23943728/case-insensitive-standard-string-comparison-in-c
bool icompare(std::string const& a, std::string const& b) {
	if (a.length() != b.length()) {
		return false;
	}
	return std::equal(b.begin(), b.end(), a.begin(), icompare_pred);
}
//bool endsWith(const std::string &str, const std::string &suffix) {
// return str.size() >= suffix.size() && str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
//}
// True if <str> ends with <suffix> (case-sensitive).
//
// Fix: compare in place via string::compare(pos, len, str) instead of
// allocating a temporary substring — the commented-out variant above this
// function already sketched exactly this form.
bool endsWith(const string &str, const string &suffix) {
	if (str.size() < suffix.size()) {
		return false;
	}
	return str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
}
// Case-insensitive endsWith; delegates the tail comparison to icompare().
// NOTE(review): this appears to duplicate an identical earlier definition in
// this file — confirm only one copy ends up in the translation unit.
bool iEndsWith(const std::string &str, const std::string &suffix) {
	return str.size() >= suffix.size()
		&& icompare(str.substr(str.size() - suffix.size()), suffix);
}
// Splits <str> at every occurrence of any character in <delimiters>.
// Empty tokens (produced by consecutive, leading, or trailing delimiters)
// are dropped.
//
// Fix: the loop index and token start were int while str.size() is size_t;
// the signed/unsigned comparisons were fragile (and warn on most compilers).
// Use size_t throughout.
vector<string> split(string str, vector<char> delimiters) {
	vector<string> tokens;

	// True if ch is one of the requested delimiter characters.
	auto isDelimiter = [&delimiters](char ch) {
		for (auto &delimiter : delimiters) {
			if (ch == delimiter) {
				return true;
			}
		}
		return false;
	};

	size_t start = 0;
	for (size_t i = 0; i < str.size(); i++) {
		if (isDelimiter(str[i])) {
			if (start < i) {
				tokens.push_back(str.substr(start, i - start));
			}
			start = i + 1;
		}
	}

	// Trailing token after the last delimiter (if non-empty).
	if (start < str.size()) {
		tokens.push_back(str.substr(start));
	}

	return tokens;
}
vector<string> split(string str, char delimiter) {
return split(str, { delimiter });
}
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
// Returns <s> with leading whitespace removed.
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
string ltrim(string s) {
	auto firstNonSpace = std::find_if(s.begin(), s.end(), [](unsigned char ch) {
		return std::isspace(ch) == 0;
	});
	s.erase(s.begin(), firstNonSpace);
	return s;
}
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
// Returns <s> with trailing whitespace removed.
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
string rtrim(string s) {
	auto lastNonSpace = std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) {
		return std::isspace(ch) == 0;
	});
	// .base() converts the reverse iterator to the forward iterator just
	// past the last non-space character.
	s.erase(lastNonSpace.base(), s.end());
	return s;
}
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
// Returns <s> with both leading and trailing whitespace removed.
// see https://stackoverflow.com/questions/216823/whats-the-best-way-to-trim-stdstring
string trim(string s) {
	return rtrim(ltrim(s));
}
}
return usage;
}

// ---------------------------------------------------------------------------
// test/test.js — standalone octree-binning sanity check (new file, 51 lines)
// ---------------------------------------------------------------------------
// Bounding box of a sample point cloud; the full float64 literals are kept
// exactly because this script probes floating-point binning behavior.
let min = {x: -8.0960000000000001, y: -4.7999999999999998, z: 1.6870000000000001 };
let max = {x: 7.4619999999999997, y: 10.757999999999999, z: 17.245000000000001 };

// Box extent per axis.
let size = {
	x: max.x - min.x,
	y: max.y - min.y,
	z: max.z - min.z
};

// Box center computed two ways — (min + max) / 2 vs min + size * 0.5 —
// so their floating-point results can be compared in the output below.
let center = {
	x: (max.x + min.x) / 2,
	y: (max.y + min.y) / 2,
	z: (max.z + min.z) / 2,
};

let center2 = {
	x: min.x + size.x * 0.5,
	y: min.y + size.y * 0.5,
	z: min.z + size.z * 0.5,
};

// Two nearby sample points — presumably expected to land in the same (or
// adjacent) grid cell; TODO confirm intended expectation with the author.
let p1 = {x: -7.9370000000000003, y: 2.9790000000000001, z: 2.0329999999999999};
let p2 = {x: -7.9740000000000002, y: 3.0569999999999999, z: 1.9600000000000000}

// Sampling grid resolution per axis.
let gridSize = 128;
let dGridSize = gridSize;
// Returns the linear cell index of <point> in a gridSize³ grid spanning
// [min, max]: x varies fastest, then y, then z. Relies on the module-level
// min/size/gridSize/dGridSize defined above. Debug logging kept as-is.
//
// Fix: parseInt(number) converts the number to a string and re-parses it —
// wrong for values that stringify in exponential notation and needlessly
// slow. Math.floor is the intended truncation here; note it differs from
// parseInt only for negative inputs (floor rounds down, parseInt truncates
// toward zero), which can only occur for points outside the bounding box.
function indexOf(point){
	let dix = dGridSize * (point.x - min.x) / size.x;
	let diy = dGridSize * (point.y - min.y) / size.y;
	let diz = dGridSize * (point.z - min.z) / size.z;

	console.log(dix, diy, diz);

	// Clamp to the last cell so points exactly on max stay in range.
	let ix = Math.floor(Math.min(dix, dGridSize - 1.0));
	let iy = Math.floor(Math.min(diy, dGridSize - 1.0));
	let iz = Math.floor(Math.min(diz, dGridSize - 1.0));

	console.log(ix, iy, iz);

	let index = ix + iy * gridSize + iz * gridSize * gridSize;

	return index;
}
// Print both center computations and the grid indices of the two nearby
// points; if the binning is stable, the two indices should agree.
console.log("==============");
console.log(center);
console.log(center2);

console.log(indexOf(p1));
console.log(indexOf(p2));