OpenVDB 10.0.1
GridBuilder.h
1// Copyright Contributors to the OpenVDB Project
2// SPDX-License-Identifier: MPL-2.0
3
4/*!
5 \file GridBuilder.h
6
7 \author Ken Museth
8
9 \date June 26, 2020
10
11 \brief Generates a NanoVDB grid from any volume or function.
12
13 \note This is only intended as a simple tool to generate nanovdb grids without
14 any dependency on openvdb.
15*/
16
17#ifndef NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED
18#define NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED
19
20#include "GridHandle.h"
21#include "GridStats.h"
22#include "GridChecksum.h"
23#include "Range.h"
24#include "Invoke.h"
25#include "ForEach.h"
26#include "Reduce.h"
27#include "DitherLUT.h"// for nanovdb::DitherLUT
28
29#include <map>
30#include <limits>
31#include <sstream> // for stringstream
32#include <vector>
33#include <cstring> // for memcpy
34
35namespace nanovdb {
36
37/// @brief Compression oracle based on absolute difference
38class AbsDiff
39{
40 float mTolerance;// absolute error tolerance
41public:
42 /// @note The default value of -1 means it's un-initialized!
43 AbsDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
44 AbsDiff(const AbsDiff&) = default;
45 ~AbsDiff() = default;
46 void setTolerance(float tolerance) { mTolerance = tolerance; }
47 float getTolerance() const { return mTolerance; }
48 /// @brief Return true if the approximate value is within the accepted
49 /// absolute error bounds of the exact value.
50 ///
51 /// @details Required member method
52 bool operator()(float exact, float approx) const
53 {
54 return Abs(exact - approx) <= mTolerance;
55 }
56};// AbsDiff
57
58inline std::ostream& operator<<(std::ostream& os, const AbsDiff& diff)
59{
60 os << "Absolute tolerance: " << diff.getTolerance();
61 return os;
62}
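// The following sketch (illustrative only, not part of this header) shows what a user-defined
// compression oracle must provide to be usable with GridBuilder::getHandle when BuildT = FpN:
// copy construction, setTolerance/getTolerance, the required call operator taking the exact and
// approximate values, and a stream insertion operator for the verbose output path. The name
// MyOracle and the default tolerance are arbitrary examples.
/*
    struct MyOracle
    {
        float mTol;
        MyOracle(float tol = 1e-3f) : mTol(tol) {}
        void  setTolerance(float tol) { mTol = tol; }
        float getTolerance() const { return mTol; }
        bool  operator()(float exact, float approx) const { return Abs(exact - approx) <= mTol; }
    };
    inline std::ostream& operator<<(std::ostream& os, const MyOracle& o) { return os << "Tolerance: " << o.getTolerance(); }
*/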
63
64/// @brief Compression oracle based on relative difference
65class RelDiff
66{
67 float mTolerance;// relative error tolerance
68public:
69 /// @note The default value of -1 means it's un-initialized!
70 RelDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
71 RelDiff(const RelDiff&) = default;
72 ~RelDiff() = default;
73 void setTolerance(float tolerance) { mTolerance = tolerance; }
74 float getTolerance() const { return mTolerance; }
75 /// @brief Return true if the approximate value is within the accepted
76 /// relative error bounds of the exact value.
77 ///
78 /// @details Required member method
79 bool operator()(float exact, float approx) const
80 {
81 return Abs(exact - approx)/Max(Abs(exact), Abs(approx)) <= mTolerance;
82 }
83};// RelDiff
84
85inline std::ostream& operator<<(std::ostream& os, const RelDiff& diff)
86{
87 os << "Relative tolerance: " << diff.getTolerance();
88 return os;
89}
90
91/// @brief Allows for the construction of NanoVDB grids without any dependency
92template<typename ValueT, typename BuildT = ValueT, typename StatsT = Stats<ValueT>>
93class GridBuilder
94{
95 struct BuildLeaf;
96 template<typename ChildT>
97 struct BuildNode;
98 template<typename ChildT>
99 struct BuildRoot;
100
101 struct Codec {float min, max; uint16_t log2, size;};// used for adaptive bit-rate quantization
102
103 using SrcNode0 = BuildLeaf;
104 using SrcNode1 = BuildNode<SrcNode0>;
105 using SrcNode2 = BuildNode<SrcNode1>;
106 using SrcRootT = BuildRoot<SrcNode2>;
107
108 using DstNode0 = NanoLeaf< BuildT>;// nanovdb::LeafNode<ValueT>; // leaf
109 using DstNode1 = NanoLower<BuildT>;// nanovdb::InternalNode<DstNode0>; // lower
110 using DstNode2 = NanoUpper<BuildT>;// nanovdb::InternalNode<DstNode1>; // upper
111 using DstRootT = NanoRoot< BuildT>;// nanovdb::RootNode<DstNode2>;
112 using DstTreeT = NanoTree< BuildT>;
113 using DstGridT = NanoGrid< BuildT>;
114
115 ValueT mDelta; // skip node if: node.max < -mDelta || node.min > mDelta
116 uint8_t* mBufferPtr;// pointer to the beginning of the buffer
117 uint64_t mBufferOffsets[9];//grid, tree, root, upper, lower, leafs, meta data, blind data, buffer size
118 int mVerbose;
119 uint64_t mBlindDataSize;
120 SrcRootT mRoot;// this root supports random write
121 std::vector<SrcNode0*> mArray0; // leaf nodes
122 std::vector<SrcNode1*> mArray1; // lower internal nodes
123 std::vector<SrcNode2*> mArray2; // upper internal nodes
124 std::unique_ptr<Codec[]> mCodec;// defines a codec per leaf node
125 GridClass mGridClass;
126 StatsMode mStats;
127 ChecksumMode mChecksum;
128 bool mDitherOn;
129
130 // Below are private methods used to serialize nodes into NanoVDB
131 template< typename OracleT, typename BufferT>
132 GridHandle<BufferT> initHandle(const OracleT &oracle, const BufferT& buffer);
133
134 template <typename T, typename OracleT>
135 inline typename std::enable_if<!is_same<T, FpN>::value>::type
136 compression(uint64_t&, OracleT) {}// no-op
137
138 template <typename T, typename OracleT>
139 inline typename std::enable_if<is_same<T, FpN>::value>::type
140 compression(uint64_t &offset, OracleT oracle);
141
142 template<typename T>
143 typename std::enable_if<!is_same<Fp4, typename T::BuildType>::value &&
144 !is_same<Fp8, typename T::BuildType>::value &&
145 !is_same<Fp16, typename T::BuildType>::value &&
146 !is_same<FpN, typename T::BuildType>::value>::type
147 processLeafs(std::vector<T*>&);
148
149 template<typename T>
150 typename std::enable_if<is_same<Fp4, typename T::BuildType>::value ||
151 is_same<Fp8, typename T::BuildType>::value ||
152 is_same<Fp16, typename T::BuildType>::value>::type
153 processLeafs(std::vector<T*>&);
154
155 template<typename T>
156 typename std::enable_if<is_same<FpN, typename T::BuildType>::value>::type
157 processLeafs(std::vector<T*>&);
158
159 template<typename SrcNodeT>
160 void processNodes(std::vector<SrcNodeT*>&);
161
162 DstRootT* processRoot();
163
164 DstTreeT* processTree();
165
166 DstGridT* processGrid(const Map&, const std::string&);
167
168 template<typename T, typename FlagT>
169 typename std::enable_if<!std::is_floating_point<T>::value>::type
170 setFlag(const T&, const T&, FlagT& flag) const { flag &= ~FlagT(1); } // unset first bit
171
172 template<typename T, typename FlagT>
173 typename std::enable_if<std::is_floating_point<T>::value>::type
174 setFlag(const T& min, const T& max, FlagT& flag) const;
175
176public:
177 struct ValueAccessor;
178
179 GridBuilder(ValueT background = ValueT(),
180 GridClass gClass = GridClass::Unknown,
181 uint64_t blindDataSize = 0);
182
183 ValueAccessor getAccessor() { return ValueAccessor(mRoot); }
184
185 /// @brief Performs multi-threaded bottom-up signed-distance flood-filling and changes GridClass to LevelSet
186 ///
187 /// @warning Only call this method once this GridBuilder contains a valid signed distance field
188 void sdfToLevelSet();
189
190 /// @brief Performs multi-threaded bottom-up signed-distance flood-filling followed by level-set -> FOG volume
191 /// conversion. It also changes the GridClass to FogVolume
192 ///
193 /// @warning Only call this method once this GridBuilder contains a valid signed distance field
194 void sdfToFog();
195
196 void setVerbose(int mode = 1) { mVerbose = mode; }
197
198 void enableDithering(bool on = true) { mDitherOn = on; }
199
200 void setStats(StatsMode mode = StatsMode::Default) { mStats = mode; }
201
202 void setChecksum(ChecksumMode mode = ChecksumMode::Default) { mChecksum = mode; }
203
204 void setGridClass(GridClass mode = GridClass::Unknown) { mGridClass = mode; }
205
206 /// @brief Return an instance of a GridHandle (invoking move semantics)
207 template<typename OracleT = AbsDiff, typename BufferT = HostBuffer>
208 GridHandle<BufferT> getHandle(double voxelSize = 1.0,
209 const Vec3d& gridOrigin = Vec3d(0),
210 const std::string& name = "",
211 const OracleT& oracle = OracleT(),
212 const BufferT& buffer = BufferT());
213
214 /// @brief Return an instance of a GridHandle (invoking move semantics)
215 template<typename OracleT = AbsDiff, typename BufferT = HostBuffer>
216 GridHandle<BufferT> getHandle(const Map& map,
217 const std::string& name = "",
218 const OracleT& oracle = OracleT(),
219 const BufferT& buffer = BufferT());
220
221 /// @brief Sets grids values in domain of the @a bbox to those returned by the specified @a func with the
222 /// expected signature [](const Coord&)->ValueT.
223 ///
224 /// @note If @a func returns a value equal to the background value (specified in the constructor) at a
225 /// specific voxel coordinate, then the active state of that coordinate is left off! Else the value
226 /// is set and the active state is on. This is done to allow for sparse grids to be generated.
227 ///
228 /// @param func Functor used to evaluate the grid values in the @a bbox
229 /// @param bbox Coordinate bounding-box over which the grid values will be set.
230 /// @param delta Specifies a lower threshold value for rendering (optional). Typically equals the voxel size
231 /// for level sets and otherwise it's zero.
232 template<typename Func>
233 void operator()(const Func& func, const CoordBBox& bbox, ValueT delta = ValueT(0));
234
235}; // GridBuilder
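// A minimal usage sketch (illustrative only; not part of the original header). The builder is
// populated through the functor-based operator() declared above, and the result is serialized
// into a GridHandle. Voxels where the functor returns the background value are skipped, which
// keeps the output sparse. The lambda, bounding box, voxel size and grid name are arbitrary examples.
/*
    nanovdb::GridBuilder<float> builder(0.0f);// background = 0
    const nanovdb::CoordBBox bbox(nanovdb::Coord(-50), nanovdb::Coord(50));
    builder([](const nanovdb::Coord& ijk) {// functor with the expected (const Coord&)->float signature
        return ijk[1] > 0 ? 1.0f : 0.0f;
    }, bbox);
    auto handle = builder.getHandle(0.5, nanovdb::Vec3d(0), "example");// voxel size, origin, name
    const nanovdb::NanoGrid<float>* grid = handle.grid<float>();// access the in-memory NanoVDB grid
*/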
236
237//================================================================================================
238
239template<typename ValueT, typename BuildT, typename StatsT>
240GridBuilder<ValueT, BuildT, StatsT>::
241GridBuilder(ValueT background, GridClass gClass, uint64_t blindDataSize)
242 : mDelta(0)
243 , mVerbose(0)
244 , mBlindDataSize(blindDataSize)
245 , mRoot(background)
246 , mGridClass(gClass)
247 , mStats(StatsMode::Default)
248 , mChecksum(ChecksumMode::Default)
249 , mDitherOn(false)
250{
251}
252
253template<typename ValueT, typename BuildT, typename StatsT>
254template<typename Func>
255void GridBuilder<ValueT, BuildT, StatsT>::
256operator()(const Func& func, const CoordBBox& voxelBBox, ValueT delta)
257{
258 static_assert(is_same<ValueT, typename std::result_of<Func(const Coord&)>::type>::value, "GridBuilder: mismatched ValueType");
259 mDelta = delta; // delta = voxel size for level sets, else 0
260
261 using LeafT = BuildLeaf;
262 const CoordBBox leafBBox(voxelBBox[0] >> LeafT::TOTAL, voxelBBox[1] >> LeafT::TOTAL);
263 std::mutex mutex;
264 auto kernel = [&](const CoordBBox& b) {
265 LeafT* leaf = nullptr;
266 for (auto it = b.begin(); it; ++it) {
267 Coord min(*it << LeafT::TOTAL), max(min + Coord(LeafT::DIM - 1));
268 const CoordBBox bbox(min.maxComponent(voxelBBox.min()),
269 max.minComponent(voxelBBox.max()));// crop
270 if (leaf == nullptr) {
271 leaf = new LeafT(bbox[0], mRoot.mBackground, false);
272 } else {
273 leaf->mOrigin = bbox[0] & ~LeafT::MASK;
274 NANOVDB_ASSERT(leaf->mValueMask.isOff());
275 }
276 leaf->mDstOffset = 0;// no prune
277 for (auto ijk = bbox.begin(); ijk; ++ijk) {
278 const auto v = func(*ijk);
279 if (v == mRoot.mBackground) {// don't insert background values
280 continue;
281 }
282 leaf->setValue(*ijk, v);
283 }
284 if (!leaf->mValueMask.isOff()) {// has active values
285 if (leaf->mValueMask.isOn()) {// only active values
286 const auto first = leaf->getFirstValue();
287 int n=1;
288 while (n<512) {// 8^3 = 512
289 if (leaf->mValues[n++] != first) break;
290 }
291 if (n == 512) leaf->mDstOffset = 1;// prune below
292 }
293 std::lock_guard<std::mutex> guard(mutex);
294 NANOVDB_ASSERT(leaf != nullptr);
295 mRoot.addNode(leaf);
296 NANOVDB_ASSERT(leaf == nullptr);
297 }
298 }// loop over sub-part of leafBBox
299 if (leaf) {
300 delete leaf;
301 }
302 }; // kernel
303 forEach(leafBBox, kernel);
304
305 // Prune leaf and tile nodes
306 for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
307 if (auto *upper = it2->second.child) {//upper level internal node
308 for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
309 auto *lower = upper->mTable[*it1].child;// lower level internal node
310 for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
311 auto *leaf = lower->mTable[*it0].child;// leaf nodes
312 if (leaf->mDstOffset) {
313 lower->mTable[*it0].value = leaf->getFirstValue();
314 lower->mChildMask.setOff(*it0);
315 lower->mValueMask.setOn(*it0);
316 delete leaf;
317 }
318 }// loop over leaf nodes
319 if (lower->mChildMask.isOff()) {//only tiles
320 const auto first = lower->getFirstValue();
321 int n=1;
322 while (n < 4096) {// 16^3 = 4096
323 if (lower->mTable[n++].value != first) break;
324 }
325 if (n == 4096) {// identical tile values so prune
326 upper->mTable[*it1].value = first;
327 upper->mChildMask.setOff(*it1);
328 upper->mValueMask.setOn(*it1);
329 delete lower;
330 }
331 }
332 }// loop over lower internal nodes
333 if (upper->mChildMask.isOff()) {//only tiles
334 const auto first = upper->getFirstValue();
335 int n=1;
336 while (n < 32768) {// 32^3 = 32768
337 if (upper->mTable[n++].value != first) break;
338 }
339 if (n == 32768) {// identical tile values so prune
340 it2->second.value = first;
341 it2->second.state = upper->mValueMask.isOn();
342 it2->second.child = nullptr;
343 delete upper;
344 }
345 }
346 }// is child node of the root
347 }// loop over root table
348}
349
350//================================================================================================
351
352template<typename ValueT, typename BuildT, typename StatsT>
353template<typename OracleT, typename BufferT>
354GridHandle<BufferT> GridBuilder<ValueT, BuildT, StatsT>::
355initHandle(const OracleT &oracle, const BufferT& buffer)
356{
357 mArray0.clear();
358 mArray1.clear();
359 mArray2.clear();
360 mArray0.reserve(mRoot.template nodeCount<SrcNode0>());
361 mArray1.reserve(mRoot.template nodeCount<SrcNode1>());
362 mArray2.reserve(mRoot.template nodeCount<SrcNode2>());
363
364 uint64_t offset[3] = {0};
365 for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
366 if (SrcNode2 *upper = it2->second.child) {
367 upper->mDstOffset = offset[2];
368 mArray2.emplace_back(upper);
369 offset[2] += DstNode2::memUsage();
370 for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
371 SrcNode1 *lower = upper->mTable[*it1].child;
372 lower->mDstOffset = offset[1];
373 mArray1.emplace_back(lower);
374 offset[1] += DstNode1::memUsage();
375 for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
376 SrcNode0 *leaf = lower->mTable[*it0].child;
377 leaf->mDstOffset = offset[0];// dummy if BuildT = FpN
378 mArray0.emplace_back(leaf);
379 offset[0] += sizeof(DstNode0);// dummy if BuildT = FpN
380 }// loop over leaf nodes
381 }// loop over lower internal nodes
382 }// is child node of the root
383 }// loop over root table
384
385 this->template compression<BuildT, OracleT>(offset[0], oracle);// no-op unless BuildT = FpN
386
387 mBufferOffsets[0] = 0;// grid is always stored at the start of the buffer!
388 mBufferOffsets[1] = DstGridT::memUsage(); // tree
389 mBufferOffsets[2] = DstTreeT::memUsage(); // root
390 mBufferOffsets[3] = DstRootT::memUsage(static_cast<uint32_t>(mRoot.mTable.size())); // upper internal nodes
391 mBufferOffsets[4] = offset[2]; // lower internal nodes
392 mBufferOffsets[5] = offset[1]; // leaf nodes
393 mBufferOffsets[6] = offset[0]; // blind meta data
394 mBufferOffsets[7] = GridBlindMetaData::memUsage(mBlindDataSize > 0 ? 1 : 0); // blind data
395 mBufferOffsets[8] = mBlindDataSize;// end of buffer
396
397 // Compute the prefix sum
398 for (int i = 2; i < 9; ++i) {
399 mBufferOffsets[i] += mBufferOffsets[i - 1];
400 }
401
402 GridHandle<BufferT> handle(BufferT::create(mBufferOffsets[8], &buffer));
403 mBufferPtr = handle.data();
404 return handle;
405} // GridBuilder::initHandle
406
407//================================================================================================
408
409template<typename ValueT, typename BuildT, typename StatsT>
410template <typename T, typename OracleT>
411inline typename std::enable_if<is_same<T, FpN>::value>::type
412GridBuilder<ValueT, BuildT, StatsT>::compression(uint64_t &offset, OracleT oracle)
413{
414 static_assert(is_same<FpN , BuildT>::value, "compression: expected BuildT == FpN");
415 static_assert(is_same<float, ValueT>::value, "compression: expected ValueT == float");
416 if (is_same<AbsDiff, OracleT>::value && oracle.getTolerance() < 0.0f) {// default tolerance for level set and fog volumes
417 if (mGridClass == GridClass::LevelSet) {
418 static const float halfWidth = 3.0f;
419 oracle.setTolerance(0.1f * mRoot.mBackground / halfWidth);// range of ls: [-3dx; 3dx]
420 } else if (mGridClass == GridClass::FogVolume) {
421 oracle.setTolerance(0.01f);// range of FOG volumes: [0;1]
422 } else {
423 oracle.setTolerance(0.0f);
424 }
425 }
426
427 const size_t size = mArray0.size();
428 mCodec.reset(new Codec[size]);
429
430 DitherLUT lut(mDitherOn);
431 auto kernel = [&](const Range1D &r) {
432 for (auto i=r.begin(); i!=r.end(); ++i) {
433 const float *data = mArray0[i]->mValues;
434 float min = std::numeric_limits<float>::max(), max = -min;
435 for (int j=0; j<512; ++j) {
436 float v = data[j];
437 if (v<min) min = v;
438 if (v>max) max = v;
439 }
440 mCodec[i].min = min;
441 mCodec[i].max = max;
442 const float range = max - min;
443 uint16_t logBitWidth = 0;// 0,1,2,3,4 => 1,2,4,8,16 bits
444 while (range > 0.0f && logBitWidth < 4u) {
445 const uint32_t mask = (uint32_t(1) << (uint32_t(1) << logBitWidth)) - 1u;
446 const float encode = mask/range;
447 const float decode = range/mask;
448 int j = 0;
449 do {
450 const float exact = data[j];// exact value
451 const uint32_t code = uint32_t(encode*(exact - min) + lut(j));
452 const float approx = code * decode + min;// approximate value
453 j += oracle(exact, approx) ? 1 : 513;
454 } while(j < 512);
455 if (j == 512) break;
456 ++logBitWidth;
457 }
458 mCodec[i].log2 = logBitWidth;
459 mCodec[i].size = DstNode0::DataType::memUsage(1u << logBitWidth);
460 }
461 };// kernel
462 forEach(0, size, 4, kernel);
463
464 if (mVerbose) {
465 uint32_t counters[5+1] = {0};
466 ++counters[mCodec[0].log2];
467 for (size_t i=1; i<size; ++i) {
468 ++counters[mCodec[i].log2];
469 mArray0[i]->mDstOffset = mArray0[i-1]->mDstOffset + mCodec[i-1].size;
470 }
471 std::cout << "\n" << oracle << std::endl;
472 std::cout << "Dithering: " << (mDitherOn ? "enabled" : "disabled") << std::endl;
473 float avg = 0.0f;
474 for (uint32_t i=0; i<=5; ++i) {
475 if (uint32_t n = counters[i]) {
476 avg += n * float(1 << i);
477 printf("%2i bits: %6u leaf nodes, i.e. %4.1f%%\n",1<<i, n, 100.0f*n/float(size));
478 }
479 }
480 printf("%4.1f bits per value on average\n", avg/float(size));
481 } else {
482 for (size_t i=1; i<size; ++i) {
483 mArray0[i]->mDstOffset = mArray0[i-1]->mDstOffset + mCodec[i-1].size;
484 }
485 }
486 offset = mArray0[size-1]->mDstOffset + mCodec[size-1].size;
487}// GridBuilder::compression
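// Illustrative arithmetic for the bit-width search above (not part of the original header):
// for a leaf with min = 0.0f and max = 1.0f, the 4-bit candidate (logBitWidth = 2) uses
// mask = 15, so decode = range/mask = 1/15 ~= 0.067; the worst-case rounding error is on the
// order of decode, so an AbsDiff tolerance of 0.01 typically rejects it and the loop advances
// to 8 bits, where decode = 1/255 ~= 0.0039 usually satisfies the oracle. Leaves whose values
// already pass at 1 or 2 bits are stored that compactly, which is what makes FpN adaptive per
// leaf node.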
488
489//================================================================================================
490
491template<typename ValueT, typename BuildT, typename StatsT>
492void GridBuilder<ValueT, BuildT, StatsT>::
493 sdfToLevelSet()
494{
495 mArray0.clear();
496 mArray1.clear();
497 mArray2.clear();
498 mArray0.reserve(mRoot.template nodeCount<SrcNode0>());
499 mArray1.reserve(mRoot.template nodeCount<SrcNode1>());
500 mArray2.reserve(mRoot.template nodeCount<SrcNode2>());
501
502 for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
503 if (SrcNode2 *upper = it2->second.child) {
504 mArray2.emplace_back(upper);
505 for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
506 SrcNode1 *lower = upper->mTable[*it1].child;
507 mArray1.emplace_back(lower);
508 for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
509 mArray0.emplace_back(lower->mTable[*it0].child);
510 }// loop over leaf nodes
511 }// loop over lower internal nodes
512 }// is child node of the root
513 }// loop over root table
514
515 // Note that the bottom-up flood filling is essential
516 const ValueT outside = mRoot.mBackground;
517 forEach(mArray0, 8, [&](const Range1D& r) {
518 for (auto i = r.begin(); i != r.end(); ++i)
519 mArray0[i]->signedFloodFill(outside);
520 });
521 forEach(mArray1, 1, [&](const Range1D& r) {
522 for (auto i = r.begin(); i != r.end(); ++i)
523 mArray1[i]->signedFloodFill(outside);
524 });
525 forEach(mArray2, 1, [&](const Range1D& r) {
526 for (auto i = r.begin(); i != r.end(); ++i)
527 mArray2[i]->signedFloodFill(outside);
528 });
529 mRoot.signedFloodFill(outside);
530 mGridClass = GridClass::LevelSet;
531} // GridBuilder::sdfToLevelSet
532
533//================================================================================================
534
535template<typename ValueT, typename BuildT, typename StatsT>
536template<typename OracleT, typename BufferT>
537GridHandle<BufferT> GridBuilder<ValueT, BuildT, StatsT>::
538 getHandle(double dx, //voxel size
539 const Vec3d& p0, // origin
540 const std::string& name,
541 const OracleT& oracle,
542 const BufferT& buffer)
543{
544 if (dx <= 0) {
545 throw std::runtime_error("GridBuilder: voxel size is zero or negative");
546 }
547 Map map; // affine map
548 map.set(dx, p0, 1.0);
549 return this->getHandle(map, name, oracle, buffer);
550} // GridBuilder::getHandle
551
552//================================================================================================
553
554template<typename ValueT, typename BuildT, typename StatsT>
555template< typename OracleT, typename BufferT>
556GridHandle<BufferT> GridBuilder<ValueT, BuildT, StatsT>::
557 getHandle(const Map& map,
558 const std::string& name,
559 const OracleT& oracle,
560 const BufferT& buffer)
561{
562 if (mGridClass == GridClass::LevelSet && !is_floating_point<ValueT>::value) {
563 throw std::runtime_error("Level sets are expected to be floating point types");
564 } else if (mGridClass == GridClass::FogVolume && !is_floating_point<ValueT>::value) {
565 throw std::runtime_error("Fog volumes are expected to be floating point types");
566 }
567
568 auto handle = this->template initHandle<OracleT, BufferT>(oracle, buffer);// initialize the arrays of nodes
569
570 this->processLeafs(mArray0);
571
572 this->processNodes(mArray1);
573
574 this->processNodes(mArray2);
575
576 auto *grid = this->processGrid(map, name);
577
578 gridStats(*grid, mStats);
579
580 updateChecksum(*grid, mChecksum);
581
582 return handle;
583} // GridBuilder::getHandle
584
585//================================================================================================
586
587template<typename ValueT, typename BuildT, typename StatsT>
588template<typename T, typename FlagT>
589inline typename std::enable_if<std::is_floating_point<T>::value>::type
590GridBuilder<ValueT, BuildT, StatsT>::
591 setFlag(const T& min, const T& max, FlagT& flag) const
592{
593 if (mDelta > 0 && (min > mDelta || max < -mDelta)) {
594 flag |= FlagT(1); // set first bit
595 } else {
596 flag &= ~FlagT(1); // unset first bit
597 }
598}
599
600//================================================================================================
601
602template<typename ValueT, typename BuildT, typename StatsT>
603void GridBuilder<ValueT, BuildT, StatsT>::
604 sdfToFog()
605{
606 this->sdfToLevelSet(); // performs signed flood fill
607
608 const ValueT d = -mRoot.mBackground, w = 1.0f / d;
609 auto op = [&](ValueT& v) -> bool {
610 if (v > ValueT(0)) {
611 v = ValueT(0);
612 return false;
613 }
614 v = v > d ? v * w : ValueT(1);
615 return true;
616 };
617 auto kernel0 = [&](const Range1D& r) {
618 for (auto i = r.begin(); i != r.end(); ++i) {
619 SrcNode0* node = mArray0[i];
620 for (uint32_t i = 0; i < SrcNode0::SIZE; ++i)
621 node->mValueMask.set(i, op(node->mValues[i]));
622 }
623 };
624 auto kernel1 = [&](const Range1D& r) {
625 for (auto i = r.begin(); i != r.end(); ++i) {
626 SrcNode1* node = mArray1[i];
627 for (uint32_t i = 0; i < SrcNode1::SIZE; ++i) {
628 if (node->mChildMask.isOn(i)) {
629 SrcNode0* leaf = node->mTable[i].child;
630 if (leaf->mValueMask.isOff()) {
631 node->mTable[i].value = leaf->getFirstValue();
632 node->mChildMask.setOff(i);
633 delete leaf;
634 }
635 } else {
636 node->mValueMask.set(i, op(node->mTable[i].value));
637 }
638 }
639 }
640 };
641 auto kernel2 = [&](const Range1D& r) {
642 for (auto i = r.begin(); i != r.end(); ++i) {
643 SrcNode2* node = mArray2[i];
644 for (uint32_t i = 0; i < SrcNode2::SIZE; ++i) {
645 if (node->mChildMask.isOn(i)) {
646 SrcNode1* child = node->mTable[i].child;
647 if (child->mChildMask.isOff() && child->mValueMask.isOff()) {
648 node->mTable[i].value = child->getFirstValue();
649 node->mChildMask.setOff(i);
650 delete child;
651 }
652 } else {
653 node->mValueMask.set(i, op(node->mTable[i].value));
654 }
655 }
656 }
657 };
658 forEach(mArray0, 8, kernel0);
659 forEach(mArray1, 1, kernel1);
660 forEach(mArray2, 1, kernel2);
661
662 for (auto it = mRoot.mTable.begin(); it != mRoot.mTable.end(); ++it) {
663 SrcNode2* child = it->second.child;
664 if (child == nullptr) {
665 it->second.state = op(it->second.value);
666 } else if (child->mChildMask.isOff() && child->mValueMask.isOff()) {
667 it->second.value = child->getFirstValue();
668 it->second.state = false;
669 it->second.child = nullptr;
670 delete child;
671 }
672 }
673 mGridClass = GridClass::FogVolume;
674} // GridBuilder::sdfToFog
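// Illustrative arithmetic for the remapping above (not part of the original header): with a
// level-set background of 3*dx (half-width of 3 voxels), d = -3*dx and w = 1/d, so an interior
// SDF value v = -1.5*dx maps to v*w = 0.5, values at or below d clamp to 1, and all exterior
// (positive) values become 0 and are marked inactive. The result is a normalized fog density
// in [0,1] that is largest deep inside the original surface.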
675
676//================================================================================================
677
678template<typename ValueT, typename BuildT, typename StatsT>
679template<typename T>
680inline typename std::enable_if<!is_same<Fp4, typename T::BuildType>::value &&
681 !is_same<Fp8, typename T::BuildType>::value &&
682 !is_same<Fp16, typename T::BuildType>::value &&
683 !is_same<FpN, typename T::BuildType>::value>::type
684GridBuilder<ValueT, BuildT, StatsT>::
685 processLeafs(std::vector<T*>& srcLeafs)
686{
687 static_assert(!is_same<bool, ValueT>::value, "Does not yet support bool leafs");
688 static_assert(!is_same<ValueMask, ValueT>::value, "Does not yet support mask leafs");
689 auto kernel = [&](const Range1D& r) {
690 auto *ptr = mBufferPtr + mBufferOffsets[5];
691 for (auto i = r.begin(); i != r.end(); ++i) {
692 auto *srcLeaf = srcLeafs[i];
693 auto *dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
694 auto *data = dstLeaf->data();
695 if (DstNode0::DataType::padding()>0u) {
696 std::memset(data, 0, DstNode0::DataType::memUsage());
697 } else {
698 data->mBBoxDif[0] = 0u;
699 data->mBBoxDif[1] = 0u;
700 data->mBBoxDif[2] = 0u;
701 data->mFlags = 0u;// enable rendering, no bbox
702 data->mMinimum = data->mMaximum = ValueT();
703 data->mAverage = data->mStdDevi = 0;
704 }
705 srcLeaf->mDstNode = dstLeaf;
706 data->mBBoxMin = srcLeaf->mOrigin; // copy origin of node
707 data->mValueMask = srcLeaf->mValueMask; // copy value mask
708 const ValueT* src = srcLeaf->mValues;
709 for (ValueT *dst = data->mValues, *end = dst + SrcNode0::SIZE; dst != end; dst += 4, src += 4) {
710 dst[0] = src[0]; // copy *all* voxel values in sets of four, i.e. loop-unrolling
711 dst[1] = src[1];
712 dst[2] = src[2];
713 dst[3] = src[3];
714 }
715 }
716 };
717 forEach(srcLeafs, 8, kernel);
718} // GridBuilder::processLeafs<T>
719
720//================================================================================================
721
722template<typename ValueT, typename BuildT, typename StatsT>
723template<typename T>
724inline typename std::enable_if<is_same<Fp4, typename T::BuildType>::value ||
725 is_same<Fp8, typename T::BuildType>::value ||
726 is_same<Fp16, typename T::BuildType>::value>::type
727GridBuilder<ValueT, BuildT, StatsT>::
728 processLeafs(std::vector<T*>& srcLeafs)
729{
730 static_assert(is_same<float, ValueT>::value, "Expected ValueT == float");
731 using ArrayT = typename DstNode0::DataType::ArrayType;
732 using FloatT = typename std::conditional<DstNode0::DataType::bitWidth()>=16, double, float>::type;// 16-bit compression and higher requires double
733 static constexpr FloatT UNITS = FloatT((1 << DstNode0::DataType::bitWidth()) - 1);// # of unique non-zero values
734 DitherLUT lut(mDitherOn);
735
736 auto kernel = [&](const Range1D& r) {
737 uint8_t* ptr = mBufferPtr + mBufferOffsets[5];
738 for (auto i = r.begin(); i != r.end(); ++i) {
739 auto *srcLeaf = srcLeafs[i];
740 auto *dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
741 srcLeaf->mDstNode = dstLeaf;
742 auto *data = dstLeaf->data();
743 if (DstNode0::DataType::padding()>0u) {
744 std::memset(data, 0, DstNode0::DataType::memUsage());
745 } else {
746 data->mFlags = data->mBBoxDif[2] = data->mBBoxDif[1] = data->mBBoxDif[0] = 0u;
747 data->mDev = data->mAvg = data->mMax = data->mMin = 0u;
748 }
749 data->mBBoxMin = srcLeaf->mOrigin; // copy origin of node
750 data->mValueMask = srcLeaf->mValueMask; // copy value mask
751 const float* src = srcLeaf->mValues;
752 // compute extrema values
753 float min = std::numeric_limits<float>::max(), max = -min;
754 for (int i=0; i<512; ++i) {
755 const float v = src[i];
756 if (v < min) min = v;
757 if (v > max) max = v;
758 }
759 data->init(min, max, DstNode0::DataType::bitWidth());
760 // perform quantization relative to the values in the current leaf node
761 const FloatT encode = UNITS/(max-min);
762 auto *code = reinterpret_cast<ArrayT*>(data->mCode);
763 int offset = 0;
764 if (is_same<Fp4, BuildT>::value) {// resolved at compile-time
765 for (int j=0; j<128; ++j) {
766 auto tmp = ArrayT(encode * (*src++ - min) + lut(offset++));
767 *code++ = ArrayT(encode * (*src++ - min) + lut(offset++)) << 4 | tmp;
768 tmp = ArrayT(encode * (*src++ - min) + lut(offset++));
769 *code++ = ArrayT(encode * (*src++ - min) + lut(offset++)) << 4 | tmp;
770 }
771 } else {
772 for (int j=0; j<128; ++j) {
773 *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
774 *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
775 *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
776 *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
777 }
778 }
779 }
780 };
781 forEach(srcLeafs, 8, kernel);
782} // GridBuilder::processLeafs<Fp4, Fp8, Fp16>
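// Illustrative arithmetic for the fixed bit-rate encoding above (not part of the original
// header): for Fp8, bitWidth() = 8 so UNITS = 255. A leaf with min = -0.2f and max = 0.8f gives
// encode = 255/1.0 = 255, and a voxel value of 0.3f is stored as
// uint8_t(255*(0.3 - (-0.2)) + dither) = 127 or 128 depending on the dither offset; the reader
// reconstructs it from the per-leaf (min, max) recorded by data->init(), so the round-trip
// error is bounded by roughly (max - min)/255 per voxel.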
783
784//================================================================================================
785
786template<typename ValueT, typename BuildT, typename StatsT>
787template<typename T>
788inline typename std::enable_if<is_same<FpN, typename T::BuildType>::value>::type
789GridBuilder<ValueT, BuildT, StatsT>::
790 processLeafs(std::vector<T*>& srcLeafs)
791{
792 static_assert(is_same<float, ValueT>::value, "Expected ValueT == float");
793
794 DitherLUT lut(mDitherOn);
795 auto kernel = [&](const Range1D& r) {
796 uint8_t* ptr = mBufferPtr + mBufferOffsets[5];
797 for (auto i = r.begin(); i != r.end(); ++i) {
798 auto *srcLeaf = srcLeafs[i];
799 auto *dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
800 auto *data = dstLeaf->data();
801 data->mBBoxMin = srcLeaf->mOrigin; // copy origin of node
802 data->mBBoxDif[0] = 0u;
803 data->mBBoxDif[1] = 0u;
804 data->mBBoxDif[2] = 0u;
805 srcLeaf->mDstNode = dstLeaf;
806 const uint8_t logBitWidth = uint8_t(mCodec[i].log2);
807 data->mFlags = logBitWidth << 5;// pack logBitWidth into 3 MSB of mFlag
808 data->mValueMask = srcLeaf->mValueMask; // copy value mask
809 const float* src = srcLeaf->mValues;
810 const float min = mCodec[i].min, max = mCodec[i].max;
811 data->init(min, max, uint8_t(1) << logBitWidth);
812 // perform quantization relative to the values in the current leaf node
813 int offset = 0;
814 switch (logBitWidth) {
815 case 0u: {// 1 bit
816 auto *dst = reinterpret_cast<uint8_t*>(data+1);
817 const float encode = 1.0f/(max - min);
818 for (int j=0; j<64; ++j) {
819 uint8_t a = 0;
820 for (int k=0; k<8; ++k) {
821 a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << k;
822 }
823 *dst++ = a;
824 }
825 }
826 break;
827 case 1u: {// 2 bits
828 auto *dst = reinterpret_cast<uint8_t*>(data+1);
829 const float encode = 3.0f/(max - min);
830 for (int j=0; j<128; ++j) {
831 auto a = uint8_t(encode * (*src++ - min) + lut(offset++));
832 a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << 2;
833 a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << 4;
834 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 6 | a;
835 }
836 }
837 break;
838 case 2u: {// 4 bits
839 auto *dst = reinterpret_cast<uint8_t*>(data+1);
840 const float encode = 15.0f/(max - min);
841 for (int j=0; j<128; ++j) {
842 auto a = uint8_t(encode * (*src++ - min) + lut(offset++));
843 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 4 | a;
844 a = uint8_t(encode * (*src++ - min) + lut(offset++));
845 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 4 | a;
846 }
847 }
848 break;
849 case 3u: {// 8 bits
850 auto *dst = reinterpret_cast<uint8_t*>(data+1);
851 const float encode = 255.0f/(max - min);
852 for (int j=0; j<128; ++j) {
853 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
854 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
855 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
856 *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
857 }
858 }
859 break;
860 default: {// 16 bits
861 auto *dst = reinterpret_cast<uint16_t*>(data+1);
862 const double encode = 65535.0/(max - min);// note that double is required!
863 for (int j=0; j<128; ++j) {
864 *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
865 *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
866 *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
867 *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
868 }
869 }
870 }// end switch
871 }
872 };// kernel
873 forEach(srcLeafs, 8, kernel);
874} // GridBuilder::processLeafs<FpN>
875
876//================================================================================================
877
878template<typename ValueT, typename BuildT, typename StatsT>
879template<typename SrcNodeT>
880void GridBuilder<ValueT, BuildT, StatsT>::
881 processNodes(std::vector<SrcNodeT*>& srcNodes)
882{
883 using DstNodeT = typename SrcNodeT::NanoNodeT;
884 static_assert(DstNodeT::LEVEL == 1 || DstNodeT::LEVEL == 2, "Expected internal node");
885 auto kernel = [&](const Range1D& r) {
886 uint8_t* ptr = mBufferPtr + mBufferOffsets[5 - DstNodeT::LEVEL];// 3 or 4
887 for (auto i = r.begin(); i != r.end(); ++i) {
888 SrcNodeT *srcNode = srcNodes[i];
889 DstNodeT *dstNode = PtrAdd<DstNodeT>(ptr, srcNode->mDstOffset);
890 auto *data = dstNode->data();
891 if (DstNodeT::DataType::padding()>0u) std::memset(data, 0, DstNodeT::memUsage());
892 srcNode->mDstNode = dstNode;
893 data->mBBox[0] = srcNode->mOrigin; // copy origin of node
894 data->mValueMask = srcNode->mValueMask; // copy value mask
895 data->mChildMask = srcNode->mChildMask; // copy child mask
896 for (uint32_t j = 0; j != SrcNodeT::SIZE; ++j) {
897 if (data->mChildMask.isOn(j)) {
898 data->setChild(j, srcNode->mTable[j].child->mDstNode);
899 } else
900 data->setValue(j, srcNode->mTable[j].value);
901 }
902 }
903 };
904 forEach(srcNodes, 4, kernel);
905} // GridBuilder::processNodes
906
907//================================================================================================
908
909template<typename ValueT, typename BuildT, typename StatsT>
910NanoRoot<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::processRoot()
911{
912 auto *dstRoot = reinterpret_cast<DstRootT*>(mBufferPtr + mBufferOffsets[2]);
913 auto *data = dstRoot->data();
914 if (data->padding()>0) std::memset(data, 0, DstRootT::memUsage(uint32_t(mRoot.mTable.size())));
915 data->mTableSize = uint32_t(mRoot.mTable.size());
916 data->mMinimum = data->mMaximum = data->mBackground = mRoot.mBackground;
917 data->mBBox = CoordBBox(); // set to an empty bounding box
918
919 uint32_t tileID = 0;
920 for (auto iter = mRoot.mTable.begin(); iter != mRoot.mTable.end(); ++iter) {
921 auto *dstTile = data->tile(tileID++);
922 if (auto* srcChild = iter->second.child) {
923 dstTile->setChild(srcChild->mOrigin, srcChild->mDstNode, data);
924 } else {
925 dstTile->setValue(iter->first, iter->second.state, iter->second.value);
926 }
927 }
928 return dstRoot;
929} // GridBuilder::processRoot
930
931//================================================================================================
932
933template<typename ValueT, typename BuildT, typename StatsT>
934NanoTree<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::processTree()
935{
936 auto *dstTree = reinterpret_cast<DstTreeT*>(mBufferPtr + mBufferOffsets[1]);
937 auto *data = dstTree->data();
938 data->setRoot( this->processRoot() );
939
940 DstNode2 *node2 = mArray2.empty() ? nullptr : reinterpret_cast<DstNode2*>(mBufferPtr + mBufferOffsets[3]);
941 data->setFirstNode(node2);
942
943 DstNode1 *node1 = mArray1.empty() ? nullptr : reinterpret_cast<DstNode1*>(mBufferPtr + mBufferOffsets[4]);
944 data->setFirstNode(node1);
945
946 DstNode0 *node0 = mArray0.empty() ? nullptr : reinterpret_cast<DstNode0*>(mBufferPtr + mBufferOffsets[5]);
947 data->setFirstNode(node0);
948
949 data->mNodeCount[0] = static_cast<uint32_t>(mArray0.size());
950 data->mNodeCount[1] = static_cast<uint32_t>(mArray1.size());
951 data->mNodeCount[2] = static_cast<uint32_t>(mArray2.size());
952
953 // Count number of active leaf level tiles
954 data->mTileCount[0] = reduce(mArray1, uint32_t(0), [&](Range1D &r, uint32_t sum){
955 for (auto i=r.begin(); i!=r.end(); ++i) sum += mArray1[i]->mValueMask.countOn();
956 return sum;}, std::plus<uint32_t>());
957
958 // Count number of active lower internal node tiles
959 data->mTileCount[1] = reduce(mArray2, uint32_t(0), [&](Range1D &r, uint32_t sum){
960 for (auto i=r.begin(); i!=r.end(); ++i) sum += mArray2[i]->mValueMask.countOn();
961 return sum;}, std::plus<uint32_t>());
962
963 // Count number of active upper internal node tiles
964 uint32_t sum = 0;
965 for (auto &tile : mRoot.mTable) {
966 if (tile.second.child==nullptr && tile.second.state) ++sum;
967 }
968 data->mTileCount[2] = sum;
969
970 // Count number of active voxels
971 data->mVoxelCount = reduce(mArray0, uint64_t(0), [&](Range1D &r, uint64_t sum){
972 for (auto i=r.begin(); i!=r.end(); ++i) sum += mArray0[i]->mValueMask.countOn();
973 return sum;}, std::plus<uint64_t>());
974
975 data->mVoxelCount += data->mTileCount[0]*DstNode0::NUM_VALUES;
976 data->mVoxelCount += data->mTileCount[1]*DstNode1::NUM_VALUES;
977 data->mVoxelCount += data->mTileCount[2]*DstNode2::NUM_VALUES;
978
979 return dstTree;
980} // GridBuilder::processTree
981
982//================================================================================================
983
984template<typename ValueT, typename BuildT, typename StatsT>
985NanoGrid<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::
986processGrid(const Map& map,
987 const std::string& name)
988{
989 auto *dstGrid = reinterpret_cast<DstGridT*>(mBufferPtr + mBufferOffsets[0]);
990 this->processTree();
991 auto* data = dstGrid->data();
992 data->mMagic = NANOVDB_MAGIC_NUMBER;
993 data->mChecksum = 0u;
994 data->mVersion = Version();
995 data->mFlags = static_cast<uint32_t>(GridFlags::IsBreadthFirst);
996 data->mGridIndex = 0;
997 data->mGridCount = 1;
998 data->mGridSize = mBufferOffsets[8];
999 data->mWorldBBox = BBox<Vec3R>();
1000 data->mBlindMetadataOffset = 0;
1001 data->mBlindMetadataCount = 0;
1002 data->mGridClass = mGridClass;
1003 data->mGridType = mapToGridType<BuildT>();
1004 data->mData0 = 0u;
1005 data->mData1 = 0u;
1006 data->mData2 = 0u;
1007
1008 if (!isValid(data->mGridType, data->mGridClass)) {
1009 std::stringstream ss;
1010 ss << "Invalid combination of GridType("<<int(data->mGridType)
1011 << ") and GridClass("<<int(data->mGridClass)<<"). See NanoVDB.h for details!";
1012 throw std::runtime_error(ss.str());
1013 }
1014
1015 std::memset(data->mGridName, '\0', GridData::MaxNameSize);//overwrite mGridName
1016 strncpy(data->mGridName, name.c_str(), GridData::MaxNameSize-1);
1017 if (name.length() >= GridData::MaxNameSize) {// currently we don't support long grid names
1018 std::stringstream ss;
1019 ss << "Grid name \"" << name << "\" is more than " << GridData::MaxNameSize << " characters";
1020 throw std::runtime_error(ss.str());
1021 }
1022
1023 data->mVoxelSize = map.applyMap(Vec3d(1)) - map.applyMap(Vec3d(0));
1024 data->mMap = map;
1025
1026 if (mBlindDataSize>0) {
1027 auto *metaData = reinterpret_cast<GridBlindMetaData*>(mBufferPtr + mBufferOffsets[6]);
1028 data->mBlindMetadataOffset = PtrDiff(metaData, dstGrid);
1029 data->mBlindMetadataCount = 1u;// we currently support only 1 set of blind data
1030 auto *blindData = reinterpret_cast<char*>(mBufferPtr + mBufferOffsets[7]);
1031 metaData->setBlindData(blindData);
1032 }
1033
1034 return dstGrid;
1035} // GridBuilder::processGrid
1036
1037//================================================================================================
1038
1039template<typename ValueT, typename BuildT, typename StatsT>
1040template<typename ChildT>
1041struct GridBuilder<ValueT, BuildT, StatsT>::BuildRoot
1042{
1043 using ValueType = typename ChildT::ValueType;
1044 using ChildType = ChildT;
1045 static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL; // level 0 = leaf
1046 struct Tile
1047 {
1048 Tile(ChildT* c = nullptr)
1049 : child(c)
1050 {
1051 }
1052 Tile(const ValueT& v, bool s)
1053 : child(nullptr)
1054 , value(v)
1055 , state(s)
1056 {
1057 }
1058 ChildT* child;
1059 ValueT value;
1060 bool state;
1061 };
1062 using MapT = std::map<Coord, Tile>;
1063 MapT mTable;
1064 ValueT mBackground;
1065
1066 BuildRoot(const ValueT& background)
1067 : mBackground(background)
1068 {
1069 }
1070 BuildRoot(const BuildRoot&) = delete; // disallow copy-construction
1071 BuildRoot(BuildRoot&&) = default; // allow move construction
1072 BuildRoot& operator=(const BuildRoot&) = delete; // disallow copy assignment
1073 BuildRoot& operator=(BuildRoot&&) = default; // allow move assignment
1074
1075 ~BuildRoot() { this->clear(); }
1076
1077 bool empty() const { return mTable.empty(); }
1078
1079 void clear()
1080 {
1081 for (auto iter = mTable.begin(); iter != mTable.end(); ++iter)
1082 delete iter->second.child;
1083 mTable.clear();
1084 }
1085
1086 static Coord CoordToKey(const Coord& ijk) { return ijk & ~ChildT::MASK; }
1087
1088 template<typename AccT>
1089 bool isActiveAndCache(const Coord& ijk, AccT& acc) const
1090 {
1091 auto iter = mTable.find(CoordToKey(ijk));
1092 if (iter == mTable.end())
1093 return false;
1094 if (iter->second.child) {
1095 acc.insert(ijk, iter->second.child);
1096 return iter->second.child->isActiveAndCache(ijk, acc);
1097 }
1098 return iter->second.state;
1099 }
1100
1101 const ValueT& getValue(const Coord& ijk) const
1102 {
1103 auto iter = mTable.find(CoordToKey(ijk));
1104 if (iter == mTable.end()) {
1105 return mBackground;
1106 } else if (iter->second.child) {
1107 return iter->second.child->getValue(ijk);
1108 } else {
1109 return iter->second.value;
1110 }
1111 }
1112
1113 template<typename AccT>
1114 const ValueT& getValueAndCache(const Coord& ijk, AccT& acc) const
1115 {
1116 auto iter = mTable.find(CoordToKey(ijk));
1117 if (iter == mTable.end())
1118 return mBackground;
1119 if (iter->second.child) {
1120 acc.insert(ijk, iter->second.child);
1121 return iter->second.child->getValueAndCache(ijk, acc);
1122 }
1123 return iter->second.value;
1124 }
1125
1126 template<typename AccT>
1127 void setValueAndCache(const Coord& ijk, const ValueT& value, AccT& acc)
1128 {
1129 ChildT* child = nullptr;
1130 const Coord key = CoordToKey(ijk);
1131 auto iter = mTable.find(key);
1132 if (iter == mTable.end()) {
1133 child = new ChildT(ijk, mBackground, false);
1134 mTable[key] = Tile(child);
1135 } else if (iter->second.child != nullptr) {
1136 child = iter->second.child;
1137 } else {
1138 child = new ChildT(ijk, iter->second.value, iter->second.state);
1139 iter->second.child = child;
1140 }
1141 NANOVDB_ASSERT(child);
1142 acc.insert(ijk, child);
1143 child->setValueAndCache(ijk, value, acc);
1144 }
1145
1146 template<typename NodeT>
1147 uint32_t nodeCount() const
1148 {
1149 static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Root::getNodes: Invalid type");
1150 static_assert(NodeT::LEVEL < LEVEL, "Root::getNodes: LEVEL error");
1151 uint32_t sum = 0;
1152 for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
1153 if (iter->second.child == nullptr)
1154 continue; // skip tiles
1155 if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1156 ++sum;
1157 } else {
1158 sum += iter->second.child->template nodeCount<NodeT>();
1159 }
1160 }
1161 return sum;
1162 }
1163
1164 template<typename NodeT>
1165 void getNodes(std::vector<NodeT*>& array)
1166 {
1167 static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Root::getNodes: Invalid type");
1168 static_assert(NodeT::LEVEL < LEVEL, "Root::getNodes: LEVEL error");
1169 for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
1170 if (iter->second.child == nullptr)
1171 continue;
1172 if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1173 array.push_back(reinterpret_cast<NodeT*>(iter->second.child));
1174 } else {
1175 iter->second.child->getNodes(array);
1176 }
1177 }
1178 }
1179
1180 void addChild(ChildT*& child)
1181 {
1182 NANOVDB_ASSERT(child);
1183 const Coord key = CoordToKey(child->mOrigin);
1184 auto iter = mTable.find(key);
1185 if (iter != mTable.end() && iter->second.child != nullptr) { // existing child node
1186 delete iter->second.child;
1187 iter->second.child = child;
1188 } else {
1189 mTable[key] = Tile(child);
1190 }
1191 child = nullptr;
1192 }
1193
1194 template<typename NodeT>
1195 void addNode(NodeT*& node)
1196 {
1197 if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1198 this->addChild(reinterpret_cast<ChildT*&>(node));
1199 } else {
1200 ChildT* child = nullptr;
1201 const Coord key = CoordToKey(node->mOrigin);
1202 auto iter = mTable.find(key);
1203 if (iter == mTable.end()) {
1204 child = new ChildT(node->mOrigin, mBackground, false);
1205 mTable[key] = Tile(child);
1206 } else if (iter->second.child != nullptr) {
1207 child = iter->second.child;
1208 } else {
1209 child = new ChildT(node->mOrigin, iter->second.value, iter->second.state);
1210 iter->second.child = child;
1211 }
1212 child->addNode(node);
1213 }
1214 }
1215
1216 template<typename T>
1217 typename std::enable_if<std::is_floating_point<T>::value>::type
1218 signedFloodFill(T outside);
1219
1220 template<typename T>
1221 typename std::enable_if<!std::is_floating_point<T>::value>::type
1222 signedFloodFill(T) {} // no-op for non-floating point values
1223}; // GridBuilder::BuildRoot
1224
1225//================================================================================================
1226
1227template<typename ValueT, typename BuildT, typename StatsT>
1228template<typename ChildT>
1229template<typename T>
1230inline typename std::enable_if<std::is_floating_point<T>::value>::type
1231GridBuilder<ValueT, BuildT, StatsT>::BuildRoot<ChildT>::
1232 signedFloodFill(T outside)
1233{
1234 std::map<Coord, ChildT*> nodeKeys;
1235 for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
1236 if (iter->second.child == nullptr)
1237 continue;
1238 nodeKeys.insert(std::pair<Coord, ChildT*>(iter->first, iter->second.child));
1239 }
1240
1241 // We employ a simple z-scanline algorithm that inserts inactive tiles with
1242 // the inside value if they are sandwiched between inside child nodes only!
1243 auto b = nodeKeys.begin(), e = nodeKeys.end();
1244 if (b == e)
1245 return;
1246 for (auto a = b++; b != e; ++a, ++b) {
1247 Coord d = b->first - a->first; // delta of neighboring coordinates
1248 if (d[0] != 0 || d[1] != 0 || d[2] == int(ChildT::DIM))
1249 continue; // not same z-scanline or neighbors
1250 const ValueT fill[] = {a->second->getLastValue(), b->second->getFirstValue()};
1251 if (!(fill[0] < 0) || !(fill[1] < 0))
1252 continue; // scanline isn't inside
1253 Coord c = a->first + Coord(0u, 0u, ChildT::DIM);
1254 for (; c[2] != b->first[2]; c[2] += ChildT::DIM) {
1255 const Coord key = SrcRootT::CoordToKey(c);
1256 mTable[key] = typename SrcRootT::Tile(-outside, false); // inactive tile
1257 }
1258 }
1259} // Root::signedFloodFill
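// Illustrative example of the scanline fill above (not part of the original header): if two
// root children share the same (x, y) key and sit at z = 0 and z = 4*ChildT::DIM, and the last
// value of the first child and the first value of the second child are both negative (inside),
// the loop inserts inactive tiles with value -outside at z = ChildT::DIM, 2*ChildT::DIM and
// 3*ChildT::DIM, so the interior between the two nodes is filled without allocating additional
// child nodes.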
1260
1261//================================================================================================
1262
1263template<typename ValueT, typename BuildT, typename StatsT>
1264template<typename ChildT>
1265struct GridBuilder<ValueT, BuildT, StatsT>::
1266 BuildNode
1267{
1268 using ValueType = ValueT;
1269 using BuildType = BuildT;
1270 using ChildType = ChildT;
1271 static constexpr uint32_t LOG2DIM = ChildT::LOG2DIM + 1;
1272 static constexpr uint32_t TOTAL = LOG2DIM + ChildT::TOTAL; //dimension in index space
1273 static constexpr uint32_t DIM = 1u << TOTAL;
1274 static constexpr uint32_t SIZE = 1u << (3 * LOG2DIM); //number of tile values (or child pointers)
1275 static constexpr int32_t MASK = DIM - 1;
1276 static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL; // level 0 = leaf
1277 static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node
1278 using MaskT = Mask<LOG2DIM>;
1279 using NanoNodeT = typename NanoNode<BuildT, LEVEL>::Type;
1280
1281 struct Tile
1282 {
1283 Tile(ChildT* c = nullptr)
1284 : child(c)
1285 {
1286 }
1287 union
1288 {
1289 ChildT* child;
1290 ValueT value;
1291 };
1292 };
1293 Coord mOrigin;
1294 MaskT mValueMask;
1295 MaskT mChildMask;
1296 Tile mTable[SIZE];
1297
1298 union {
1299 NanoNodeT *mDstNode;
1300 uint64_t mDstOffset;
1301 };
1302
1303 BuildNode(const Coord& origin, const ValueT& value, bool state)
1304 : mOrigin(origin & ~MASK)
1305 , mValueMask(state)
1306 , mChildMask()
1307 , mDstOffset(0)
1308 {
1309 for (uint32_t i = 0; i < SIZE; ++i) {
1310 mTable[i].value = value;
1311 }
1312 }
1313 BuildNode(const BuildNode&) = delete; // disallow copy-construction
1314 BuildNode(BuildNode&&) = delete; // disallow move construction
1315 BuildNode& operator=(const BuildNode&) = delete; // disallow copy assignment
1316 BuildNode& operator=(BuildNode&&) = delete; // disallow move assignment
1317 ~BuildNode()
1318 {
1319 for (auto iter = mChildMask.beginOn(); iter; ++iter) {
1320 delete mTable[*iter].child;
1321 }
1322 }
1323
1324 static uint32_t CoordToOffset(const Coord& ijk)
1325 {
1326 return (((ijk[0] & MASK) >> ChildT::TOTAL) << (2 * LOG2DIM)) +
1327 (((ijk[1] & MASK) >> ChildT::TOTAL) << (LOG2DIM)) +
1328 ((ijk[2] & MASK) >> ChildT::TOTAL);
1329 }
1330
1331 static Coord OffsetToLocalCoord(uint32_t n)
1332 {
1333 NANOVDB_ASSERT(n < SIZE);
1334 const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1);
1335 return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & ((1 << LOG2DIM) - 1));
1336 }
1337
1338 void localToGlobalCoord(Coord& ijk) const
1339 {
1340 ijk <<= ChildT::TOTAL;
1341 ijk += mOrigin;
1342 }
1343
1344 Coord offsetToGlobalCoord(uint32_t n) const
1345 {
1346 Coord ijk = BuildNode::OffsetToLocalCoord(n);
1347 this->localToGlobalCoord(ijk);
1348 return ijk;
1349 }
1350
1351 template<typename AccT>
1352 bool isActiveAndCache(const Coord& ijk, AccT& acc) const
1353 {
1354 const uint32_t n = CoordToOffset(ijk);
1355 if (mChildMask.isOn(n)) {
1356 acc.insert(ijk, const_cast<ChildT*>(mTable[n].child));
1357 return mTable[n].child->isActiveAndCache(ijk, acc);
1358 }
1359 return mValueMask.isOn(n);
1360 }
1361
1362 ValueT getFirstValue() const { return mChildMask.isOn(0) ? mTable[0].child->getFirstValue() : mTable[0].value; }
1363 ValueT getLastValue() const { return mChildMask.isOn(SIZE - 1) ? mTable[SIZE - 1].child->getLastValue() : mTable[SIZE - 1].value; }
1364
1365 const ValueT& getValue(const Coord& ijk) const
1366 {
1367 const uint32_t n = CoordToOffset(ijk);
1368 if (mChildMask.isOn(n)) {
1369 return mTable[n].child->getValue(ijk);
1370 }
1371 return mTable[n].value;
1372 }
1373
1374 template<typename AccT>
1375 const ValueT& getValueAndCache(const Coord& ijk, AccT& acc) const
1376 {
1377 const uint32_t n = CoordToOffset(ijk);
1378 if (mChildMask.isOn(n)) {
1379 acc.insert(ijk, const_cast<ChildT*>(mTable[n].child));
1380 return mTable[n].child->getValueAndCache(ijk, acc);
1381 }
1382 return mTable[n].value;
1383 }
1384
1385 void setValue(const Coord& ijk, const ValueT& value)
1386 {
1387 const uint32_t n = CoordToOffset(ijk);
1388 ChildT* child = nullptr;
1389 if (mChildMask.isOn(n)) {
1390 child = mTable[n].child;
1391 } else {
1392 child = new ChildT(ijk, mTable[n].value, mValueMask.isOn(n));
1393 mTable[n].child = child;
1394 mChildMask.setOn(n);
1395 }
1396 child->setValue(ijk, value);
1397 }
1398
1399 template<typename AccT>
1400 void setValueAndCache(const Coord& ijk, const ValueT& value, AccT& acc)
1401 {
1402 const uint32_t n = CoordToOffset(ijk);
1403 ChildT* child = nullptr;
1404 if (mChildMask.isOn(n)) {
1405 child = mTable[n].child;
1406 } else {
1407 child = new ChildT(ijk, mTable[n].value, mValueMask.isOn(n));
1408 mTable[n].child = child;
1409 mChildMask.setOn(n);
1410 }
1411 acc.insert(ijk, child);
1412 child->setValueAndCache(ijk, value, acc);
1413 }
1414
1415 template<typename NodeT>
1416 uint32_t nodeCount() const
1417 {
1418 static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Node::getNodes: Invalid type");
1419 NANOVDB_ASSERT(NodeT::LEVEL < LEVEL);
1420 uint32_t sum = 0;
1421 if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1422 sum += mChildMask.countOn();
1423 } else {
1424 for (auto iter = mChildMask.beginOn(); iter; ++iter) {
1425 sum += mTable[*iter].child->template nodeCount<NodeT>();
1426 }
1427 }
1428 return sum;
1429 }
1430
1431 template<typename NodeT>
1432 void getNodes(std::vector<NodeT*>& array)
1433 {
1434 static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Node::getNodes: Invalid type");
1435 NANOVDB_ASSERT(NodeT::LEVEL < LEVEL);
1436 for (auto iter = mChildMask.beginOn(); iter; ++iter) {
1437 if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1438 array.push_back(reinterpret_cast<NodeT*>(mTable[*iter].child));
1439 } else {
1440 mTable[*iter].child->getNodes(array);
1441 }
1442 }
1443 }
1444
1445 void addChild(ChildT*& child)
1446 {
1447 NANOVDB_ASSERT(child && (child->mOrigin & ~MASK) == this->mOrigin);
1448 const uint32_t n = CoordToOffset(child->mOrigin);
1449 if (mChildMask.isOn(n)) {
1450 delete mTable[n].child;
1451 } else {
1452 mChildMask.setOn(n);
1453 }
1454 mTable[n].child = child;
1455 child = nullptr;
1456 }
1457
1458 template<typename NodeT>
1459 void addNode(NodeT*& node)
1460 {
1461 if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1462 this->addChild(reinterpret_cast<ChildT*&>(node));
1463 } else {
1464 const uint32_t n = CoordToOffset(node->mOrigin);
1465 ChildT* child = nullptr;
1466 if (mChildMask.isOn(n)) {
1467 child = mTable[n].child;
1468 } else {
1469 child = new ChildT(node->mOrigin, mTable[n].value, mValueMask.isOn(n));
1470 mTable[n].child = child;
1471 mChildMask.setOn(n);
1472 }
1473 child->addNode(node);
1474 }
1475 }
1476
1477 template<typename T>
1478 typename std::enable_if<std::is_floating_point<T>::value>::type
1479 signedFloodFill(T outside);
1480 template<typename T>
1481 typename std::enable_if<!std::is_floating_point<T>::value>::type
1482 signedFloodFill(T) {} // no-op for non-floating point values
1483}; // GridBuilder::BuildNode
1484
1485//================================================================================================
1486
1487template<typename ValueT, typename BuildT, typename StatsT>
1488template<typename ChildT>
1489template<typename T>
1490inline typename std::enable_if<std::is_floating_point<T>::value>::type
1491GridBuilder<ValueT, BuildT, StatsT>::BuildNode<ChildT>::
1492 signedFloodFill(T outside)
1493{
1494 const uint32_t first = *mChildMask.beginOn();
1495 if (first < NUM_VALUES) {
1496 bool xInside = mTable[first].child->getFirstValue() < 0;
1497 bool yInside = xInside, zInside = xInside;
1498 for (uint32_t x = 0; x != (1 << LOG2DIM); ++x) {
1499 const uint32_t x00 = x << (2 * LOG2DIM); // offset for block(x, 0, 0)
1500 if (mChildMask.isOn(x00)) {
1501 xInside = mTable[x00].child->getLastValue() < 0;
1502 }
1503 yInside = xInside;
1504 for (uint32_t y = 0; y != (1u << LOG2DIM); ++y) {
1505 const uint32_t xy0 = x00 + (y << LOG2DIM); // offset for block(x, y, 0)
1506 if (mChildMask.isOn(xy0))
1507 yInside = mTable[xy0].child->getLastValue() < 0;
1508 zInside = yInside;
1509 for (uint32_t z = 0; z != (1 << LOG2DIM); ++z) {
1510 const uint32_t xyz = xy0 + z; // offset for block(x, y, z)
1511 if (mChildMask.isOn(xyz)) {
1512 zInside = mTable[xyz].child->getLastValue() < 0;
1513 } else {
1514 mTable[xyz].value = zInside ? -outside : outside;
1515 }
1516 }
1517 }
1518 }
1519 }
1520} // Node::signedFloodFill
1521
1522//================================================================================================
1523
1524template<typename ValueT, typename BuildT, typename StatsT>
1525struct GridBuilder<ValueT, BuildT, StatsT>::
1526 BuildLeaf
1527{
1528 using ValueType = ValueT;
1529 using BuildType = BuildT;
1530 static constexpr uint32_t LOG2DIM = 3;
1531 static constexpr uint32_t TOTAL = LOG2DIM; // needed by parent nodes
1532 static constexpr uint32_t DIM = 1u << TOTAL;
1533 static constexpr uint32_t SIZE = 1u << 3 * LOG2DIM; // total number of voxels represented by this node
1534 static constexpr int32_t MASK = DIM - 1; // mask for bit operations
1535 static constexpr uint32_t LEVEL = 0; // level 0 = leaf
1536 static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node
1538 using NanoLeafT = typename NanoNode<BuildT, LEVEL>::Type;
1539
1540 Coord mOrigin;
1541 Mask<LOG2DIM> mValueMask;
1542 ValueT mValues[SIZE];
1543 union {
1544 NanoLeafT* mDstNode;
1545 uint64_t mDstOffset;
1546 };
1547
1548 BuildLeaf(const Coord& ijk, const ValueT& value, bool state)
1549 : mOrigin(ijk & ~MASK)
1550 , mValueMask(state) //invalid
1551 , mDstOffset(0)
1552 {
1553 ValueT* target = mValues;
1554 uint32_t n = SIZE;
1555 while (n--) {
1556 *target++ = value;
1557 }
1558 }
1559 BuildLeaf(const BuildLeaf&) = delete; // disallow copy-construction
1560 BuildLeaf(BuildLeaf&&) = delete; // disallow move construction
1561 BuildLeaf& operator=(const BuildLeaf&) = delete; // disallow copy assignment
1562 BuildLeaf& operator=(BuildLeaf&&) = delete; // disallow move assignment
1563 ~BuildLeaf() = default;
1564
1565 /// @brief Return the linear offset corresponding to the given coordinate
1566 static uint32_t CoordToOffset(const Coord& ijk)
1567 {
1568 return ((ijk[0] & MASK) << (2 * LOG2DIM)) + ((ijk[1] & MASK) << LOG2DIM) + (ijk[2] & MASK);
1569 }
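// Worked example for the mapping above: with LOG2DIM = 3 and MASK = 7, the global coordinate
// (9, 2, 19) reduces to the local coordinate (1, 2, 3) inside its leaf, so the linear offset is
// (1 << 6) + (2 << 3) + 3 = 64 + 16 + 3 = 83, i.e. x-major ordering with z as the
// fastest-moving index.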
1570
1571 static Coord OffsetToLocalCoord(uint32_t n)
1572 {
1573 NANOVDB_ASSERT(n < SIZE);
1574 const int32_t m = n & ((1 << 2 * LOG2DIM) - 1);
1575 return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & MASK);
1576 }
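// Worked example (illustrative): with LOG2DIM = 3 and MASK = 7, the local
// coordinate (1, 2, 3) maps to CoordToOffset() = (1 << 6) + (2 << 3) + 3 = 83,
// and OffsetToLocalCoord(83) recovers Coord(1, 2, 3).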
1577
1578 void localToGlobalCoord(Coord& ijk) const
1579 {
1580 ijk += mOrigin;
1581 }
1582
1583 Coord offsetToGlobalCoord(uint32_t n) const
1584 {
1585 Coord ijk = OffsetToLocalCoord(n);
1586 this->localToGlobalCoord(ijk);
1587 return ijk;
1588 }
1589
1590 template<typename AccT>
1591 bool isActiveAndCache(const Coord& ijk, const AccT&) const
1592 {
1593 return mValueMask.isOn(CoordToOffset(ijk));
1594 }
1595
1596 ValueT getFirstValue() const { return mValues[0]; }
1597 ValueT getLastValue() const { return mValues[SIZE - 1]; }
1598
1599 const ValueT& getValue(const Coord& ijk) const
1600 {
1601 return mValues[CoordToOffset(ijk)];
1602 }
1603
1604 template<typename AccT>
1605 const ValueT& getValueAndCache(const Coord& ijk, const AccT&) const
1606 {
1607 return mValues[CoordToOffset(ijk)];
1608 }
1609
1610 template<typename AccT>
1611 void setValueAndCache(const Coord& ijk, const ValueT& value, const AccT&)
1612 {
1613 const uint32_t n = CoordToOffset(ijk);
1614 mValueMask.setOn(n);
1615 mValues[n] = value;
1616 }
1617
1618 void setValue(const Coord& ijk, const ValueT& value)
1619 {
1620 const uint32_t n = CoordToOffset(ijk);
1621 mValueMask.setOn(n);
1622 mValues[n] = value;
1623 }
1624
1625 template<typename NodeT>
1626 void getNodes(std::vector<NodeT*>&) { NANOVDB_ASSERT(false); }
1627
1628 template<typename NodeT>
1629 void addNode(NodeT*&) {}
1630
1631 template<typename NodeT>
1632 uint32_t nodeCount() const
1633 {
1634 NANOVDB_ASSERT(false);// should never get called
1635 return 1;
1636 }
1637
1638 template<typename T>
1639 typename std::enable_if<std::is_floating_point<T>::value>::type
1640 signedFloodFill(T outside);
1641 template<typename T>
1642 typename std::enable_if<!std::is_floating_point<T>::value>::type
1643 signedFloodFill(T) {} // no-op for non-floating-point values
1644}; // BuildLeaf
1645
1646//================================================================================================
1647
1648template<typename ValueT, typename BuildT, typename StatsT>
1649template<typename T>
1650inline typename std::enable_if<std::is_floating_point<T>::value>::type
1651 GridBuilder<ValueT, BuildT, StatsT>::BuildLeaf::
1652 signedFloodFill(T outside)
1653{
1654 const uint32_t first = *mValueMask.beginOn();
1655 if (first < SIZE) {
1656 bool xInside = mValues[first] < 0, yInside = xInside, zInside = xInside;
1657 for (uint32_t x = 0; x != DIM; ++x) {
1658 const uint32_t x00 = x << (2 * LOG2DIM);
1659 if (mValueMask.isOn(x00))
1660 xInside = mValues[x00] < 0; // element(x, 0, 0)
1661 yInside = xInside;
1662 for (uint32_t y = 0; y != DIM; ++y) {
1663 const uint32_t xy0 = x00 + (y << LOG2DIM);
1664 if (mValueMask.isOn(xy0))
1665 yInside = mValues[xy0] < 0; // element(x, y, 0)
1666 zInside = yInside;
1667 for (uint32_t z = 0; z != (1 << LOG2DIM); ++z) {
1668 const uint32_t xyz = xy0 + z; // element(x, y, z)
1669 if (mValueMask.isOn(xyz)) {
1670 zInside = mValues[xyz] < 0;
1671 } else {
1672 mValues[xyz] = zInside ? -outside : outside;
1673 }
1674 }
1675 }
1676 }
1677 }
1678} // BuildLeaf::signedFloodFill
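// Voxel-level analog of BuildNode::signedFloodFill above: active voxels keep
// their values, while each inactive voxel is set to -outside or +outside
// depending on the sign of the most recently scanned active voxel.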
1679
1680//================================================================================================
1681
1682template<typename ValueT, typename BuildT, typename StatsT>
1683struct GridBuilder<ValueT, BuildT, StatsT>::
1684 ValueAccessor
1685{
1686 ValueAccessor(SrcRootT& root)
1687 : mKeys{Coord(Maximum<int>::value()), Coord(Maximum<int>::value()), Coord(Maximum<int>::value())}
1688 , mNode{nullptr, nullptr, nullptr, &root}
1689 {
1690 }
1691 template<typename NodeT>
1692 bool isCached(const Coord& ijk) const
1693 {
1694 return (ijk[0] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][0] &&
1695 (ijk[1] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][1] &&
1696 (ijk[2] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][2];
1697 }
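// A cache hit means the node-aligned origin of ijk (ijk & ~NodeT::MASK) matches
// the key stored for that tree level, i.e. the node pointer in
// mNode[NodeT::LEVEL] already covers ijk.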
1698 const ValueT& getValue(const Coord& ijk)
1699 {
1700 if (this->isCached<SrcNode0>(ijk)) {
1701 return ((SrcNode0*)mNode[0])->getValueAndCache(ijk, *this);
1702 } else if (this->isCached<SrcNode1>(ijk)) {
1703 return ((SrcNode1*)mNode[1])->getValueAndCache(ijk, *this);
1704 } else if (this->isCached<SrcNode2>(ijk)) {
1705 return ((SrcNode2*)mNode[2])->getValueAndCache(ijk, *this);
1706 }
1707 return ((SrcRootT*)mNode[3])->getValueAndCache(ijk, *this);
1708 }
1709 /// @brief Sets value in a leaf node and returns it.
1710 SrcNode0* setValue(const Coord& ijk, const ValueT& value)
1711 {
1712 if (this->isCached<SrcNode0>(ijk)) {
1713 ((SrcNode0*)mNode[0])->setValueAndCache(ijk, value, *this);
1714 } else if (this->isCached<SrcNode1>(ijk)) {
1715 ((SrcNode1*)mNode[1])->setValueAndCache(ijk, value, *this);
1716 } else if (this->isCached<SrcNode2>(ijk)) {
1717 ((SrcNode2*)mNode[2])->setValueAndCache(ijk, value, *this);
1718 } else {
1719 ((SrcRootT*)mNode[3])->setValueAndCache(ijk, value, *this);
1720 }
1721 NANOVDB_ASSERT(this->isCached<SrcNode0>(ijk));
1722 return (SrcNode0*)mNode[0];
1723 }
1724 bool isActive(const Coord& ijk)
1725 {
1726 if (this->isCached<SrcNode0>(ijk)) {
1727 return ((SrcNode0*)mNode[0])->isActiveAndCache(ijk, *this);
1728 } else if (this->isCached<SrcNode1>(ijk)) {
1729 return ((SrcNode1*)mNode[1])->isActiveAndCache(ijk, *this);
1730 } else if (this->isCached<SrcNode2>(ijk)) {
1731 return ((SrcNode2*)mNode[2])->isActiveAndCache(ijk, *this);
1732 }
1733 return ((SrcRootT*)mNode[3])->isActiveAndCache(ijk, *this);
1734 }
1735 bool isValueOn(const Coord& ijk) { return this->isActive(ijk); }
1736 template<typename NodeT>
1737 void insert(const Coord& ijk, NodeT* node)
1738 {
1739 mKeys[NodeT::LEVEL] = ijk & ~NodeT::MASK;
1740 mNode[NodeT::LEVEL] = node;
1741 }
1742 Coord mKeys[3];
1743 void* mNode[4];
1744}; // ValueAccessor
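// Minimal usage sketch (illustrative only; coordinates and values are arbitrary,
// and getHandle() refers to the overload declared earlier in this header):
//
// nanovdb::GridBuilder<float> builder(3.0f); // background value = 3.0f
// auto acc = builder.getAccessor(); // caches the nodes visited last
// acc.setValue(nanovdb::Coord(1, 2, 3), -1.5f); // allocates branch and leaf on demand
// float v = acc.getValue(nanovdb::Coord(1, 2, 3)); // fast path: the leaf is still cached
// auto handle = builder.getHandle(0.5); // voxel size 0.5 -> NanoVDB grid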
1745
1746} // namespace nanovdb
1747
1748#endif // NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED