Commit 95fba2a
Changed the experimental multi-threaded .obj parser to use lfpAlloc instead of ltalloc, since ltalloc is not portable (e.g. it does not support the ARM architecture).
1 parent 99518b6 commit 95fba2a

File tree: 12 files changed (+402, -1064 lines)

experimental/README.md

Lines changed: 12 additions & 1 deletion
@@ -1,5 +1,16 @@
-Experimental code for .obj loader.
+# Experimental code for .obj loader.
 
 * Multi-threaded optimized parser : tinyobj_loader_opt.h
+
+## Requirements
+
+* C++11 compiler
+
+## Compile options
+
 * zstd compressed .obj support. `--with-zstd` premake option.
 * gzip compressed .obj support. `--with-zlib` premake option.
+
+## Licenses
+
+* lfpAlloc : MIT license.
Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
#ifndef LF_POOL_ALLOCATOR
#define LF_POOL_ALLOCATOR

#include <memory>
#include <thread>
#include <lfpAlloc/PoolDispatcher.hpp>

namespace lfpAlloc {
template <typename T, std::size_t NumPools = 70>
class lfpAllocator {
public:
    using value_type = T;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = T const&;

    template <typename U>
    struct rebind {
        typedef lfpAllocator<U, NumPools> other;
    };

    lfpAllocator() {}

    template <typename U>
    lfpAllocator(lfpAllocator<U, NumPools>&&) noexcept {}

    template <typename U>
    lfpAllocator(const lfpAllocator<U, NumPools>&) noexcept {}

    T* allocate(std::size_t count) {
        // Requests that fit within the largest pool cell go through the
        // thread-local pool dispatcher; anything larger falls back to new[].
        if (sizeof(T) * count <=
            alignof(std::max_align_t) * NumPools - sizeof(void*)) {
            return reinterpret_cast<T*>(
                dispatcher_.allocate(sizeof(T) * count));
        } else {
            return new T[count];
        }
    }

    void deallocate(T* p, std::size_t count) noexcept {
        // Mirror the size check in allocate() so pooled and new[]-allocated
        // memory are released through the matching path.
        if (sizeof(T) * count <=
            alignof(std::max_align_t) * NumPools - sizeof(void*)) {
            dispatcher_.deallocate(p, sizeof(T) * count);
        } else {
            delete[] p;
        }
    }

    // Should not be required, but allocator_traits is not complete in
    // gcc 4.9.1
    template <typename U>
    void destroy(U* p) {
        p->~U();
    }

    template <typename U, typename... Args>
    void construct(U* p, Args&&... args) {
        new (p) U(std::forward<Args>(args)...);
    }

    template <typename Ty, typename U, std::size_t N, std::size_t M>
    friend bool operator==(const lfpAllocator<Ty, N>&,
                           const lfpAllocator<U, M>&) noexcept;

    template <typename U, std::size_t M>
    friend class lfpAllocator;

private:
    static PoolDispatcher<NumPools> dispatcher_;
};

template <typename T, std::size_t N>
PoolDispatcher<N> lfpAllocator<T, N>::dispatcher_;

template <typename T, typename U, std::size_t N, std::size_t M>
inline bool operator==(const lfpAllocator<T, N>&,
                       const lfpAllocator<U, M>&) noexcept {
    return N == M;
}

template <typename T, typename U, std::size_t N, std::size_t M>
inline bool operator!=(const lfpAllocator<T, N>& left,
                       const lfpAllocator<U, M>& right) noexcept {
    return !(left == right);
}
}

#endif
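Since lfpAllocator models the C++11 Allocator requirements (allocate/deallocate plus rebind, construct, and destroy), it can be swapped into any allocator-aware standard container. A minimal usage sketch follows; it is not part of the commit, and the include path lfpAlloc/Allocator.hpp is an assumed name for the header shown above.

#include <list>
#include <lfpAlloc/Allocator.hpp> // assumed path for the allocator header above

int main() {
    // Each list node is a small allocation, so it is served from the
    // thread-local size-class pools instead of hitting the system
    // allocator on every insertion.
    std::list<int, lfpAlloc::lfpAllocator<int>> indices;
    for (int i = 0; i < 100000; ++i) {
        indices.push_back(i);
    }
    indices.clear(); // cells go back onto the calling thread's free lists
    return 0;
}

The portability point in the commit message follows from this design: the allocator is plain, header-only C++11 with no platform-specific code, so the experimental parser can build on ARM, which ltalloc does not support.
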
experimental/lfpAlloc/ChunkList.hpp

Lines changed: 116 additions & 0 deletions
@@ -0,0 +1,116 @@
#ifndef LF_POOL_ALLOC_CHUNK_LIST
#define LF_POOL_ALLOC_CHUNK_LIST

#include <cstdint>
#include <atomic>
#include <type_traits>

#ifndef LFP_ALLOW_BLOCKING
static_assert(ATOMIC_POINTER_LOCK_FREE == 2,
              "Atomic pointer is not lock-free.");
#endif

namespace lfpAlloc {

template <std::size_t Size>
struct Cell {
    uint8_t val_[Size];
    Cell* next_ = this + 1;
};

// For small types (less than the size of void*), no additional
// space is needed, so union val_ with next_ to avoid overhead.
template <>
struct Cell<0> {
    Cell() : next_{this + 1} {}
    union {
        uint8_t val_[sizeof(Cell*)];
        Cell* next_;
    };
};

template <std::size_t Size, std::size_t AllocationsPerChunk>
struct Chunk {
    Chunk() noexcept {
        auto& last = memBlock_[AllocationsPerChunk - 1];
        last.next_ = nullptr;
    }
    Cell<Size> memBlock_[AllocationsPerChunk];
};

template <typename T>
struct Node {
    Node() : val_(), next_(nullptr) {}
    Node(const T& val) : val_(val), next_(nullptr) {}
    T val_;
    std::atomic<Node<T>*> next_;
};

template <std::size_t Size, std::size_t AllocationsPerChunk>
class ChunkList {
    static constexpr auto CellSize =
        (Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
    using Chunk_t = Chunk<CellSize, AllocationsPerChunk>;
    using Cell_t = Cell<CellSize>;

    using ChunkNode = Node<Chunk_t>;
    using CellNode = Node<Cell_t*>;

public:
    static ChunkList& getInstance() {
        static ChunkList c;
        return c;
    }

    Cell_t* allocateChain() {
        CellNode* recentHead = head_.load();
        CellNode* currentNext = nullptr;
        do {
            // If there are no available chains, allocate a new chunk
            if (!recentHead) {
                ChunkNode* currentHandle;

                // Make a new node
                auto newChunk = new ChunkNode();

                // Add the chunk to the chain
                do {
                    currentHandle = handle_.load();
                    newChunk->next_ = currentHandle;
                } while (
                    !handle_.compare_exchange_weak(currentHandle, newChunk));
                return &newChunk->val_.memBlock_[0];
            }

            currentNext = recentHead->next_;
        } while (!head_.compare_exchange_weak(recentHead, currentNext));

        auto retnValue = recentHead->val_;
        delete recentHead;
        return retnValue;
    }

    void deallocateChain(Cell_t* newCell) {
        if (!newCell) {
            return;
        }
        CellNode* currentHead = head_.load();

        // Construct a new node to be added to the linked list
        CellNode* newHead = new CellNode(newCell);

        // Add the chain to the linked list
        do {
            newHead->next_.store(currentHead, std::memory_order_release);
        } while (!head_.compare_exchange_weak(currentHead, newHead));
    }

private:
    ChunkList() : handle_(nullptr), head_(nullptr) {}

    std::atomic<ChunkNode*> handle_;
    std::atomic<CellNode*> head_;
};
}

#endif
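Both allocateChain and deallocateChain follow the usual lock-free stack pattern: load the current head, prepare the new link, then publish it with compare_exchange_weak and retry if another thread moved the head in the meantime. A standalone sketch of the push side, for illustration only (not part of lfpAlloc):

#include <atomic>

struct FreeNode {
    FreeNode* next = nullptr;
};

// Push 'node' onto an atomic singly linked list -- the same
// compare-exchange loop deallocateChain uses to return cell chains.
void push(std::atomic<FreeNode*>& head, FreeNode* node) {
    FreeNode* observed = head.load(std::memory_order_relaxed);
    do {
        node->next = observed; // link to the head we last saw
    } while (!head.compare_exchange_weak(observed, node,
                                         std::memory_order_release,
                                         std::memory_order_relaxed));
}
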

experimental/lfpAlloc/LICENSE

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Adam Schwalm

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

experimental/lfpAlloc/Pool.hpp

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
#ifndef LF_POOL_ALLOC_POOL
#define LF_POOL_ALLOC_POOL

#include <lfpAlloc/Utils.hpp>
#include <lfpAlloc/ChunkList.hpp>

namespace lfpAlloc {
template <std::size_t Size, std::size_t AllocationsPerChunk>
class Pool {
    using ChunkList_t = ChunkList<Size, AllocationsPerChunk>;

public:
    static constexpr auto CellSize =
        (Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
    using Cell_t = Cell<CellSize>;

    Pool() : head_(nullptr) {}

    ~Pool() { ChunkList_t::getInstance().deallocateChain(head_); }

    void* allocate() {
        // Head loaded from head_
        Cell_t* currentHead = head_;
        Cell_t* next;

        // Out of cells to allocate
        if (!currentHead) {
            currentHead = ChunkList_t::getInstance().allocateChain();
        }

        next = currentHead->next_;
        head_ = next;
        return &currentHead->val_;
    }

    void deallocate(void* p) noexcept {
        auto newHead = reinterpret_cast<Cell_t*>(p);
        Cell_t* currentHead = head_;
        newHead->next_ = currentHead;
        head_ = newHead;
    }

private:
    Cell_t* head_;
};
}

#endif
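Pool itself is a plain, non-atomic free list; PoolDispatcher (next file) keeps one Pool per size class in a thread_local tuple, so allocate and deallocate never race. A small sketch of the free-list behaviour in isolation, for illustration only; the Size and AllocationsPerChunk values are arbitrary:

#include <lfpAlloc/Pool.hpp>

int main() {
    // Cells occupy 16 bytes each; the usable payload is Pool::CellSize
    // bytes (Size minus one pointer, i.e. 8 on a typical 64-bit platform).
    // A chain of 256 cells is fetched from the shared ChunkList the first
    // time the local free list is empty.
    lfpAlloc::Pool<16, 256> pool;

    void* a = pool.allocate(); // pops a cell from the local free list
    void* b = pool.allocate();

    pool.deallocate(a); // pushes the cells back onto this pool's list
    pool.deallocate(b);
    return 0;
}
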
experimental/lfpAlloc/PoolDispatcher.hpp

Lines changed: 79 additions & 0 deletions
@@ -0,0 +1,79 @@
#ifndef LF_POOL_DISPATCHER
#define LF_POOL_DISPATCHER

#include <tuple>
#include <cassert>
#include <cstddef>
#include <lfpAlloc/Pool.hpp>

#ifndef LFP_ALLOCATIONS_PER_CHUNK
#define LFP_ALLOCATIONS_PER_CHUNK 64 * 100
#endif

namespace lfpAlloc {
namespace detail {

// Builds std::tuple<Pool<A, N>, Pool<2*A, N>, ..., Pool<NumPools*A, N>>
// where A = alignof(std::max_align_t) and N = LFP_ALLOCATIONS_PER_CHUNK.
template <std::size_t Num, uint16_t... Ts>
struct Pools : Pools<Num - 1, alignof(std::max_align_t) * Num, Ts...> {};

template <uint16_t... Size>
struct Pools<0, Size...> {
    using type = std::tuple<Pool<Size, LFP_ALLOCATIONS_PER_CHUNK>...>;
};
}

template <std::size_t NumPools>
class PoolDispatcher {
public:
    void* allocate(std::size_t size) { return dispatchAllocate<0>(size); }

    void deallocate(void* p, std::size_t size) noexcept {
        dispatchDeallocate<0>(p, size);
    }

private:
    thread_local static typename detail::Pools<NumPools>::type pools_;
    static_assert(NumPools > 0, "Invalid number of pools");

    template <std::size_t Index>
    typename std::enable_if<(Index < NumPools), void*>::type
    dispatchAllocate(std::size_t const& requestSize) {
        if (requestSize <= std::get<Index>(pools_).CellSize) {
            return std::get<Index>(pools_).allocate();
        } else {
            return dispatchAllocate<Index + 1>(requestSize);
        }
    }

    template <std::size_t Index>
    typename std::enable_if<!(Index < NumPools), void*>::type
    dispatchAllocate(std::size_t const&) {
        assert(false && "Invalid allocation size.");
        return nullptr;
    }

    template <std::size_t Index>
    typename std::enable_if<(Index < NumPools)>::type
    dispatchDeallocate(void* p, std::size_t const& requestSize) noexcept {
        if (requestSize <= std::get<Index>(pools_).CellSize) {
            std::get<Index>(pools_).deallocate(p);
        } else {
            dispatchDeallocate<Index + 1>(p, requestSize);
        }
    }

    template <std::size_t Index>
    typename std::enable_if<!(Index < NumPools)>::type
    dispatchDeallocate(void*, std::size_t const&) noexcept {
        assert(false && "Invalid deallocation size.");
    }
};

template <std::size_t NumPools>
thread_local typename detail::Pools<NumPools>::type
    PoolDispatcher<NumPools>::pools_;
}

#endif
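The detail::Pools recursion generates one Pool for every multiple of alignof(std::max_align_t) up to NumPools, and dispatchAllocate walks them at compile time until it finds the first cell size that fits. A quick illustration of what the metaprogram expands to (a hedged sketch; lfpAlloc/PoolDispatcher.hpp is an assumed path for the header above):

#include <cstddef>
#include <tuple>
#include <type_traits>
#include <lfpAlloc/PoolDispatcher.hpp> // assumed path for the dispatcher header above

constexpr std::size_t A = alignof(std::max_align_t);

// Pools<3> yields pools whose cells are A, 2*A and 3*A bytes.
static_assert(
    std::is_same<lfpAlloc::detail::Pools<3>::type,
                 std::tuple<lfpAlloc::Pool<A, LFP_ALLOCATIONS_PER_CHUNK>,
                            lfpAlloc::Pool<2 * A, LFP_ALLOCATIONS_PER_CHUNK>,
                            lfpAlloc::Pool<3 * A, LFP_ALLOCATIONS_PER_CHUNK>>>::value,
    "Pools<3> expands to three size-class pools");

With the default NumPools = 70 from the allocator header, the largest pooled request is 70 * A - sizeof(void*) bytes, i.e. 1112 bytes on a typical 64-bit platform where A is 16; larger requests fall through to new[].
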

experimental/lfpAlloc/Utils.hpp

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
#include <cstdint>

namespace lfpAlloc {
namespace detail {
template <std::size_t Val, std::size_t base = 2>
struct Log {
    enum { value = 1 + Log<Val / base, base>::value };
};

template <std::size_t base>
struct Log<1, base> {
    enum { value = 0 };
};

template <std::size_t base>
struct Log<0, base> {
    enum { value = 0 };
};
}
}
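Log is a compile-time floor logarithm: Log<Val, base>::value is floor(log_base(Val)) for Val >= 1, and Log<0> is defined as 0. It is not referenced by the other headers in this diff. A few sanity checks, for illustration only:

#include <lfpAlloc/Utils.hpp>

static_assert(lfpAlloc::detail::Log<1>::value == 0, "log2(1) == 0");
static_assert(lfpAlloc::detail::Log<8>::value == 3, "log2(8) == 3");
static_assert(lfpAlloc::detail::Log<10>::value == 3, "floor(log2(10)) == 3");
static_assert(lfpAlloc::detail::Log<81, 3>::value == 4, "log3(81) == 4");
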
