AllocatorTest.hpp
// This file sets up various definitions for unit testing.
//
// - Defines AllocatorType
// - Defines STLAllocator according to the std::allocator concept.
// - Optionally redefines the global new/delete operators when REDEFINE_NEW is defined.
#ifndef ALLOCATORTEST_HPP
#define ALLOCATORTEST_HPP
#include <limits>
#include "StdAllocator.hpp"
#include "DynamicSizePool.hpp"
// If USE_CUDA is defined, test with a device allocation
// (or a managed / pinned-host allocation via USE_UVM / USE_CUDA_HOSTALLOC below).
#if defined(USE_CUDA)
#include "cuda_runtime.h"
struct CUDAAllocator
{
  static inline void *allocate(std::size_t size) {
    void *ptr;
    cudaMalloc(&ptr, size);
    return ptr;
  }
  static inline void deallocate(void *ptr) { cudaFree(ptr); }
};
#if defined(USE_UVM)
struct UVMAllocator
{
  static inline void *allocate(std::size_t size) {
    void *ptr;
    cudaMallocManaged(&ptr, size);
    return ptr;
  }
  static inline void deallocate(void *ptr) { cudaFree(ptr); }
};
typedef UVMAllocator AllocatorType;
#elif defined(USE_CUDA_HOSTALLOC)
struct CUDAHostAllocator
{
  static inline void *allocate(std::size_t size) {
    void *ptr;
    cudaHostAlloc(&ptr, size, cudaHostAllocDefault);
    return ptr;
  }
  static inline void deallocate(void *ptr) { cudaFreeHost(ptr); }
};
typedef CUDAHostAllocator AllocatorType;
#else
typedef CUDAAllocator AllocatorType;
#endif
// If USE_OMP is defined, test with omp_target_alloc
#elif defined(USE_OMP)
#include <omp.h>
struct OMPAllocator
{
  static inline void *allocate(std::size_t size) {
    return omp_target_alloc(size, omp_get_default_device());
  }
  static inline void deallocate(void *ptr) {
    omp_target_free(ptr, omp_get_default_device());
  }
};
typedef OMPAllocator AllocatorType;
#else
// Else, use the default StdAllocator from StdAllocator.hpp
typedef StdAllocator AllocatorType;
#endif
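// Illustrative sketch (not part of the tests): every AllocatorType candidate
// above exposes the same static allocate/deallocate interface, which is what
// DynamicSizePool and the STL allocator below are written against, e.g.:
//
//   void *p = AllocatorType::allocate(64);  // raw 64-byte block
//   AllocatorType::deallocate(p);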
// Define an STL-compatible allocator backed by DynamicSizePool.
template <class T>
struct STLAllocator {
  typedef T value_type;
  typedef std::size_t size_type;
  typedef DynamicSizePool<AllocatorType> PoolType;

  PoolType &m;

  STLAllocator() : m(PoolType::getInstance()) { }
  STLAllocator(const STLAllocator &other) : m(other.m) { }
  // Converting constructor so containers can rebind to other element types.
  template <class U>
  STLAllocator(const STLAllocator<U> &) : m(PoolType::getInstance()) { }

  T* allocate(std::size_t n) { return static_cast<T*>( m.allocate( n * sizeof(T) ) ); }
  void deallocate(T* p, std::size_t) { m.deallocate(p); }
  size_type max_size() const { return std::numeric_limits<size_type>::max() / sizeof(T); }
};
template <class T, class U>
bool operator==(const STLAllocator<T>&, const STLAllocator<U>&) { return true; }
template <class T, class U>
bool operator!=(const STLAllocator<T>&, const STLAllocator<U>&) { return false; }
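// Illustrative usage sketch: a standard container drawing its storage from the
// pool through STLAllocator. This assumes DynamicSizePool<AllocatorType>::getInstance()
// returns a usable singleton pool, which is what the allocator above relies on.
//
//   #include <vector>
//   std::vector<int, STLAllocator<int> > v;
//   v.reserve(16);                  // storage obtained via STLAllocator::allocate
//   for (int i = 0; i < 16; ++i)
//     v.push_back(i);
//   // storage is returned to the pool via STLAllocator::deallocate on destruction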
#if defined(REDEFINE_NEW)
#include <new>  // std::bad_alloc for the exception specifications below
// Redefine the global "::operator new(std::size_t)" and its array/delete
// counterparts so that all allocations go through the pool.
void *operator new (std::size_t size) throw (std::bad_alloc)
{
  return DynamicSizePool<AllocatorType>::getInstance().allocate(size);
}
void *operator new[] (std::size_t size) throw (std::bad_alloc)
{
  return DynamicSizePool<AllocatorType>::getInstance().allocate(size);
}
void operator delete (void *ptr) throw()
{
  DynamicSizePool<AllocatorType>::getInstance().deallocate(ptr);
}
void operator delete[] (void *ptr) throw()
{
  DynamicSizePool<AllocatorType>::getInstance().deallocate(ptr);
}
#endif
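// Illustrative sketch: with REDEFINE_NEW defined, plain new/delete expressions
// in the test code are serviced by the pool without any source changes, e.g.:
//
//   double *d = new double[32];   // allocated by DynamicSizePool<AllocatorType>
//   delete[] d;                   // returned to the pool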
#endif