eigen_mul.cu
#include "gplex_mul.h"

#ifdef EIGEN_TEST

#include <Eigen/Dense>

#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>

using Matrix66 = Eigen::Matrix<float, 6, 6, Eigen::AutoAlign>;
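
// Grid-stride fill: set every 6x6 matrix in a[0..N) to the constant value `val`.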
__global__ void set_mem(Matrix66* a, float val, size_t N) {
  Matrix66 v = Matrix66::Constant(val);
  for (int n = threadIdx.x + blockIdx.x * blockDim.x;
       n < N;
       n += blockDim.x * gridDim.x) {
    a[n] = v;
  }
}
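
// Validate a result buffer: with a and b filled with ones, every entry of each
// product matrix is a dot product of six 1*1 terms, so it must equal exactly
// 6.0f and all matrices must agree. Two representative entries are inspected.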
bool check(const int N, const Matrix66* c, bool managed)
{
  const float eps = 1e-30f;
  float c0, c36;  // c[0](0,0) and c[1](0,0), i.e. floats 0 and 36 of the flat buffer
  if (managed) {
    c0 = c[0](0,0);
    c36 = c[1](0,0);
    // Reading above migrated the pages to the host; prefetch them back to the
    // device so later kernel launches are not penalized.
    int device = -1;
    cudaGetDevice(&device);
    cudaMemPrefetchAsync(c, sizeof(Matrix66)*N, device, NULL);
  } else {
    // Copy into a heap-allocated, Eigen-aligned buffer (a variable-length
    // array on the stack is not standard C++ and can overflow for large N).
    std::vector<Matrix66, Eigen::aligned_allocator<Matrix66>> h(N);
    cudaMemcpy(h.data(), c, N*sizeof(Matrix66), cudaMemcpyDefault);
    c0 = h[0](0,0);
    c36 = h[1](0,0);
  }
  bool pass = (std::abs(c0 - c36) < eps) && (std::abs(c0 - 6.0f) < eps);
  if (!pass) {
    std::cout << "Fail check c[0]=" << c0 << " c[36]=" << c36 << std::endl;
  }
  return pass;
}
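
// Variant 1: naive form; the product expression reads its operands directly
// from global memory and assigns the result to c[n].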
__global__ void eigen_naive_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
  for (int n = threadIdx.x + blockIdx.x * blockDim.x;
       n < N;
       n += blockDim.x * gridDim.x) {
    c[n] = a[n] * b[n];
  }
}
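
// Variant 2: evaluate the product into an explicit local matrix first, then
// copy it to global memory in one step.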
__global__ void eigen_reg_c_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
  for (int n = threadIdx.x + blockIdx.x * blockDim.x;
       n < N;
       n += blockDim.x * gridDim.x) {
    Matrix66 c_reg;
    c_reg = a[n] * b[n];
    c[n] = c_reg;
  }
}
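
// Variant 3: copy both operands into local matrices before multiplying, so the
// product reads its inputs from thread-local storage instead of global memory.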
__global__ void eigen_reg_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
  for (int n = threadIdx.x + blockIdx.x * blockDim.x;
       n < N;
       n += blockDim.x * gridDim.x) {
    Matrix66 a_reg(a[n]), b_reg(b[n]);
    Matrix66 c_reg(a_reg * b_reg);
    c[n] = c_reg;
  }
}
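
// Driver: allocate N = Nwidth matrices per operand in managed or device
// memory, fill a and b with ones, then run and validate the three
// multiplication variants, iter launches each.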
void eigen_run_naive_mul(int iter, bool managed)
{
  constexpr int N = Nwidth;
  constexpr size_t sz = sizeof(Matrix66)*N;

  Matrix66* a;
  Matrix66* b;
  Matrix66* c;
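
  // Allocate in unified (managed) memory, prefetched to the current device,
  // or in plain device memory.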
  if (managed) {
    cudaMallocManaged((void**)&a, sz);
    cudaMallocManaged((void**)&b, sz);
    cudaMallocManaged((void**)&c, sz);
    int device = -1;
    cudaGetDevice(&device);
    cudaMemPrefetchAsync(a, sz, device, NULL);
    cudaMemPrefetchAsync(b, sz, device, NULL);
    cudaMemPrefetchAsync(c, sz, device, NULL);
  } else {
    cudaMalloc((void**)&a, sz);
    cudaMalloc((void**)&b, sz);
    cudaMalloc((void**)&c, sz);
  }
  cudaCheckError();
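
  // Launch configuration: one thread per matrix, block_size threads per block
  // (the grid-stride loops keep the kernels correct for any grid size).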
  dim3 grid (((N-1)/block_size + 1), 1, 1);
  dim3 block (block_size, 1, 1);

  set_mem <<< grid, block >>> (a, 1.f, N);
  set_mem <<< grid, block >>> (b, 1.f, N);
  set_mem <<< grid, block >>> (c, 0.f, N);
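
  // The inputs are only read from here on; advising read-mostly lets the
  // unified memory system keep read-only copies instead of migrating pages.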
  if (managed) {
    cudaMemAdvise(a, sz, cudaMemAdviseSetReadMostly, 0);
    cudaMemAdvise(b, sz, cudaMemAdviseSetReadMostly, 0);
  }
  cudaCheckErrorSync();
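
  // Run each multiplication variant iter times, checking the result after
  // each batch of launches.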
  for (int i = 0; i < iter; ++i)
    eigen_naive_mult_kn <<< grid, block >>> (a, b, c, N);
  cudaCheckErrorSync();
  assert(check(N, c, managed));

  for (int i = 0; i < iter; ++i)
    eigen_reg_c_mult_kn <<< grid, block >>> (a, b, c, N);
  cudaCheckErrorSync();
  assert(check(N, c, managed));

  for (int i = 0; i < iter; ++i)
    eigen_reg_mult_kn <<< grid, block >>> (a, b, c, N);
  cudaCheckErrorSync();
  assert(check(N, c, managed));

  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
  cudaCheckErrorSync();
}
#endif // EIGEN_TEST
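
// Example driver (a minimal sketch, not part of the original file): assuming
// gplex_mul.h declares eigen_run_naive_mul and the translation unit is built
// with -DEIGEN_TEST, the benchmark could be invoked like this:
//
//   int main() {
//     const int iter = 100;
//     eigen_run_naive_mul(iter, /*managed=*/false);  // plain device memory
//     eigen_run_naive_mul(iter, /*managed=*/true);   // unified (managed) memory
//     return 0;
//   }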