Commit
Adapt interface to KMM 0.3
isazi committed Nov 27, 2024
1 parent: 54dab35 · commit: ef23299
Showing 3 changed files with 15 additions and 15 deletions.
include/compas/core/assertion.h (2 changes: 1 addition & 1 deletion)
@@ -8,7 +8,7 @@ namespace compas {
[[noreturn]] __device__ __noinline__ void
panic(const char* message, const char* file, const int line, const char* func) {
printf(
"[CUDA thread (%d, %d, %d)(%d, %d, %d)] panic occured: (%s:%d:%s): %s\n",
"[GPU thread (%d, %d, %d)(%d, %d, %d)] panic occured: (%s:%d:%s): %s\n",
blockIdx.x,
blockIdx.y,
blockIdx.z,
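Note: panic() above is the device-side error reporter whose message prefix this commit changes from "CUDA thread" to "GPU thread". As a rough sketch of how such a helper is typically wired into device code, assuming a hypothetical COMPAS_ASSERT macro and example kernel that are not part of this repository:

// Hypothetical assertion macro built on the panic() helper shown above;
// COMPAS_ASSERT and example_kernel are illustrative assumptions only.
#define COMPAS_ASSERT(expr)                                     \
    do {                                                        \
        if (!(expr)) {                                          \
            compas::panic(#expr, __FILE__, __LINE__, __func__); \
        }                                                       \
    } while (0)

__global__ void example_kernel(const float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // On failure this prints the "[GPU thread ...]" panic message and does not return.
        COMPAS_ASSERT(data != nullptr);
    }
}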
include/compas/core/context.h (2 changes: 1 addition & 1 deletion)
@@ -12,7 +12,7 @@
#include "compas/core/view.h"
#include "kmm/kmm.hpp"

- #define COMPAS_CUDA_CHECK(...) KMM_CUDA_CHECK(__VA_ARGS__)
+ #define COMPAS_GPU_CHECK(...) KMM_GPU_CHECK(__VA_ARGS__)

namespace compas {

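The only change in this header is the check macro rename, tracking KMM 0.3's move from CUDA-specific to GPU-generic naming. A minimal usage sketch, assuming a KMM-style gpuMemcpyAsync alias and gpuStream_t type (illustrative, not taken from the repository):

// Illustrative only: the renamed macro wrapping an assumed KMM-style runtime alias.
void copy_to_device(void* dst, const void* src, size_t nbytes, gpuStream_t stream) {
    COMPAS_GPU_CHECK(gpuMemcpyAsync(dst, src, nbytes, gpuMemcpyHostToDevice, stream));
}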
src/trajectories/signal.cu (26 changes: 13 additions & 13 deletions)
@@ -41,7 +41,7 @@ void magnetization_to_signal_cartesian_direct(
echos,
parameters,
trajectory);
- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());

block_dim = {256};
grid_dim = {div_ceil(uint(nvoxels), block_dim.x)};
@@ -50,7 +50,7 @@
exponents,
parameters,
trajectory);
- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());

const uint block_size_x = 64;
const uint block_size_y = 1;
@@ -79,7 +79,7 @@ void magnetization_to_signal_cartesian_direct(
factors,
coil_sensitivities);

- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());
}
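The launch configurations in this file round the grid size up with div_ceil so that every voxel and readout gets a thread. A standard definition, shown here as an assumed implementation consistent with those calls:

// Ceiling division: the smallest number of blocks of size b that cover a elements.
// Assumed implementation, consistent with the grid_dim calculations above.
constexpr unsigned int div_ceil(unsigned int a, unsigned int b) {
    return (a + b - 1) / b;
}

// Example: nvoxels = 1000 and block_dim.x = 256 give grid_dim.x = div_ceil(1000, 256) = 4.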

void magnetization_to_signal_cartesian_gemm(
@@ -115,7 +115,7 @@ void magnetization_to_signal_cartesian_gemm(
echos,
parameters,
trajectory);
- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());

for (index_t icoil = 0; icoil < ncoils; icoil++) {
block_dim = {256};
@@ -126,17 +126,17 @@
coil_sensitivities.drop_axis<0>(icoil),
parameters,
trajectory);
- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());

cuComplex alpha = {1, 0};
cuComplex beta = {0, 0};

- gpuDataType_t output_type = CUDA_C_32F;
- gpuDataType_t input_type = CUDA_C_32F;
+ gpuDataType_t output_type = GPU_C_32F;
+ gpuDataType_t input_type = GPU_C_32F;
cublasGemmAlgo_t compute_algo = CUBLAS_GEMM_DEFAULT;

- COMPAS_CUDA_CHECK(cublasSetStream(context.cublas(), nullptr));
- COMPAS_CUDA_CHECK(cublasGemmEx(
+ COMPAS_GPU_CHECK(cublasSetStream(context.cublas(), nullptr));
+ COMPAS_GPU_CHECK(cublasGemmEx(
context.cublas(),
CUBLAS_OP_T, // transa
CUBLAS_OP_N, // transb
@@ -158,7 +158,7 @@
compute_algo));
}

- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());
}
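The cublasGemmEx call above is truncated by the hunk, so most of its arguments are not visible here. For reference, the general shape of a complex single-precision cublasGemmEx call looks like the sketch below; the operand names and sizes are placeholders, and it uses plain cuBLAS type names (CUDA_C_32F) rather than the KMM GPU_C_32F aliases introduced by this commit.

#include <cublas_v2.h>
#include <cuComplex.h>

// Illustrative call pattern only; A, B, C, m, n, k do not correspond to the
// actual operands of magnetization_to_signal_cartesian_gemm. In this codebase
// the call would be wrapped in COMPAS_GPU_CHECK(...).
void complex_gemm_sketch(cublasHandle_t handle,
                         const cuComplex* A,  // k x m, column-major (transposed in the call)
                         const cuComplex* B,  // k x n, column-major
                         cuComplex* C,        // m x n, column-major
                         int m, int n, int k) {
    cuComplex alpha = {1, 0};
    cuComplex beta = {0, 0};
    cublasGemmEx(
        handle,
        CUBLAS_OP_T, CUBLAS_OP_N,  // C = A^T * B
        m, n, k,
        &alpha,
        A, CUDA_C_32F, k,          // lda = k (A stored as k x m)
        B, CUDA_C_32F, k,          // ldb = k
        &beta,
        C, CUDA_C_32F, m,          // ldc = m
        CUBLAS_COMPUTE_32F,
        CUBLAS_GEMM_DEFAULT);
}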

void magnetization_to_signal_spiral(
@@ -193,7 +193,7 @@ void magnetization_to_signal_spiral(
echos,
parameters,
trajectory);
- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());

block_dim = {32, 4};
grid_dim = {div_ceil(uint(nvoxels), block_dim.x), div_ceil(uint(nreadouts), block_dim.y)};
@@ -202,7 +202,7 @@
exponents,
parameters,
trajectory);
- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());

const uint threads_per_block = 64;
const uint threads_cooperative = 32;
@@ -228,7 +228,7 @@
factors,
coil_sensitivities);

- COMPAS_CUDA_CHECK(gpuGetLastError());
+ COMPAS_GPU_CHECK(gpuGetLastError());
}

cublasComputeType_t cublas_compute_type_from_simulate_method(SimulateSignalMethod method) {
