64 T* AllocOnGpu(
int size)
67 cudaError_t cudaError = cudaMalloc((
void**)&dataGpu, size *
sizeof(T));
68 if (cudaError != cudaSuccess) {
89 void CopyFromGpu(T* destCpu, T* sourceGpu,
int size)
91 cudaError_t cudaError = cudaMemcpy(
95 cudaMemcpyDeviceToHost
97 if (cudaError != cudaSuccess)
98 throw LSST_EXCEPT(GpuMemoryError,
"CopyFromGpu: failed");
116 void CopyToGpu(T* destGpu, T* sourceCpu,
int size)
118 cudaError_t cudaError;
119 cudaError = cudaMemcpy(
123 cudaMemcpyHostToDevice
125 if (cudaError != cudaSuccess) {
126 throw LSST_EXCEPT(GpuMemoryError,
"CopyToGpu: failed");
150 T* TransferToGpu(
const T* sourceCpu,
int size)
153 cudaError_t cudaError = cudaMalloc((
void**)&dataGpu, size *
sizeof(T));
154 if (cudaError != cudaSuccess) {
157 cudaError = cudaMemcpy(
161 cudaMemcpyHostToDevice
163 if (cudaError != cudaSuccess) {
164 throw LSST_EXCEPT(GpuMemoryError,
"TransferToGpu: transfer failed");
186 void operator=(
const GpuMemOwner& rhs);
189 GpuMemOwner(
const GpuMemOwner& rhs) {
190 assert(rhs.getPtr() == NULL);
198 GpuMemOwner() : ptr(NULL) {}
235 T* Transfer(
const T* source,
int size_p) {
238 ptr = TransferToGpu(source, size);
253 T* Transfer(
const GpuBuffer2D<T>& source) {
255 size = source.Size();
256 ptr = TransferToGpu(source.img, size);
271 T* TransferVec(
const std::vector<T>& source) {
273 size = int(source.size());
274 ptr = TransferToGpu(&source[0], size);
295 T* Alloc(
int size_p) {
298 ptr = AllocOnGpu<T>(size);
306 T* CopyToGpu(detail::GpuBuffer2D<T>& source)
const {
308 assert(source.Size() == size);
309 lsst::afw::gpu::detail::CopyToGpu(ptr, source.img, size);
317 T* CopyFromGpu(detail::GpuBuffer2D<T>& dest)
const {
319 assert(dest.Size() == size);
320 lsst::afw::gpu::detail::CopyFromGpu(dest.img, ptr, size);
328 T* CopyFromGpu(T* dest)
const {
330 lsst::afw::gpu::detail::CopyFromGpu(dest, ptr, size);
364 if (ptr != NULL) cudaFree(ptr);
370 #endif //IS_GPU_BUILD
The base class for all image classes (Image, Mask, MaskedImage, ...)
#define LSST_EXCEPT(type,...)