in extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c [233:267]
static ArrowErrorCode ArrowDeviceCudaBufferCopy(struct ArrowDevice* device_src,
                                                struct ArrowBufferView src,
                                                struct ArrowDevice* device_dst,
                                                struct ArrowBufferView dst) {
  // Pick the cudaMemcpyKind that matches the source/destination device pair.
  // ARROW_DEVICE_CUDA_HOST is CUDA-registered (pinned) host memory, so copies
  // that stay on the host side are plain host-to-host copies.
  enum cudaMemcpyKind memcpy_kind;

  if (device_src->device_type == ARROW_DEVICE_CPU &&
      device_dst->device_type == ARROW_DEVICE_CUDA) {
    memcpy_kind = cudaMemcpyHostToDevice;
  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
             device_dst->device_type == ARROW_DEVICE_CUDA) {
    memcpy_kind = cudaMemcpyDeviceToDevice;
  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
             device_dst->device_type == ARROW_DEVICE_CPU) {
    memcpy_kind = cudaMemcpyDeviceToHost;
  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
    memcpy_kind = cudaMemcpyHostToHost;
  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
    memcpy_kind = cudaMemcpyHostToHost;
  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
             device_dst->device_type == ARROW_DEVICE_CPU) {
    memcpy_kind = cudaMemcpyHostToHost;
  } else {
    // Any other device pairing is not supported by this copier.
    return ENOTSUP;
  }

  // Synchronous copy; the destination view's size determines the copy length,
  // so the caller is expected to pass equally sized views.
  cudaError_t result = cudaMemcpy((void*)dst.data.as_uint8, src.data.as_uint8,
                                  dst.size_bytes, memcpy_kind);
  if (result != cudaSuccess) {
    return EINVAL;
  }

  return NANOARROW_OK;
}
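
The function above only chooses a cudaMemcpyKind for the device pair and then issues a single synchronous cudaMemcpy, returning ENOTSUP for unsupported pairings and EINVAL on a CUDA failure. The following standalone sketch exercises the same CPU-to-CUDA and CUDA-to-CPU branches using only CUDA runtime calls; it is not part of nanoarrow, and the buffer size and error handling here are illustrative assumptions.

/* Hypothetical standalone sketch (not nanoarrow code): a host <-> device
 * round trip using the same cudaMemcpyHostToDevice / cudaMemcpyDeviceToHost
 * kinds selected above. Build against the CUDA runtime (link cudart). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime_api.h>

int main(void) {
  uint8_t host_src[64];
  uint8_t host_dst[64];
  memset(host_src, 0xAB, sizeof(host_src));

  void* device_buf = NULL;
  if (cudaMalloc(&device_buf, sizeof(host_src)) != cudaSuccess) {
    return 1;
  }

  /* CPU -> CUDA: the cudaMemcpyHostToDevice branch */
  if (cudaMemcpy(device_buf, host_src, sizeof(host_src),
                 cudaMemcpyHostToDevice) != cudaSuccess) {
    cudaFree(device_buf);
    return 1;
  }

  /* CUDA -> CPU: the cudaMemcpyDeviceToHost branch */
  if (cudaMemcpy(host_dst, device_buf, sizeof(host_dst),
                 cudaMemcpyDeviceToHost) != cudaSuccess) {
    cudaFree(device_buf);
    return 1;
  }

  cudaFree(device_buf);
  printf("round trip ok: %d\n",
         memcmp(host_src, host_dst, sizeof(host_src)) == 0);
  return 0;
}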