in extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c [55:110]
static ArrowErrorCode ArrowDeviceCudaAllocateBuffer(struct ArrowDevice* device,
                                                    struct ArrowBuffer* buffer,
                                                    int64_t size_bytes) {
  // Remember the caller's current device so it can be restored before returning
  int prev_device = 0;
  cudaError_t result = cudaGetDevice(&prev_device);
  if (result != cudaSuccess) {
    return EINVAL;
  }

  // Switch to the device that should own this allocation
  result = cudaSetDevice((int)device->device_id);
  if (result != cudaSuccess) {
    cudaSetDevice(prev_device);
    return EINVAL;
  }

  // Private state handed to the deallocator so the buffer can later be freed
  // with the matching CUDA API
  struct ArrowDeviceCudaAllocatorPrivate* allocator_private =
      (struct ArrowDeviceCudaAllocatorPrivate*)ArrowMalloc(
          sizeof(struct ArrowDeviceCudaAllocatorPrivate));
  if (allocator_private == NULL) {
    cudaSetDevice(prev_device);
    return ENOMEM;
  }

  // Device memory for ARROW_DEVICE_CUDA, pinned host memory for
  // ARROW_DEVICE_CUDA_HOST
  void* ptr = NULL;
  switch (device->device_type) {
    case ARROW_DEVICE_CUDA:
      result = cudaMalloc(&ptr, (int64_t)size_bytes);
      break;
    case ARROW_DEVICE_CUDA_HOST:
      result = cudaMallocHost(&ptr, (int64_t)size_bytes);
      break;
    default:
      ArrowFree(allocator_private);
      cudaSetDevice(prev_device);
      return EINVAL;
  }

  if (result != cudaSuccess) {
    ArrowFree(allocator_private);
    cudaSetDevice(prev_device);
    return ENOMEM;
  }

  allocator_private->device_id = device->device_id;
  allocator_private->device_type = device->device_type;
  allocator_private->allocated_ptr = ptr;

  // Hand ownership of the allocation to the ArrowBuffer via a custom deallocator
  buffer->data = (uint8_t*)ptr;
  buffer->size_bytes = size_bytes;
  buffer->capacity_bytes = size_bytes;
  buffer->allocator =
      ArrowBufferDeallocator(&ArrowDeviceCudaDeallocator, allocator_private);

  // Restore the previously active device
  cudaSetDevice(prev_device);
  return NANOARROW_OK;
}
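
The buffer is eventually released through the ArrowDeviceCudaDeallocator callback installed above, which is not shown in this excerpt. Below is a minimal sketch of what such a callback can look like, assuming nanoarrow's custom-free signature (allocator, ptr, size) and only the three private fields assigned above; the name and error handling are illustrative, not the file's actual implementation.

// Illustrative sketch only: mirrors what the ArrowDeviceCudaDeallocator
// referenced above likely does, but is not the real implementation.
static void ArrowDeviceCudaDeallocatorSketch(struct ArrowBufferAllocator* allocator,
                                             uint8_t* ptr, int64_t old_size) {
  (void)ptr;
  (void)old_size;

  struct ArrowDeviceCudaAllocatorPrivate* allocator_private =
      (struct ArrowDeviceCudaAllocatorPrivate*)allocator->private_data;

  // Free with the API that matches the original allocation type
  switch (allocator_private->device_type) {
    case ARROW_DEVICE_CUDA:
      cudaFree(allocator_private->allocated_ptr);
      break;
    case ARROW_DEVICE_CUDA_HOST:
      cudaFreeHost(allocator_private->allocated_ptr);
      break;
    default:
      break;
  }

  ArrowFree(allocator_private);
}

Because the original allocation pointer is stored in allocator_private->allocated_ptr, the free call does not need to rely on the ptr argument the buffer machinery passes back, and the correct cudaFree/cudaFreeHost pairing is recovered from the recorded device_type.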