From 0d77fbd77427e38bf79707eee3f72a90d44f4c83 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 22 Dec 2023 23:46:45 +0100 Subject: [PATCH 01/21] cuda : improve cuda pool efficiency using virtual memory --- Makefile | 3 +- ggml-cuda.cu | 173 +++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 150 insertions(+), 26 deletions(-) diff --git a/Makefile b/Makefile index cb5a4e948e5e3..489591a3418e4 100644 --- a/Makefile +++ b/Makefile @@ -367,9 +367,10 @@ endif # LLAMA_BLIS ifdef LLAMA_CUBLAS MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include -I/usr/local/cuda/targets/aarch64-linux/include - MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib + MK_LDFLAGS += -lcuda -L/usr/lib/wsl/lib -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib OBJS += ggml-cuda.o MK_NVCCFLAGS = -use_fast_math + ifndef JETSON_EOL_MODULE_DETECT MK_NVCCFLAGS += --forward-unknown-to-host-compiler endif # JETSON_EOL_MODULE_DETECT diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 7c2a834e34382..f7af7e1ff2e47 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -88,6 +88,7 @@ #define __trap abort #else #include +#include #include #include // CUDA 10.2 does not have these macro definitions. @@ -213,6 +214,24 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); } \ } while (0) +// driver API +#define CU_CHECK(err) \ + do { \ + CUresult err_ = (err); \ + if (err_ != CUDA_SUCCESS) { \ + int id; \ + cuDeviceGet(&id, 0); \ + const char * err_str; \ + cuGetErrorString(err_, &err_str); \ + fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \ + err_str); \ + fprintf(stderr, "%s\n", #err); \ + fprintf(stderr, "current device: %d\n", id); \ + GGML_ASSERT(!"CUDA error"); \ + } \ + } while (0) + + #if CUDART_VERSION >= 12000 #define CUBLAS_CHECK(err) \ do { \ @@ -6543,13 +6562,18 @@ struct scoped_spin_lock { scoped_spin_lock& operator=(const scoped_spin_lock&) = delete; }; +static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; + +#if 0 +#define DEBUG_CUDA_MALLOC struct cuda_buffer { void * ptr = nullptr; size_t size = 0; }; static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS]; -static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; + +static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cuda_pool_lock); @@ -6557,7 +6581,7 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { CUDA_CHECK(cudaGetDevice(&id)); #ifdef DEBUG_CUDA_MALLOC int nnz = 0; - size_t max_size = 0, tot_size = 0; + size_t max_size = 0; #endif size_t best_diff = 1ull << 36; int ibest = -1; @@ -6566,7 +6590,6 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { if (b.ptr != nullptr) { #ifdef DEBUG_CUDA_MALLOC ++nnz; - tot_size += b.size; if (b.size > max_size) max_size = b.size; #endif if (b.size >= size) { @@ -6593,15 +6616,16 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { b.size = 0; return ptr; } -#ifdef DEBUG_CUDA_MALLOC - fprintf(stderr, "%s: %d buffers, max_size = %u MB, tot_size = %u MB, requested %u MB\n", __func__, nnz, - (uint32_t)(max_size/1024/1024), 
(uint32_t)(tot_size/1024/1024), (uint32_t)(size/1024/1024)); -#endif void * ptr; size_t look_ahead_size = (size_t) (1.05 * size); look_ahead_size = 256 * ((look_ahead_size + 255)/256); CUDA_CHECK(cudaMalloc((void **) &ptr, look_ahead_size)); *actual_size = look_ahead_size; + g_cuda_pool_size[id] += look_ahead_size; +#ifdef DEBUG_CUDA_MALLOC + fprintf(stderr, "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, id, nnz, + (uint32_t)(max_size/1024/1024), (uint32_t)(g_cuda_pool_size[id]/1024/1024), (uint32_t)(size/1024/1024)); +#endif return ptr; } @@ -6620,8 +6644,107 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { } fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n"); CUDA_CHECK(cudaFree(ptr)); + g_cuda_pool_size[id] -= size; +} +#else + +static std::vector g_cuda_pool_handles[GGML_CUDA_MAX_DEVICES]; +static CUdeviceptr g_cuda_pool_addr[GGML_CUDA_MAX_DEVICES] = {0}; +static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; +static size_t g_cuda_pool_used[GGML_CUDA_MAX_DEVICES] = {0}; + +static const size_t CUDA_POOL_MAX_SIZE = 1ull << 36; // 64 GB + +//#define DEBUG_CUDA_MALLOC + +#define ggml_cuda_pool_malloc(size, actual_size) ggml_cuda_pool_malloc_(size, actual_size, #size " " #actual_size) +static void * ggml_cuda_pool_malloc_(size_t size, size_t * actual_size, const char * call) { + scoped_spin_lock lock(g_cuda_pool_lock); + int id; + CUDA_CHECK(cudaGetDevice(&id)); + + size_t avail = g_cuda_pool_size[id] - g_cuda_pool_used[id]; + + if (size > avail) { + size_t reserve_size = size - avail; + + // allocate more physical memory + CUmemAllocationProp prop = {}; + prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; + prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + prop.location.id = id; + + // get the minimum allocation granularity for this device + size_t granularity = 0; + CU_CHECK(cuMemGetAllocationGranularity(&granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); + + // round up to the nearest granularity + reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); + + GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_MAX_SIZE); + + CUmemGenericAllocationHandle handle; + CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); + + // reserve virtual address space (if not already reserved) + if (g_cuda_pool_addr[id] == 0) { + CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[id], CUDA_POOL_MAX_SIZE, 0, 0, 0)); + } + + // map at the end of the pool + CU_CHECK(cuMemMap(g_cuda_pool_addr[id] + g_cuda_pool_size[id], reserve_size, 0, handle, 0)); + + // set access + CUmemAccessDesc access = {}; + access.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + access.location.id = id; + access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; + CU_CHECK(cuMemSetAccess(g_cuda_pool_addr[id] + g_cuda_pool_size[id], reserve_size, &access, 1)); + + // add to the pool + g_cuda_pool_handles[id].push_back(handle); + g_cuda_pool_size[id] += reserve_size; + + printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB) [%s]\n", + id, (unsigned long long) (g_cuda_pool_size[id]/1024/1024), + (unsigned long long) (reserve_size/1024/1024), call); + } + + GGML_ASSERT(g_cuda_pool_addr[id] != 0); + + void * ptr = (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id]); + *actual_size = size; + g_cuda_pool_used[id] += size; + +#ifdef DEBUG_CUDA_MALLOC + printf("cuda pool[%d]: allocated %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr, call); +#endif + + return ptr; + + GGML_UNUSED(call); } +#define 
ggml_cuda_pool_free(ptr, size) ggml_cuda_pool_free_(ptr, size, #ptr " " #size) +static void ggml_cuda_pool_free_(void * ptr, size_t size, const char * call) { + scoped_spin_lock lock(g_cuda_pool_lock); + int id; + CUDA_CHECK(cudaGetDevice(&id)); + +#ifdef DEBUG_CUDA_MALLOC + printf("cuda pool[%d]: free %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr, call); +#endif + + g_cuda_pool_used[id] -= size; + + // all deallocations must be in reverse order of the allocations + GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id])); + + GGML_UNUSED(call); +} + +#endif + static bool g_cublas_loaded = false; bool ggml_cublas_loaded(void) { @@ -7437,13 +7560,13 @@ inline void ggml_cuda_op_mul_mat_cublas( ggml_cuda_pool_free(dst_f16, dst_as); - if (src0_as != 0) { - ggml_cuda_pool_free(src0_as_f16, src0_as); - } - if (src1_as != 0) { ggml_cuda_pool_free(src1_as_f16, src1_as); } + + if (src0_as != 0) { + ggml_cuda_pool_free(src0_as_f16, src0_as); + } } else { float * src0_ddq_as_f32 = nullptr; @@ -7800,14 +7923,14 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s CUDA_CHECK(cudaMemcpyAsync(dst->data, dst_ddf, ggml_nbytes(dst), cudaMemcpyDeviceToHost, main_stream)); } - if (src0_asf > 0) { - ggml_cuda_pool_free(src0_ddf, src0_asf); + if (dst_asf > 0) { + ggml_cuda_pool_free(dst_ddf, dst_asf); } if (src1_asf > 0) { ggml_cuda_pool_free(src1_ddf, src1_asf); } - if (dst_asf > 0) { - ggml_cuda_pool_free(dst_ddf, dst_asf); + if (src0_asf > 0) { + ggml_cuda_pool_free(src0_ddf, src0_asf); } if (dst->backend == GGML_BACKEND_CPU) { @@ -8119,17 +8242,17 @@ static void ggml_cuda_op_mul_mat( CUDA_CHECK(ggml_cuda_set_device(id)); // free buffers again when done - if (src0_as[id] > 0) { - ggml_cuda_pool_free(src0_dd[id], src0_as[id]); - } - if (src1_asf[id] > 0) { - ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); + if (dst_as[id] > 0) { + ggml_cuda_pool_free(dst_dd[id], dst_as[id]); } if (src1_asq[id] > 0) { ggml_cuda_pool_free(src1_ddq[id], src1_asq[id]); } - if (dst_as[id] > 0) { - ggml_cuda_pool_free(dst_dd[id], dst_as[id]); + if (src1_asf[id] > 0) { + ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); + } + if (src0_as[id] > 0) { + ggml_cuda_pool_free(src0_dd[id], src0_as[id]); } } @@ -8497,12 +8620,12 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - if (ptrs_src_s != 0) { - ggml_cuda_pool_free(ptrs_src, ptrs_src_s); - } if (ptrs_dst_s != 0) { ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s); } + if (ptrs_src_s != 0) { + ggml_cuda_pool_free(ptrs_src, ptrs_src_s); + } } #endif From eb223dcddda36aa9cc4afc873a9a5a8ed2293b76 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 00:34:20 +0100 Subject: [PATCH 02/21] fix mixtral --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f7af7e1ff2e47..74463f3281786 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9026,8 +9026,8 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s } } - ggml_cuda_pool_free(src1_contiguous, as_src1); ggml_cuda_pool_free(dst_contiguous, as_dst); + ggml_cuda_pool_free(src1_contiguous, as_src1); } if (dst->backend == GGML_BACKEND_CPU) { From bd78dc9aee33cb2024358ebfe2e796faaa6376f3 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 02:06:49 +0100 Subject: [PATCH 03/21] fix cmake build --- CMakeLists.txt | 2 ++ tests/test-grad0.cpp | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff 
--git a/CMakeLists.txt b/CMakeLists.txt index 6fc6508c598ff..545aab267dbec 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -302,6 +302,8 @@ if (LLAMA_CUBLAS) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt) endif() + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver) + if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES) # 52 == lowest CUDA 12 standard # 60 == f16 CUDA intrinsics diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp index 14914def565d9..8ff76c8910c49 100644 --- a/tests/test-grad0.cpp +++ b/tests/test-grad0.cpp @@ -883,9 +883,6 @@ int main(int argc, const char ** argv) { srand(seed); const int nargs = 1; - int64_t ne2[4]; - ne2[0] = 1; - for (int ndims = 1; ndims <= 2; ++ndims) { x[0] = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f); From 872408cfb71795b9fc179f1f38157d9b0cbfde71 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 02:29:21 +0100 Subject: [PATCH 04/21] check for vmm support, disable for hip ggml-ci --- ggml-cuda.cu | 73 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 74463f3281786..a2bbd22fbaeb6 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6564,18 +6564,16 @@ struct scoped_spin_lock { static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; -#if 0 -#define DEBUG_CUDA_MALLOC +// #define DEBUG_CUDA_MALLOC struct cuda_buffer { void * ptr = nullptr; size_t size = 0; }; static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS]; - static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; -static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { +static void * ggml_cuda_pool_malloc_leg(size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; CUDA_CHECK(cudaGetDevice(&id)); @@ -6629,7 +6627,7 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { return ptr; } -static void ggml_cuda_pool_free(void * ptr, size_t size) { +static void ggml_cuda_pool_free_leg(void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; CUDA_CHECK(cudaGetDevice(&id)); @@ -6646,19 +6644,15 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { CUDA_CHECK(cudaFree(ptr)); g_cuda_pool_size[id] -= size; } -#else +#if !defined(GGML_USE_HIPBLAS) +// pool with virtual memory static std::vector g_cuda_pool_handles[GGML_CUDA_MAX_DEVICES]; static CUdeviceptr g_cuda_pool_addr[GGML_CUDA_MAX_DEVICES] = {0}; -static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; static size_t g_cuda_pool_used[GGML_CUDA_MAX_DEVICES] = {0}; +static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 36; // 64 GB -static const size_t CUDA_POOL_MAX_SIZE = 1ull << 36; // 64 GB - -//#define DEBUG_CUDA_MALLOC - -#define ggml_cuda_pool_malloc(size, actual_size) ggml_cuda_pool_malloc_(size, actual_size, #size " " #actual_size) -static void * ggml_cuda_pool_malloc_(size_t size, size_t * actual_size, const char * call) { +static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; CUDA_CHECK(cudaGetDevice(&id)); @@ -6681,14 +6675,14 @@ static void * ggml_cuda_pool_malloc_(size_t size, size_t * actual_size, const ch // round up to the nearest granularity reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); - GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_MAX_SIZE); + GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); 
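        // growth strategy (summary): a single large virtual address range is reserved once per
        // device, and each time the pool runs dry a new physical chunk of reserve_size bytes is
        // created with cuMemCreate and mapped at the current end of that range, so pointers that
        // were handed out earlier never move. reserve_size is rounded up to the allocation
        // granularity above; e.g. with a typical 2 MiB granularity a 5 MiB shortfall becomes
        // ((5 + 2 - 1) / 2) * 2 = 6 MiB. (The 2 MiB figure is only an illustration, the real
        // value comes from cuMemGetAllocationGranularity.)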
CUmemGenericAllocationHandle handle; CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); // reserve virtual address space (if not already reserved) if (g_cuda_pool_addr[id] == 0) { - CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[id], CUDA_POOL_MAX_SIZE, 0, 0, 0)); + CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[id], CUDA_POOL_VMM_MAX_SIZE, 0, 0, 0)); } // map at the end of the pool @@ -6705,9 +6699,9 @@ static void * ggml_cuda_pool_malloc_(size_t size, size_t * actual_size, const ch g_cuda_pool_handles[id].push_back(handle); g_cuda_pool_size[id] += reserve_size; - printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB) [%s]\n", - id, (unsigned long long) (g_cuda_pool_size[id]/1024/1024), - (unsigned long long) (reserve_size/1024/1024), call); + //printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB)\n", + // id, (unsigned long long) (g_cuda_pool_size[id]/1024/1024), + // (unsigned long long) (reserve_size/1024/1024)); } GGML_ASSERT(g_cuda_pool_addr[id] != 0); @@ -6717,32 +6711,51 @@ static void * ggml_cuda_pool_malloc_(size_t size, size_t * actual_size, const ch g_cuda_pool_used[id] += size; #ifdef DEBUG_CUDA_MALLOC - printf("cuda pool[%d]: allocated %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr, call); + printf("cuda pool[%d]: allocated %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr); #endif return ptr; - - GGML_UNUSED(call); } -#define ggml_cuda_pool_free(ptr, size) ggml_cuda_pool_free_(ptr, size, #ptr " " #size) -static void ggml_cuda_pool_free_(void * ptr, size_t size, const char * call) { +static void ggml_cuda_pool_free_vmm(void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; CUDA_CHECK(cudaGetDevice(&id)); #ifdef DEBUG_CUDA_MALLOC - printf("cuda pool[%d]: free %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr, call); + printf("cuda pool[%d]: freed %llu bytes at %llx\n", id, (unsigned long long) size, ptr); #endif g_cuda_pool_used[id] -= size; // all deallocations must be in reverse order of the allocations GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id])); +} - GGML_UNUSED(call); +static bool g_device_vmm[GGML_CUDA_MAX_DEVICES] = {false}; + +static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { + int id; + CUDA_CHECK(cudaGetDevice(&id)); + if (g_device_vmm[id]) { + return ggml_cuda_pool_malloc_vmm(size, actual_size); + } else { + return ggml_cuda_pool_malloc_leg(size, actual_size); + } } +static void ggml_cuda_pool_free(void * ptr, size_t size) { + int id; + CUDA_CHECK(cudaGetDevice(&id)); + if (g_device_vmm[id]) { + ggml_cuda_pool_free_vmm(ptr, size); + } else { + ggml_cuda_pool_free_leg(ptr, size); + } +} +#else +#define ggml_cuda_pool_malloc ggml_cuda_pool_malloc_leg +#define ggml_cuda_pool_free ggml_cuda_pool_free_leg #endif static bool g_cublas_loaded = false; @@ -6783,9 +6796,17 @@ void ggml_init_cublas() { #endif fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count); for (int id = 0; id < g_device_count; ++id) { + int deviceSupportsVmm = 0; +#if !defined(GGML_USE_HIPBLAS) + CUdevice device; + CU_CHECK(cuDeviceGet(&device, id)); + CU_CHECK(cuDeviceGetAttribute(&deviceSupportsVmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); +#endif + g_device_vmm[id] = !!deviceSupportsVmm; + cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); - fprintf(stderr, " Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor); + fprintf(stderr, " Device %d: %s, 
compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, g_device_vmm[id] ? "yes" : "no"); g_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; From 9452d0d54bcbb6f4f46b2d5606869f8b09fdec3c Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 13:40:06 +0100 Subject: [PATCH 05/21] fix hip build --- ggml-cuda.cu | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index a2bbd22fbaeb6..bf53cef73d11a 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6796,17 +6796,18 @@ void ggml_init_cublas() { #endif fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count); for (int id = 0; id < g_device_count; ++id) { - int deviceSupportsVmm = 0; + int device_vmm = 0; + #if !defined(GGML_USE_HIPBLAS) CUdevice device; CU_CHECK(cuDeviceGet(&device, id)); - CU_CHECK(cuDeviceGetAttribute(&deviceSupportsVmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); + CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); + g_device_vmm[id] = !!device_vmm; #endif - g_device_vmm[id] = !!deviceSupportsVmm; cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); - fprintf(stderr, " Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, g_device_vmm[id] ? "yes" : "no"); + fprintf(stderr, " Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); g_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; From 20860daee29b94038b639eb95cc4785e52006268 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 13:42:18 +0100 Subject: [PATCH 06/21] clarify granularity --- ggml-cuda.cu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index bf53cef73d11a..a488e4082edb5 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6669,10 +6669,10 @@ static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { prop.location.id = id; // get the minimum allocation granularity for this device - size_t granularity = 0; + size_t granularity; CU_CHECK(cuMemGetAllocationGranularity(&granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); - // round up to the nearest granularity + // round up to the next multiple of the granularity reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); From 4c0f300a2c29546948fbe3fe7c6ede0e04dca8bf Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 15:27:49 +0100 Subject: [PATCH 07/21] move all caps to g_device_caps --- ggml-cuda.cu | 79 +++++++++++++++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 35 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index a488e4082edb5..532292c78a2de 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -535,9 +535,17 @@ inline cudaError_t ggml_cuda_set_device(const int device) { static int g_device_count = -1; static int g_main_device = 0; -static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES]; static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; +struct device_capabilities { + int cc; // compute capability + bool vmm; // virtual memory support + size_t vmm_granularity; // granularity of virtual memory +}; + +static device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} }; + + static void * g_scratch_buffer = nullptr; static size_t g_scratch_size = 0; 
// disabled by default static size_t g_scratch_offset = 0; @@ -5894,7 +5902,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -5939,7 +5947,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -5984,7 +5992,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6029,7 +6037,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6074,7 +6082,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6119,7 +6127,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6166,7 +6174,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6212,7 +6220,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6257,7 +6265,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6302,7 +6310,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda( int id; CUDA_CHECK(cudaGetDevice(&id)); - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; int mmq_x, mmq_y, nwarps; if (compute_capability >= CC_RDNA2) { @@ -6660,23 +6668,18 @@ static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { size_t avail = g_cuda_pool_size[id] - g_cuda_pool_used[id]; if (size > avail) { + // round up to the next multiple of the granularity size_t reserve_size = size - avail; + const size_t granularity = g_device_caps[id].vmm_granularity; + reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); + + GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); // allocate more physical memory CUmemAllocationProp prop = {}; prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; 
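        // note: prop describes where the physical chunk lives; it mirrors the properties used in
        // ggml_init_cublas to query vmm_granularity, so the reserve_size rounded above is a valid
        // allocation size for cuMemCreate on this device.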
prop.location.id = id; - - // get the minimum allocation granularity for this device - size_t granularity; - CU_CHECK(cuMemGetAllocationGranularity(&granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); - - // round up to the next multiple of the granularity - reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); - - GGML_ASSERT(g_cuda_pool_size[id] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); - CUmemGenericAllocationHandle handle; CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); @@ -6732,12 +6735,10 @@ static void ggml_cuda_pool_free_vmm(void * ptr, size_t size) { GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[id] + g_cuda_pool_used[id])); } -static bool g_device_vmm[GGML_CUDA_MAX_DEVICES] = {false}; - static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { int id; CUDA_CHECK(cudaGetDevice(&id)); - if (g_device_vmm[id]) { + if (g_device_caps[id].vmm) { return ggml_cuda_pool_malloc_vmm(size, actual_size); } else { return ggml_cuda_pool_malloc_leg(size, actual_size); @@ -6747,7 +6748,7 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { static void ggml_cuda_pool_free(void * ptr, size_t size) { int id; CUDA_CHECK(cudaGetDevice(&id)); - if (g_device_vmm[id]) { + if (g_device_caps[id].vmm) { ggml_cuda_pool_free_vmm(ptr, size); } else { ggml_cuda_pool_free_leg(ptr, size); @@ -6802,8 +6803,16 @@ void ggml_init_cublas() { CUdevice device; CU_CHECK(cuDeviceGet(&device, id)); CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); - g_device_vmm[id] = !!device_vmm; + + if (device_vmm) { + CUmemAllocationProp alloc_prop = {}; + alloc_prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; + alloc_prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + alloc_prop.location.id = id; + CU_CHECK(cuMemGetAllocationGranularity(&g_device_caps[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); + } #endif + g_device_caps[id].vmm = !!device_vmm; cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); @@ -6812,9 +6821,9 @@ void ggml_init_cublas() { g_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) - g_compute_capabilities[id] = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; + g_device_caps[id] = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; #else - g_compute_capabilities[id] = 100*prop.major + 10*prop.minor; + g_device_caps[id].cc = 100*prop.major + 10*prop.minor; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) } for (int id = 0; id < g_device_count; ++id) { @@ -7324,11 +7333,11 @@ static int64_t get_row_rounding(ggml_type type) { int64_t max_compute_capability = INT_MIN; for (int64_t id = 0; id < g_device_count; ++id) { if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { - if (min_compute_capability > g_compute_capabilities[id]) { - min_compute_capability = g_compute_capabilities[id]; + if (min_compute_capability > g_device_caps[id].cc) { + min_compute_capability = g_device_caps[id].cc; } - if (max_compute_capability < g_compute_capabilities[id]) { - max_compute_capability = g_compute_capabilities[id]; + if (max_compute_capability < g_device_caps[id].cc) { + max_compute_capability = g_device_caps[id].cc; } } } @@ -7536,7 +7545,7 @@ inline void ggml_cuda_op_mul_mat_cublas( // ldc == nrows of the matrix that cuBLAS writes into int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? 
ne0 : row_diff; - const int compute_capability = g_compute_capabilities[id]; + const int compute_capability = g_device_caps[id].cc; if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 @@ -8671,8 +8680,8 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 int64_t min_compute_capability = INT_MAX; for (int64_t id = 0; id < g_device_count; ++id) { - if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { - min_compute_capability = g_compute_capabilities[id]; + if (min_compute_capability > g_device_caps[id].cc && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { + min_compute_capability = g_device_caps[id].cc; } } From 545f23d07b4f2bc83491f21a9aec801844eb9cff Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 15:48:46 +0100 Subject: [PATCH 08/21] refactor error checking --- ggml-cuda.cu | 94 +++++++++++++++++++++------------------------------- 1 file changed, 37 insertions(+), 57 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 532292c78a2de..3af228f580131 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -201,63 +201,43 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); -#define CUDA_CHECK(err) \ - do { \ - cudaError_t err_ = (err); \ - if (err_ != cudaSuccess) { \ - int id; \ - cudaGetDevice(&id); \ - fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \ - cudaGetErrorString(err_)); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"CUDA error"); \ - } \ - } while (0) - -// driver API -#define CU_CHECK(err) \ - do { \ - CUresult err_ = (err); \ - if (err_ != CUDA_SUCCESS) { \ - int id; \ - cuDeviceGet(&id, 0); \ - const char * err_str; \ - cuGetErrorString(err_, &err_str); \ - fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \ - err_str); \ - fprintf(stderr, "%s\n", #err); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"CUDA error"); \ - } \ - } while (0) - - #if CUDART_VERSION >= 12000 -#define CUBLAS_CHECK(err) \ - do { \ - cublasStatus_t err_ = (err); \ - if (err_ != CUBLAS_STATUS_SUCCESS) { \ - int id; \ - cudaGetDevice(&id); \ - fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \ - err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"cuBLAS error"); \ - } \ - } while (0) + static const char * cublas_get_error_str(const cublasStatus_t err) { + return cublasGetStatusString(err); + } #else -#define CUBLAS_CHECK(err) \ - do { \ - cublasStatus_t err_ = (err); \ - if (err_ != CUBLAS_STATUS_SUCCESS) { \ - int id; \ - cudaGetDevice(&id); \ - fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__); \ - fprintf(stderr, "current device: %d\n", id); \ - GGML_ASSERT(!"cuBLAS error"); \ - } \ - } while (0) -#endif // CUDART_VERSION >= 11 + static const char * cublas_get_error_str(const cublasStatus_t err) { + switch (err) { + case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; + case 
CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; + case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; + case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; + default: return "unknown error"; + } +#endif // CUDART_VERSION >= 12000 + +static const char * cu_get_error_str(CUresult err) { + const char * err_str; + cuGetErrorString(err, &err_str); + return err_str; +} + +[[noreturn]] +static void ggml_cuda_error(const char * stmt, const char * func, const char * file, const int line, const char * msg) { + fprintf(stderr, "CUDA error: %s: %s\n", stmt, msg); + fprintf(stderr, " in function %s at %s:%d\n", func, file, line); + GGML_ASSERT(!"CUDA error"); +} + +#define CUDA_CHECK(err) do { auto err_ = (err); if (err_ != cudaSuccess) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cudaGetErrorString(err_)); } while (0) +#define CUBLAS_CHECK(err) do { auto err_ = (err); if (err_ != CUBLAS_STATUS_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cublas_get_error_str(err_)); } while (0) +#define CU_CHECK(err) do { auto err_ = (err); if (err_ != CUDA_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cu_get_error_str(err_)); } while (0) #if CUDART_VERSION >= 11100 #define GGML_CUDA_ASSUME(x) __builtin_assume(x) @@ -537,13 +517,13 @@ static int g_device_count = -1; static int g_main_device = 0; static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; -struct device_capabilities { +struct cuda_device_capabilities { int cc; // compute capability bool vmm; // virtual memory support size_t vmm_granularity; // granularity of virtual memory }; -static device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} }; +static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} }; static void * g_scratch_buffer = nullptr; From 110b5055da12604726ef49b9799c14cff642cf0b Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 16:38:43 +0100 Subject: [PATCH 09/21] add cuda_pool_alloc, refactor most pool allocations ggml-ci --- ggml-cuda.cu | 192 ++++++++++++++++++++++----------------------------- 1 file changed, 83 insertions(+), 109 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 3af228f580131..aced0fa6ece5d 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -220,6 +220,7 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; default: return "unknown error"; } + } #endif // CUDART_VERSION >= 12000 static const char * cu_get_error_str(CUresult err) { @@ -6739,6 +6740,39 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { #define ggml_cuda_pool_free ggml_cuda_pool_free_leg #endif +template +struct cuda_pool_alloc { + T * ptr = nullptr; + size_t act_size = 0; + + // size is in number of elements + T * alloc(size_t size) { + GGML_ASSERT(ptr == nullptr); + ptr = (T *) ggml_cuda_pool_malloc(size * sizeof(T), &this->act_size); + return ptr; + } + + cuda_pool_alloc(size_t size) { + alloc(size); + } + + ~cuda_pool_alloc() { + if (ptr != nullptr) { + ggml_cuda_pool_free(ptr, act_size); + } + } + + T * get() { + return ptr; + } + + cuda_pool_alloc() = default; + cuda_pool_alloc(const 
cuda_pool_alloc &) = delete; + cuda_pool_alloc(cuda_pool_alloc &&) = delete; + cuda_pool_alloc& operator=(const cuda_pool_alloc &) = delete; + cuda_pool_alloc& operator=(cuda_pool_alloc &&) = delete; +}; + static bool g_cublas_loaded = false; bool ggml_cublas_loaded(void) { @@ -7432,8 +7466,8 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics #ifdef GGML_CUDA_F16 - size_t ash; - dfloat * src1_dfloat = nullptr; // dfloat == half + cuda_pool_alloc src1_dfloat_a; + half * src1_dfloat = nullptr; // dfloat == half bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 || @@ -7441,7 +7475,7 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16; if (src1_convert_f16) { - src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash); + src1_dfloat = src1_dfloat_a.alloc(ne00); ggml_cpy_f32_f16_cuda((const char *) src1_ddf_i, (char *) src1_dfloat, ne00, ne00, 1, sizeof(float), 0, 0, ne00, 1, sizeof(half), 0, 0, stream); @@ -7489,12 +7523,6 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec( break; } -#ifdef GGML_CUDA_F16 - if (src1_convert_f16) { - ggml_cuda_pool_free(src1_dfloat, ash); - } -#endif // GGML_CUDA_F16 - (void) src1; (void) dst; (void) src1_ddq_i; @@ -7529,29 +7557,26 @@ inline void ggml_cuda_op_mul_mat_cublas( if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 - half * src0_as_f16 = nullptr; - size_t src0_as = 0; + cuda_pool_alloc src0_as_f16; if (src0->type != GGML_TYPE_F16) { const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = row_diff*ne00; - src0_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src0_as); - to_fp16_cuda(src0_dd_i, src0_as_f16, ne, stream); + src0_as_f16.alloc(ne); + to_fp16_cuda(src0_dd_i, src0_as_f16.get(), ne, stream); } - const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16; + const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16.get(); - half * src1_as_f16 = nullptr; - size_t src1_as = 0; + cuda_pool_alloc src1_as_f16; if (src1->type != GGML_TYPE_F16) { const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = src1_ncols*ne10; - src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src1_as); - to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream); + src1_as_f16.alloc(ne); + to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream); } - const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16; - size_t dst_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as); + const half * src1_ptr = src1->type == GGML_TYPE_F16 ? 
(const half *) src1_ddf_i : src1_as_f16.get(); + cuda_pool_alloc dst_f16(row_diff*src1_ncols); const half alpha_f16 = 1.0f; const half beta_f16 = 0.0f; @@ -7560,36 +7585,25 @@ inline void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK( cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, - &alpha_f16, src0_ptr, CUDA_R_16F, ne00, - src1_ptr, CUDA_R_16F, ne10, - &beta_f16, dst_f16, CUDA_R_16F, ldc, + &alpha_f16, src0_ptr, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, + &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream); - - ggml_cuda_pool_free(dst_f16, dst_as); - - if (src1_as != 0) { - ggml_cuda_pool_free(src1_as_f16, src1_as); - } - - if (src0_as != 0) { - ggml_cuda_pool_free(src0_as_f16, src0_as); - } + to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); } else { - float * src0_ddq_as_f32 = nullptr; - size_t src0_as = 0; + cuda_pool_alloc src0_ddq_as_f32; if (src0->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); GGML_ASSERT(to_fp32_cuda != nullptr); - src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_as); // NOLINT - to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream); + src0_ddq_as_f32.alloc(row_diff*ne00); + to_fp32_cuda(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream); } - const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32; + const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get(); const float alpha = 1.0f; const float beta = 0.0f; @@ -7601,10 +7615,6 @@ inline void ggml_cuda_op_mul_mat_cublas( &alpha, src0_ddf_i, ne00, src1_ddf_i, ne10, &beta, dst_dd_i, ldc)); - - if (src0_as != 0) { - ggml_cuda_pool_free(src0_ddq_as_f32, src0_as); - } } (void) dst; @@ -7896,18 +7906,17 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s float * src1_ddf = nullptr; float * dst_ddf = nullptr; - // as = actual size - size_t src0_asf = 0; - size_t src1_asf = 0; - size_t dst_asf = 0; + cuda_pool_alloc src0_f; + cuda_pool_alloc src1_f; + cuda_pool_alloc dst_f; ggml_cuda_set_device(g_main_device); - const cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; + cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; if (src0_on_device) { src0_ddf = (float *) src0_extra->data_device[g_main_device]; } else { - src0_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src0), &src0_asf); + src0_ddf = src0_f.alloc(ggml_nelements(src0)); CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf, src0, 0, 0, 0, nrows0, main_stream)); } @@ -7915,14 +7924,14 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * s if (src1_on_device) { src1_ddf = (float *) src1_extra->data_device[g_main_device]; } else { - src1_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src1), &src1_asf); + src1_ddf = src1_f.alloc(ggml_nelements(src1)); CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf, src1, 0, 0, 0, nrows1, main_stream)); } } if (dst_on_device) { dst_ddf = (float *) dst_extra->data_device[g_main_device]; } else { - dst_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(dst), &dst_asf); + dst_ddf = dst_f.alloc(ggml_nelements(dst)); } // do the computation @@ -7934,16 +7943,6 @@ static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor 
* s CUDA_CHECK(cudaMemcpyAsync(dst->data, dst_ddf, ggml_nbytes(dst), cudaMemcpyDeviceToHost, main_stream)); } - if (dst_asf > 0) { - ggml_cuda_pool_free(dst_ddf, dst_asf); - } - if (src1_asf > 0) { - ggml_cuda_pool_free(src1_ddf, src1_asf); - } - if (src0_asf > 0) { - ggml_cuda_pool_free(src0_ddf, src0_asf); - } - if (dst->backend == GGML_BACKEND_CPU) { CUDA_CHECK(cudaDeviceSynchronize()); } @@ -8516,14 +8515,11 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); - size_t src1_as = 0; - half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as); - to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream); + cuda_pool_alloc src1_as_f16(ne1); + to_fp16_cuda(src1_ddf, src1_as_f16.get(), ne1, main_stream); - size_t dst_as = 0; - - half * dst_f16 = nullptr; - char * dst_t = nullptr; + cuda_pool_alloc dst_f16; + char * dst_t; cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; cudaDataType_t cu_data_type = CUDA_R_16F; @@ -8542,8 +8538,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const const void * beta = &beta_f16; if (dst->op_params[0] == GGML_PREC_DEFAULT) { - dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); - dst_t = (char *) dst_f16; + dst_t = (char *) dst_f16.alloc(ne); nbd2 /= sizeof(float) / sizeof(half); nbd3 /= sizeof(float) / sizeof(half); @@ -8590,9 +8585,9 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA - (const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB - beta, ( char *) dst_t, cu_data_type, ne01, dst->nb[2]/sizeof(float), // strideC + alpha, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA + (const char *) src1_as_f16.get(), CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB + beta, ( char *) dst_t, cu_data_type, ne01, dst->nb[2]/sizeof(float), // strideC ne12*ne13, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); @@ -8600,19 +8595,13 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const // use cublasGemmBatchedEx const int ne23 = ne12*ne13; - const void ** ptrs_src = nullptr; - void ** ptrs_dst = nullptr; - - size_t ptrs_src_s = 0; - size_t ptrs_dst_s = 0; - - ptrs_src = (const void **) ggml_cuda_pool_malloc(2*ne23*sizeof(void *), &ptrs_src_s); - ptrs_dst = ( void **) ggml_cuda_pool_malloc(1*ne23*sizeof(void *), &ptrs_dst_s); + cuda_pool_alloc ptrs_src(2*ne23); + cuda_pool_alloc< void *> ptrs_dst(1*ne23); dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( - src0_as_f16, src1_as_f16, dst_t, - ptrs_src, ptrs_dst, + src0_as_f16, src1_as_f16.get(), dst_t, + ptrs_src.get(), ptrs_dst.get(), ne12, ne13, ne23, nb02, nb03, @@ -8624,30 +8613,19 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half), - (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float), - beta, ( void **) (ptrs_dst + 0*ne23), cu_data_type, ne01, 
+ alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/sizeof(half), + (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, nb11/sizeof(float), + beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne01, ne23, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - - if (ptrs_dst_s != 0) { - ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s); - } - if (ptrs_src_s != 0) { - ggml_cuda_pool_free(ptrs_src, ptrs_src_s); - } } #endif if (dst->op_params[0] == GGML_PREC_DEFAULT) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); - - ggml_cuda_pool_free(dst_f16, dst_as); + to_fp32_cuda(dst_f16.get(), dst_ddf, ne, main_stream); } - - ggml_cuda_pool_free(src1_as_f16, src1_as); } static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -8974,12 +8952,11 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); } } else { - size_t as_src1, as_dst; - char * src1_contiguous = (char *) ggml_cuda_pool_malloc(sizeof(float)*ggml_nelements(src1), &as_src1); - char * dst_contiguous = (char *) ggml_cuda_pool_malloc(sizeof(float)*ggml_nelements(dst), &as_dst); + cuda_pool_alloc src1_contiguous(sizeof(float)*ggml_nelements(src1)); + cuda_pool_alloc dst_contiguous(sizeof(float)*ggml_nelements(dst)); - src1_row_extra.data_device[g_main_device] = src1_contiguous; - dst_row_extra.data_device[g_main_device] = dst_contiguous; + src1_row_extra.data_device[g_main_device] = src1_contiguous.get(); + dst_row_extra.data_device[g_main_device] = dst_contiguous.get(); const cudaMemcpyKind src1_kind = src1->backend == GGML_BACKEND_CPU ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; @@ -8999,7 +8976,7 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(row_id >= 0 && row_id < n_as); - CUDA_CHECK(cudaMemcpyAsync(src1_contiguous + num_src1_rows*nb11, src1_original + i01*nb11, + CUDA_CHECK(cudaMemcpyAsync(src1_contiguous.get() + num_src1_rows*nb11, src1_original + i01*nb11, nb11, src1_kind, stream)); num_src1_rows++; } @@ -9031,14 +9008,11 @@ static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * s GGML_ASSERT(row_id >= 0 && row_id < n_as); - CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous + num_src1_rows*nb1, + CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous.get() + num_src1_rows*nb1, nb1, dst_kind, stream)); num_src1_rows++; } } - - ggml_cuda_pool_free(dst_contiguous, as_dst); - ggml_cuda_pool_free(src1_contiguous, as_src1); } if (dst->backend == GGML_BACKEND_CPU) { From b7da1ba00ee108f4637a8873e26ba6dc9d5801a2 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 17:08:29 +0100 Subject: [PATCH 10/21] fix hip build --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index aced0fa6ece5d..da7dea29c0ed4 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6835,7 +6835,7 @@ void ggml_init_cublas() { g_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) - g_device_caps[id] = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; + g_device_caps[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; #else g_device_caps[id].cc = 100*prop.major + 10*prop.minor; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) From d8b06c214802e31ebba3ff24aef8222f59f351a4 Mon Sep 17 00:00:00 
2001 From: slaren Date: Sat, 23 Dec 2023 17:57:25 +0100 Subject: [PATCH 11/21] CUBLAS_TF32_TENSOR_OP_MATH is not a macro --- ggml-cuda.cu | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index da7dea29c0ed4..e9677943aa7fe 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -91,13 +91,14 @@ #include #include #include -// CUDA 10.2 does not have these macro definitions. -#ifndef CUBLAS_TF32_TENSOR_OP_MATH + +#if CUDART_VERSION < 11020 #define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH #define CUBLAS_COMPUTE_16F CUDA_R_16F #define CUBLAS_COMPUTE_32F CUDA_R_32F #define cublasComputeType_t cudaDataType_t -#endif +#endif // CUDART_VERSION < 11020 + #endif // defined(GGML_USE_HIPBLAS) #include "ggml-cuda.h" From 9f5ac6d2d201abb3c6d23baa450f5ccfc44cd99a Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 18:05:35 +0100 Subject: [PATCH 12/21] more hip crap --- ggml-cuda.cu | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index e9677943aa7fe..007b893592aab 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -86,6 +86,15 @@ #define cudaStream_t hipStream_t #define cudaSuccess hipSuccess #define __trap abort +#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS +#define CUBLAS_STATUS_NOT_INITIALIZED HIPBLAS_STATUS_NOT_INITIALIZED +#define CUBLAS_STATUS_ALLOC_FAILED HIPBLAS_STATUS_ALLOC_FAILED +#define CUBLAS_STATUS_INVALID_VALUE HIPBLAS_STATUS_INVALID_VALUE +#define CUBLAS_STATUS_ARCH_MISMATCH HIPBLAS_STATUS_ARCH_MISMATCH +#define CUBLAS_STATUS_MAPPING_ERROR HIPBLAS_STATUS_MAPPING_ERROR +#define CUBLAS_STATUS_EXECUTION_FAILED HIPBLAS_STATUS_EXECUTION_FAILED +#define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR +#define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED #else #include #include @@ -218,18 +227,11 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; - case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; default: return "unknown error"; } } #endif // CUDART_VERSION >= 12000 -static const char * cu_get_error_str(CUresult err) { - const char * err_str; - cuGetErrorString(err, &err_str); - return err_str; -} - [[noreturn]] static void ggml_cuda_error(const char * stmt, const char * func, const char * file, const int line, const char * msg) { fprintf(stderr, "CUDA error: %s: %s\n", stmt, msg); @@ -239,7 +241,15 @@ static void ggml_cuda_error(const char * stmt, const char * func, const char * f #define CUDA_CHECK(err) do { auto err_ = (err); if (err_ != cudaSuccess) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cudaGetErrorString(err_)); } while (0) #define CUBLAS_CHECK(err) do { auto err_ = (err); if (err_ != CUBLAS_STATUS_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cublas_get_error_str(err_)); } while (0) + +#if !defined(GGML_USE_HIPBLAS) +static const char * cu_get_error_str(CUresult err) { + const char * err_str; + cuGetErrorString(err, &err_str); + return err_str; +} #define CU_CHECK(err) do { auto err_ = (err); if (err_ != CUDA_SUCCESS) ggml_cuda_error(#err, __func__, __FILE__, __LINE__, cu_get_error_str(err_)); } while (0) +#endif #if CUDART_VERSION >= 11100 #define GGML_CUDA_ASSUME(x) __builtin_assume(x) From 5eb626225dc7aaee79dd7d48b8674e40777f8364 Mon Sep 
17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 19:13:52 +0100 Subject: [PATCH 13/21] llama : fix msvc warnings --- llama.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llama.cpp b/llama.cpp index 4e4495739bbbd..819d29d2b4cd4 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1275,7 +1275,7 @@ struct llama_hparams { if (this->rope_finetuned != other.rope_finetuned) return true; if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true; - const float EPSILON = 1e-9; + const float EPSILON = 1e-9f; if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true; if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true; @@ -10294,7 +10294,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch std::string result = model->vocab.id_to_token[token].text; llama_unescape_whitespace(result); if (length < (int) result.length()) { - return -result.length(); + return -(int) result.length(); } memcpy(buf, result.c_str(), result.length()); return result.length(); @@ -10324,7 +10324,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch std::string result = model->vocab.id_to_token[token].text; result = llama_decode_text(result); if (length < (int) result.length()) { - return -result.length(); + return -(int) result.length(); } memcpy(buf, result.c_str(), result.length()); return result.length(); From 6fe9da0f8a83b4a4112531c1d0ad4e55a23ef6a0 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 19:20:59 +0100 Subject: [PATCH 14/21] ggml : fix msvc warnings --- ggml-backend.c | 2 +- ggml.c | 2 +- ggml.h | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 0c8c9ec430475..526ddab6709d2 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -297,7 +297,7 @@ static void ggml_backend_registry_init(void) { void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) { GGML_ASSERT(ggml_backend_registry_count < GGML_MAX_BACKENDS_REG); - int id = ggml_backend_registry_count; + size_t id = ggml_backend_registry_count; ggml_backend_registry[id] = (struct ggml_backend_reg) { /* .name = */ {0}, diff --git a/ggml.c b/ggml.c index 3656422d73767..73600ab050ec8 100644 --- a/ggml.c +++ b/ggml.c @@ -19351,7 +19351,7 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data; } gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n); - free(data); + free((void *)data); } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) { GGML_ASSERT(false && "nested arrays not supported"); } else { diff --git a/ggml.h b/ggml.h index 338f355a408b3..67d6bc4f1ef1b 100644 --- a/ggml.h +++ b/ggml.h @@ -255,6 +255,8 @@ #define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached") #elif defined(__GNUC__) #define GGML_UNREACHABLE() __builtin_unreachable() +#elif defined(_MSC_VER) +#define GGML_UNREACHABLE() __assume(0) #else #define GGML_UNREACHABLE() ((void) 0) #endif From d8883623a64ca775ae1ecabd06613e89786aeffe Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Dec 2023 19:25:29 +0100 Subject: [PATCH 15/21] minor --- Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 489591a3418e4..28c6d79bcd7d5 100644 --- a/Makefile +++ b/Makefile @@ -367,18 +367,15 @@ endif # LLAMA_BLIS ifdef LLAMA_CUBLAS MK_CPPFLAGS += 
-DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include -I/usr/local/cuda/targets/aarch64-linux/include - MK_LDFLAGS += -lcuda -L/usr/lib/wsl/lib -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib + MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib OBJS += ggml-cuda.o MK_NVCCFLAGS = -use_fast_math - ifndef JETSON_EOL_MODULE_DETECT MK_NVCCFLAGS += --forward-unknown-to-host-compiler endif # JETSON_EOL_MODULE_DETECT - ifdef LLAMA_DEBUG MK_NVCCFLAGS += -lineinfo -endif - +endif # LLAMA_DEBUG ifdef LLAMA_CUDA_NVCC NVCC = $(LLAMA_CUDA_NVCC) else From ab6ad5e6e83f318dac74d94f7e817f0ac2ec5cc0 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 11:58:36 +0100 Subject: [PATCH 16/21] minor --- ggml-backend.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 526ddab6709d2..526ce732be5b5 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -330,6 +330,8 @@ size_t ggml_backend_reg_find_by_name(const char * name) { return i; } } + + // not found return SIZE_MAX; } @@ -340,15 +342,15 @@ ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str) const char * params = strchr(backend_str, ':'); char backend_name[128]; if (params == NULL) { - strcpy(backend_name, backend_str); + snprintf(backend_name, sizeof(backend_name), "%s", backend_str); params = ""; } else { - strncpy(backend_name, backend_str, params - backend_str); - backend_name[params - backend_str] = '\0'; + snprintf(backend_name, sizeof(backend_name), "%.*s", (int)(params - backend_str), backend_str); params++; } size_t backend_i = ggml_backend_reg_find_by_name(backend_name); + if (backend_i == SIZE_MAX) { fprintf(stderr, "%s: backend %s not found\n", __func__, backend_name); return NULL; @@ -396,18 +398,12 @@ static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { } static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - memcpy((char *)tensor->data + offset, data, size); GGML_UNUSED(buffer); } static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - memcpy(data, (const char *)tensor->data + offset, size); GGML_UNUSED(buffer); From 5acc9e50f5f47709207e844acdfffa488370b0d9 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 11:59:15 +0100 Subject: [PATCH 17/21] cuda : fallback to CPU on host buffer alloc fail --- ggml-cuda.cu | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f61f06853da29..bdf7498bae9ce 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9797,8 +9797,10 @@ static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buff static ggml_backend_buffer_t 
ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * ptr = ggml_cuda_host_malloc(size); + if (ptr == nullptr) { - return nullptr; + // fallback to cpu buffer + return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); } // FIXME: this is a hack to avoid having to implement a new buffer type From b9c5a6e74ab90359ffc311f074cbd01317467a04 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 12:12:08 +0100 Subject: [PATCH 18/21] Update ggml-cuda.cu MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Johannes Gäßler --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index bdf7498bae9ce..167bcf1c52fd9 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6749,7 +6749,7 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { #else #define ggml_cuda_pool_malloc ggml_cuda_pool_malloc_leg #define ggml_cuda_pool_free ggml_cuda_pool_free_leg -#endif +#endif // !defined(GGML_USE_HIPBLAS) template struct cuda_pool_alloc { From 3081c4e768b96cc06f9553c4e5df765831a829b4 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 12:13:55 +0100 Subject: [PATCH 19/21] Update ggml-cuda.cu MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Johannes Gäßler --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 167bcf1c52fd9..01a7bc82e031a 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6836,7 +6836,7 @@ void ggml_init_cublas() { alloc_prop.location.id = id; CU_CHECK(cuMemGetAllocationGranularity(&g_device_caps[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); } -#endif +#endif // !defined(GGML_USE_HIPBLAS) g_device_caps[id].vmm = !!device_vmm; cudaDeviceProp prop; From 3ad45fc3a839e5543cb42d2230aa7f3fbb12b66a Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 12:26:04 +0100 Subject: [PATCH 20/21] ensure allocations are always aligned --- ggml-cuda.cu | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 01a7bc82e031a..b5679c42fcb8e 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6657,6 +6657,10 @@ static void * ggml_cuda_pool_malloc_vmm(size_t size, size_t * actual_size) { int id; CUDA_CHECK(cudaGetDevice(&id)); + // round up the allocation size to the alignment to ensure that all allocations are aligned for all data types + const size_t alignment = 128; + size = alignment * ((size + alignment - 1) / alignment); + size_t avail = g_cuda_pool_size[id] - g_cuda_pool_used[id]; if (size > avail) { From 532cb9b99ca94085a14bd5699ca7a58efa36900d Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 24 Dec 2023 12:26:35 +0100 Subject: [PATCH 21/21] act_size -> actual_size --- ggml-cuda.cu | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index b5679c42fcb8e..ac3b3c14d53df 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6758,12 +6758,12 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { template struct cuda_pool_alloc { T * ptr = nullptr; - size_t act_size = 0; + size_t actual_size = 0; // size is in number of elements T * alloc(size_t size) { GGML_ASSERT(ptr == nullptr); - ptr = (T *) ggml_cuda_pool_malloc(size * sizeof(T), &this->act_size); + ptr = (T *) ggml_cuda_pool_malloc(size * sizeof(T), &this->actual_size); return ptr; } @@ -6773,7 +6773,7 @@ struct cuda_pool_alloc { ~cuda_pool_alloc() { 
if (ptr != nullptr) { - ggml_cuda_pool_free(ptr, act_size); + ggml_cuda_pool_free(ptr, actual_size); } }
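// Typical call-site pattern with the RAII helper above (a sketch, not a hunk from this
// series; the tensor names and sizes are made up for illustration):
//
//   {
//       cuda_pool_alloc<half> tmp_f16(nelements);           // takes nelements*sizeof(half) bytes from the pool
//       to_fp16_cuda(src_f32, tmp_f16.get(), nelements, stream);
//       ...                                                  // use tmp_f16.get() as a normal device pointer
//   }                                                        // returned to the pool here automatically,
//                                                            // in reverse order of allocation as the VMM
//                                                            // pool requires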
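For reference, a minimal standalone program showing the driver-API virtual memory sequence the new pool is built on. This is an illustration only, not part of the patch series; device 0, the reservation size, and the DRV_CHECK macro name are arbitrary choices made for the sketch.

// build sketch: nvcc vmm_demo.cu -o vmm_demo -lcuda
#include <cuda.h>
#include <cstdio>

#define DRV_CHECK(x) do { CUresult r = (x); if (r != CUDA_SUCCESS) { \
    const char * s = "?"; cuGetErrorString(r, &s); \
    fprintf(stderr, "%s failed: %s\n", #x, s); return 1; } } while (0)

int main() {
    DRV_CHECK(cuInit(0));
    CUdevice dev;
    DRV_CHECK(cuDeviceGet(&dev, 0));
    CUcontext ctx;
    DRV_CHECK(cuCtxCreate(&ctx, 0, dev));

    int vmm = 0;
    DRV_CHECK(cuDeviceGetAttribute(&vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, dev));
    if (!vmm) { fprintf(stderr, "no VMM support, the legacy cudaMalloc pool would be used instead\n"); return 1; }

    CUmemAllocationProp prop = {};
    prop.type          = CU_MEM_ALLOCATION_TYPE_PINNED;
    prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    prop.location.id   = 0;

    size_t granularity = 0;
    DRV_CHECK(cuMemGetAllocationGranularity(&granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM));

    // reserve a large virtual address range up front; no physical memory is used yet
    const size_t va_size = 512 * granularity; // e.g. 1 GiB with a 2 MiB granularity; arbitrary cap for the demo
    CUdeviceptr base = 0;
    DRV_CHECK(cuMemAddressReserve(&base, va_size, 0, 0, 0));

    // back the first chunk of the range with physical memory and make it accessible
    const size_t chunk = granularity; // cuMemCreate sizes must be multiples of the granularity
    CUmemGenericAllocationHandle handle;
    DRV_CHECK(cuMemCreate(&handle, chunk, &prop, 0));
    DRV_CHECK(cuMemMap(base, chunk, 0, handle, 0));

    CUmemAccessDesc access = {};
    access.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    access.location.id   = 0;
    access.flags         = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    DRV_CHECK(cuMemSetAccess(base, chunk, &access, 1));

    DRV_CHECK(cuMemsetD8(base, 0, chunk)); // the mapped region now behaves like a normal device pointer

    printf("granularity %zu bytes, mapped %zu bytes at 0x%llx\n",
           granularity, chunk, (unsigned long long) base);

    // teardown in reverse order: unmap, release the handle, free the address range
    DRV_CHECK(cuMemUnmap(base, chunk));
    DRV_CHECK(cuMemRelease(handle));
    DRV_CHECK(cuMemAddressFree(base, va_size));
    DRV_CHECK(cuCtxDestroy(ctx));
    return 0;
}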