From dc1c5ae7ecc9bdceece7720ba894aa0868b1ff0e Mon Sep 17 00:00:00 2001
From: Sebastian Apel <13675545+SebastianApel@users.noreply.github.com>
Date: Mon, 3 Apr 2023 13:49:15 +0200
Subject: [PATCH 1/5] Experimental code that achieves 30k FLOPS

---
 ggml.c | 194 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 192 insertions(+), 2 deletions(-)

diff --git a/ggml.c b/ggml.c
index 59e84ab45d120..a2ed6bca08426 100644
--- a/ggml.c
+++ b/ggml.c
@@ -2188,6 +2188,162 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest
     *s = sumf;
 }
 
+static void seap_ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy, const int tilesize_x, const int tilesize_y, const int rowlength, const int dst_stridelength) {
+    const int nb = n / QK;
+
+    assert(n % QK == 0);
+    assert(nb % 2 == 0);
+
+    const block_q4_0 * restrict x = vx;
+    const block_q4_0 * restrict y = vy;
+
+    float sumf = 0.0;
+
+//#if defined(__AVX2__)
+#if 1
+
+#define SEAP_TILESIZE_X 1
+#define SEAP_TILESIZE_Y 8
+#define UNROLL_COUNT 8/SEAP_TILESIZE_Y
+#undef SEAP_DEBUG
+
+    // Initialize accumulator with zeros
+    __m256 acc[SEAP_TILESIZE_Y]; // = 0; // _mm256_setzero_ps();
+    for (int i = 0; i < SEAP_TILESIZE_Y; i++) {
+        acc[i] = _mm256_setzero_ps();
+    }
+
+    for (int i = 0; i < nb; i += UNROLL_COUNT) {
+
+        /* get input from x
+           Input: 32 Nibbles (16 bytes) at *x[i]
+           Output: 2 vectors with 16 values of type int16_t (x_high_q, x_low_q) */
+        __m256i x_high_q[SEAP_TILESIZE_X];
+        __m256i x_low_q[SEAP_TILESIZE_X];
+        EXPAND_32_Q4_NIBBLES_INTO_TWO_M256_VECTORS(x_high_q, x_low_q, x[i].qs, 0)
+
+        for (int u = 0; u < UNROLL_COUNT; u++) {
+            for (int t = 0; t < SEAP_TILESIZE_Y; t++) {
+
+                /* Compute the combined scale for the block */
+                __m256 scale[SEAP_TILESIZE_Y];
+                scale[t] = _mm256_set1_ps( x[i+u].d * y[i+u+t*rowlength].d );
+
+#ifdef SEAP_DEBUG
+                printf("x[%i].d=%f\n", i+u, x[i+u].d);
+#endif
+
+                /* get input from y
+                   Input: 32 Nibbles (16 bytes) at *y[i+u]
+                   Output: 2 vectors with 16 values of type int16_t (y_high_q, y_low_q) */
+                __m256i y_high_q[SEAP_TILESIZE_Y];
+                __m256i y_low_q[SEAP_TILESIZE_Y];
+
+                EXPAND_32_Q4_NIBBLES_INTO_TWO_M256_VECTORS(y_high_q, y_low_q, y[i+u+t*rowlength].qs, t)
+
+                /* Compute products of int16_t integers, add pairwise, store as int32_t */
+                __m256i xy_high_q[SEAP_TILESIZE_Y];
+                xy_high_q[t] = _mm256_madd_epi16( x_high_q[0], y_high_q[t] );
+                __m256i xy_low_q[SEAP_TILESIZE_Y];
+                xy_low_q[t] = _mm256_madd_epi16( x_low_q[0], y_low_q[t] );
+
+                /* Accumulate the products of int32_t integers -> we now have a vector of 8 int32_t */
+                __m256i xy_q[SEAP_TILESIZE_Y];
+                xy_q[t] = _mm256_add_epi32( xy_high_q[t], xy_low_q[t] );
+
+                /* Convert the vector of 8 int32_t to 8 floats */
+                __m256 q[SEAP_TILESIZE_Y];
+                q[t] = _mm256_cvtepi32_ps( xy_q[t] );
+
+                /* Multiply q with scale and accumulate */
+                acc[t] = _mm256_fmadd_ps( scale[t], q[t], acc[t] );
+
+            }
+
+        }
+
+    }
+
+    for (int t = 0; t < SEAP_TILESIZE_Y; t++) {
+        // Return the horizontal sum of the acc vector and store it at the strided destination
+        __m128 res = _mm256_extractf128_ps( acc[t], 1 );
+        res = _mm_add_ps( res, _mm256_castps256_ps128( acc[t] ) );
+        res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
+        res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
+        s[t*dst_stridelength] = _mm_cvtss_f32( res );
+    }
+
+#else
+    // scalar
+    for (int i = 0; i < nb; i++) {
+        const float d0 = x[i].d;
+        const float d1 = y[i].d;
+
+        const uint8_t * restrict p0 = x[i].qs;
+        const uint8_t * restrict p1 = y[i].qs;
+
+        for (int j = 0; j < QK/2; j++) {
+            const uint8_t v0 = p0[j];
+            const uint8_t v1 = p1[j];
+
+            const float f0 = d0*((int8_t) (v0 & 0xf) - 8);
+            const float f1 = d0*((int8_t) (v0 >> 4) - 8);
+
+            const float f2 = d1*((int8_t) (v1 & 0xf) - 8);
+            const float f3 = d1*((int8_t) (v1 >> 4) - 8);
+
+            sumf += f0*f2 + f1*f3;
+        }
+    }
+    *s = sumf;
+#endif
+}
+
 static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
     const int nb = n / QK;
 
@@ -6718,9 +6874,43 @@ static void ggml_compute_forward_mul_mat_q_f32(
 
         assert(ne00 % 32 == 0);
 
-        for (int64_t ic = 0; ic < ne11; ++ic) {
-            vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
-        }
+        if (ne11 < SEAP_TILESIZE_Y) {
+            // existing non-tiled implementation
+            for (int64_t ic = 0; ic < ne11; ++ic) {
+                vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
+            }
+        } else {
+            // tiled implementation
+            if ((ne11 % SEAP_TILESIZE_Y) != 0) {
+                printf("ne11=%i\n",ne11);
+            }
+            assert((ne11 % SEAP_TILESIZE_Y) == 0); // make sure we have a multiple of the tilesize
+
+            for (int64_t ic = 0; ic < ne11; ic+=SEAP_TILESIZE_Y) {
+                //vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
+
+                #ifdef SEAP_DEBUG
+                for (int t = 0; t < SEAP_TILESIZE_Y; t++) {
+                    printf("src1 block %i: d=%f\n", (int)(ic+t), ((const block_q4_0 *)(src1_col + (ic+t)*row_size))->d);
+                }
+                #endif
+
+                seap_ggml_vec_dot_q4_0(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size), SEAP_TILESIZE_X, SEAP_TILESIZE_Y,
+                                       row_size/GGML_TYPE_SIZE[type], ne0);
+
+                #ifdef SEAP_DEBUG
+                for (int t = 0; t < SEAP_TILESIZE_Y; t++) {
+                    printf("dst_col[%i]=%f\n", (int)(ic+t), dst_col[(ic+t)*ne0]);
+                }
+                if (ic >= 3) exit(0);
+                #endif
+            }
+
+        }
 
         //int64_t t1 = ggml_time_us();
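The idea behind this first patch, independent of the AVX2 details: expand a quantized block of x into registers once, then stream SEAP_TILESIZE_Y rows of y past it, so the expensive nibble unpacking of x is amortized over eight dot products. The following self-contained scalar sketch shows only that 1xT access pattern; plain floats stand in for the quantized blocks, and the names (dot_tile_1xT, T, N) are illustrative, not from ggml.c:

    #include <stdio.h>

    #define N 256   /* elements per row */
    #define T 8     /* tile height, playing the role of SEAP_TILESIZE_Y */

    /* Dot one row of x against T rows of y at once. */
    static void dot_tile_1xT(const float *x, const float *y, int stride,
                             float *out, int out_stride) {
        float acc[T] = {0};
        for (int i = 0; i < N; i++) {
            const float xi = x[i];           /* loaded once ...       */
            for (int t = 0; t < T; t++) {    /* ... reused T times    */
                acc[t] += xi * y[t*stride + i];
            }
        }
        for (int t = 0; t < T; t++) {
            out[t*out_stride] = acc[t];      /* strided store, like s[t*dst_stridelength] */
        }
    }

    int main(void) {
        static float x[N], y[T*N], out[T];
        for (int i = 0; i < N;   i++) x[i] = 1.0f;
        for (int i = 0; i < T*N; i++) y[i] = 0.5f;
        dot_tile_1xT(x, y, N, out, 1);
        printf("out[0]=%f (expected %f)\n", out[0], N*0.5f);
        return 0;
    }

The single load of x feeding T accumulators is what raises the arithmetic intensity; the AVX2 code above does the same thing with __m256 accumulators and _mm256_fmadd_ps.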
From 361632264c7aa86ac0f30652701841cdac36d81a Mon Sep 17 00:00:00 2001
From: Sebastian Apel <13675545+SebastianApel@users.noreply.github.com>
Date: Mon, 3 Apr 2023 21:20:55 +0200
Subject: [PATCH 2/5] Working version of tiled implementation

---
 ggml.c | 297 ++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 212 insertions(+), 85 deletions(-)

diff --git a/ggml.c b/ggml.c
index a2ed6bca08426..a5f0054d1c748 100644
--- a/ggml.c
+++ b/ggml.c
@@ -2188,7 +2188,8 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest
     *s = sumf;
 }
 
-static void seap_ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy, const int tilesize_x, const int tilesize_y, const int rowlength, const int dst_stridelength) {
+static void seap_ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy,
+                                   const int rowlength_x, const int rowlength_y, const int dst_stridelength_x, const int dst_stridelength_y) {
     const int nb = n / QK;
 
     assert(n % QK == 0);
@@ -2203,15 +2204,28 @@ static void seap_ggml_vec_dot_q4_0(const int n, float * restrict s, const void *
 
 //#if defined(__AVX2__)
 #if 1
 
-#define SEAP_TILESIZE_X 1
-#define SEAP_TILESIZE_Y 8
-#define UNROLL_COUNT 8/SEAP_TILESIZE_Y
-#undef SEAP_DEBUG
+#define EXPERIMENT_TILESIZE_X 8
+#define EXPERIMENT_TILESIZE_Y 1
+//#define EXPERIMENT_TILESIZE_X 2
+//#define EXPERIMENT_TILESIZE_Y 2
+
+#define UNROLL_COUNT 1 // 8/EXPERIMENT_TILESIZE_Y
+//#define EXPERIMENT_DEBUG
+#undef EXPERIMENT_DEBUG
+#undef EXPERIMENT_DEBUG2
+
+#ifdef EXPERIMENT_DEBUG
+    printf("rowlength_x=%i,rowlength_y=%i,dst_stridelength_x=%i,dst_stridelength_y=%i\n",rowlength_x,rowlength_y,dst_stridelength_x,dst_stridelength_y);
+#endif
 
     // Initialize accumulator with zeros
-    __m256 acc[SEAP_TILESIZE_Y]; // = 0; // _mm256_setzero_ps();
-    for (int i = 0; i < SEAP_TILESIZE_Y; i++) {
-        acc[i] = _mm256_setzero_ps();
-    }
-
-    for (int i = 0; i < nb; i += UNROLL_COUNT) {
-
-        /* get input from x
-           Input: 32 Nibbles (16 bytes) at *x[i]
-           Output: 2 vectors with 16 values of type int16_t (x_high_q, x_low_q) */
-        __m256i x_high_q[SEAP_TILESIZE_X];
-        __m256i x_low_q[SEAP_TILESIZE_X];
-        EXPAND_32_Q4_NIBBLES_INTO_TWO_M256_VECTORS(x_high_q, x_low_q, x[i].qs, 0)
-
-        for (int u = 0; u < UNROLL_COUNT; u++) {
-            for (int t = 0; t < SEAP_TILESIZE_Y; t++) {
-
-                /* Compute the combined scale for the block */
-                __m256 scale[SEAP_TILESIZE_Y];
-                scale[t] = _mm256_set1_ps( x[i+u].d * y[i+u+t*rowlength].d );
-
-#ifdef SEAP_DEBUG
-                printf("x[%i].d=%f\n", i+u, x[i+u].d);
-#endif
-
-                /* get input from y
-                   Input: 32 Nibbles (16 bytes) at *y[i+u]
-                   Output: 2 vectors with 16 values of type int16_t (y_high_q, y_low_q) */
-                __m256i y_high_q[SEAP_TILESIZE_Y];
-                __m256i y_low_q[SEAP_TILESIZE_Y];
-
-                EXPAND_32_Q4_NIBBLES_INTO_TWO_M256_VECTORS(y_high_q, y_low_q, y[i+u+t*rowlength].qs, t)
-
-                /* Compute products of int16_t integers, add pairwise, store as int32_t */
-                __m256i xy_high_q[SEAP_TILESIZE_Y];
-                xy_high_q[t] = _mm256_madd_epi16( x_high_q[0], y_high_q[t] );
-                __m256i xy_low_q[SEAP_TILESIZE_Y];
-                xy_low_q[t] = _mm256_madd_epi16( x_low_q[0], y_low_q[t] );
+    __m256 acc[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+    for (int tx = 0; tx < EXPERIMENT_TILESIZE_X; tx++) {
+        for (int ty = 0; ty < EXPERIMENT_TILESIZE_Y; ty++) {
+            acc[tx][ty] = _mm256_setzero_ps();
+        }
+    }
+
+    for (int i = 0; i < nb; i += UNROLL_COUNT) {
+
+        for (int u = 0; u < UNROLL_COUNT; u++) {
+
+            for (int tx = 0; tx < EXPERIMENT_TILESIZE_X; tx++) {
+
+                /* get input from x
+                   Input: 32 Nibbles (16 bytes) at *x[i+u+tx*rowlength_x]
+                   Output: 2 vectors with 16 values of type int16_t (x_high_q, x_low_q) */
+                __m256i x_high_q[EXPERIMENT_TILESIZE_X];
+                __m256i x_low_q[EXPERIMENT_TILESIZE_X];
+
+                EXPAND_32_Q4_NIBBLES_INTO_TWO_M256_VECTORS(x[i+u+tx*rowlength_x].qs, x_high_q, x_low_q, tx)
+
+                #ifdef EXPERIMENT_DEBUG
+                printf("x[%i].d=%f\n", i+u+tx*rowlength_x, x[i+u+tx*rowlength_x].d);
+                #endif
+
+                __m256 scale[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+
+                for (int ty = 0; ty < EXPERIMENT_TILESIZE_Y; ty++) {
+
+                    /* Compute the combined scale for the block */
+                    scale[tx][ty] = _mm256_set1_ps( x[i+u+tx*rowlength_x].d * y[i+u+ty*rowlength_y].d );
+
+                    #ifdef EXPERIMENT_DEBUG
+                    printf("y[%i].d=%f\n", i+u+ty*rowlength_y, y[i+u+ty*rowlength_y].d);
+                    #endif
+
+                    /* get input from y
+                       Input: 32 Nibbles (16 bytes) at *y[i+u]
+                       Output: 2 vectors with 16 values of type int16_t (y_high_q, y_low_q) */
+                    __m256i y_high_q[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+                    __m256i y_low_q[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+
+                    EXPAND_32_Q4_NIBBLES_INTO_TWO_M256_VECTORS(y[i+u+ty*rowlength_y].qs, y_high_q[tx], y_low_q[tx], ty)
+
+                    /* Compute products of int16_t integers, add pairwise, store as int32_t */
+                    __m256i xy_high_q[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+                    xy_high_q[tx][ty] = _mm256_madd_epi16( x_high_q[tx], y_high_q[tx][ty] );
+                    __m256i xy_low_q[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+                    xy_low_q[tx][ty] = _mm256_madd_epi16( x_low_q[tx], y_low_q[tx][ty] );
-
-                /* Accumulate the products of int32_t integers -> we now have a vector of 8 int_32t */
-                __m256i xy_q[SEAP_TILESIZE_Y];
-                xy_q[t] = _mm256_add_epi32( xy_high_q[t], xy_low_q[t] );
-
-                /* Convert the vector of 8 int32_t to 8 floats */
-                __m256 q[SEAP_TILESIZE_Y];
-                q[t] = _mm256_cvtepi32_ps( xy_q[t] );
-
-                /* Multiply q with scale and accumulate */
-                acc[t] = _mm256_fmadd_ps( scale[t], q[t], acc[t] );
-
-            }
-
-        }
-
-    }
-
-    for (int t = 0; t < SEAP_TILESIZE_Y; t++) {
-        // Return the horizontal sum of the acc vector and store it at the strided destination
-        __m128 res = _mm256_extractf128_ps( acc[t], 1 );
-        res = _mm_add_ps( res, _mm256_castps256_ps128( acc[t] ) );
-        res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
-        res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
-        s[t*dst_stridelength] = _mm_cvtss_f32( res );
-    }
+
+                    /* Accumulate the products of int32_t integers -> we now have a vector of 8 int32_t */
+                    __m256i xy_q[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+                    xy_q[tx][ty] = _mm256_add_epi32( xy_high_q[tx][ty], xy_low_q[tx][ty] );
+
+                    /* Convert the vector of 8 int32_t to 8 floats */
+                    __m256 q[EXPERIMENT_TILESIZE_X][EXPERIMENT_TILESIZE_Y];
+                    q[tx][ty] = _mm256_cvtepi32_ps( xy_q[tx][ty] );
+
+                    /* Multiply q with scale and accumulate */
+                    acc[tx][ty] = _mm256_fmadd_ps( scale[tx][ty], q[tx][ty], acc[tx][ty] );
+
+                }
+            }
+        }
+    }
+
+    for (int tx = 0; tx < EXPERIMENT_TILESIZE_X; tx++) {
+        for (int ty = 0; ty < EXPERIMENT_TILESIZE_Y; ty++) {
+            // Return the horizontal sum of the acc vector and store it at the strided destination
+            __m128 res = _mm256_extractf128_ps( acc[tx][ty], 1 );
+            res = _mm_add_ps( res, _mm256_castps256_ps128( acc[tx][ty] ) );
+            res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
+            res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
+            const float sum = _mm_cvtss_f32( res );
+
+            float * p = s + tx*dst_stridelength_x + ty*dst_stridelength_y;
+            *p = sum;
+
+#ifdef EXPERIMENT_DEBUG
+            printf("sum=%f -> dot_vec dst[%i,%i] @ %li = %f \n", sum, tx, ty, (long int)p, *(float *)(p));
+#endif
+        } // for ty
+    } // for tx
 
 #else
     // scalar
@@ -6704,6 +6732,42 @@ static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = {
     },
 };
 
+float tensor_sum_elements(struct ggml_tensor * tensor) {
+    float sum = 0;
+    if (tensor->type==6) {
+        for (int j = 0; j < tensor->ne[1]; j++) {
+            for (int k = 0; k < tensor->ne[0]; k++) {
+                void *p = &((float *) tensor->data)[j*tensor->ne[0]+k];
+                float val = ((float *) tensor->data)[j*tensor->ne[0]+k];
+#ifdef EXPERIMENT_DEBUG2
+                printf("val[%i,%i] @ %lli =%f\n", j, k, (long long int)p, val);
+#endif
+                sum += val;
+            }
+        }
+        return sum;
+    } else if (tensor->type==0) {
+
+        for (int j = 0; j < tensor->ne[1] / QK; j++) {
+            for (int k = 0; k < tensor->ne[0] / QK; k++) {
+                block_q4_0 *blk = tensor->data;
+
+                float *p = (float *) &(blk[k+j*tensor->ne[0]].d);
+                sum += *p;
+                //printf("j,k,offset =%i,%i,%i @ %lli\n",j,k,k+j*tensor->ne[0],(long long int)p);
+            }
+            //printf("j=%i\n",j);
+        }
+        return sum;
+    } else {
+        printf("cannot sum type %i", tensor->type);
+        return 0;
+    }
+
+}
+
 static void ggml_compute_forward_mul_mat_q_f32(
     const struct ggml_compute_params * params,
     const struct ggml_tensor * src0,
@@ -6854,7 +6918,33 @@ static void ggml_compute_forward_mul_mat_q_f32(
     void * wdata = params->wdata;
     const size_t row_size = ne00*GGML_TYPE_SIZE[type]/GGML_BLCK_SIZE[type];
 
-    for (int ir = ir0; ir < ir1; ++ir) {
+#define TENSOR_TYPE_AS_STR(TYPE) TYPE == GGML_TYPE_F32 ? "FP32" : TYPE == GGML_TYPE_F16 ? "FP16" : TYPE == GGML_TYPE_Q4_0 ? "Q4_0" : TYPE == GGML_TYPE_Q4_1 ? \
"Q4_1" : "UNKNOWN" + +#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) @ %lli - ", #TENSOR, \ + TENSOR->type,TENSOR_TYPE_AS_STR(TENSOR->type),\ + TENSOR->ne[0], TENSOR->ne[1], TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2],(long long int)TENSOR->data); \ + { float sum = tensor_sum_elements(TENSOR); printf("Sum of tensor %s is %6.2f\n",#TENSOR, sum); } + +#ifdef EXPERIMENT_DEBUG2 +//#if 1 + printf("\n"); + TENSOR_DUMP(src0) + TENSOR_DUMP(src1) + //TENSOR_DUMP(src1) + + //printf("rowlength_x=%i,rowlength_y=%i,dst_stridelength_x=%i,dst_stridelength_y=%i\n",rowlength_x,rowlength_y,dst_stridelength_x,dst_stridelength_y); +#endif + + //void *p = (void *) src0->data; + assert((ir1-ir0) % EXPERIMENT_TILESIZE_X == 0); + + int x_stride = EXPERIMENT_TILESIZE_X; + if (ne11 < EXPERIMENT_TILESIZE_Y) { + x_stride = 1; + } + + + for (int ir = ir0; ir < ir1; ir+=x_stride) { // src0 indices const int i03 = ir/(ne02*ne01); const int i02 = (ir - i03*ne02*ne01)/ne01; @@ -6868,43 +6958,74 @@ static void ggml_compute_forward_mul_mat_q_f32( const int i3 = i03; void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); + char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size)); float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); +#if 0 + printf("src0->type=%i, src0->n_dims=%i, src0->nb=(%i,%i,%i), type_size=%lli \n",src0->type, src0->n_dims, nb01,nb02,nb03,GGML_TYPE_SIZE[src0->type]); + + /*if (src0->n_dims == 3) { + rowlength *= nb03; + }*/ + void *p = src0_row; + printf("src_row[%i] @ %li = %f, rowlength = %li \n", + ir, (long int)p, (float *)(p), + row_size/GGML_TYPE_SIZE[src0->type]); + + if (ir > 5) exit(0); +#endif +#ifdef EXPERIMENT_DEBUG + printf("ir=%i, src0_row=%lli, src1_col=%lli, dst_col=%lli\n",ir, (long long int)src0_row,(long long int)src1_col,(long long int)dst_col ); + if (ir > 5) exit(0); +#endif + assert(ne00 % 32 == 0); - if (ne11 < SEAP_TILESIZE_Y) { + if (ne11 < EXPERIMENT_TILESIZE_Y) { + //printf("using legacy tile size implementation\n"); // existing implementation tiled implementation for (int64_t ic = 0; ic < ne11; ++ic) { vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size)); } + } else { // tiled implementation - if ((ne11 % SEAP_TILESIZE_Y) != 0) { + if ((ne11 % EXPERIMENT_TILESIZE_Y) != 0) { printf("ne11=%i\n",ne11); } - assert((ne11 % SEAP_TILESIZE_Y) == 0); // make sure we have a multiple of the tilesize + assert((ne11 % EXPERIMENT_TILESIZE_Y) == 0); // make sure we have a multiple of the tilesize - for (int64_t ic = 0; ic < ne11; ic+=SEAP_TILESIZE_Y) { + for (int64_t ic = 0; ic < ne11; ic+=EXPERIMENT_TILESIZE_Y) { //vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size)); - #ifdef SEAP_DEBUG - for (int t=0;td); } + printf("calling seap_ggml_vec_dot_q4_0 for row, col=(%i,%i)\n",ir,ic); #endif - seap_ggml_vec_dot_q4_0(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size), SEAP_TILESIZE_X, SEAP_TILESIZE_Y, row_size/GGML_TYPE_SIZE[type], ne0); - - #ifdef SEAP_DEBUG - for (int t=0;tdata, *(float *)(p)); + } } - if (ic>=3) exit(0); + //if (ic>=3) exit(0); #endif } @@ -6924,6 +7045,12 @@ static void ggml_compute_forward_mul_mat_q_f32( // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); //} +#ifdef EXPERIMENT_DEBUG2 +//#if 1 + //printf("\n"); + TENSOR_DUMP(dst) + //exit(0); +#endif } static void 
From 75eea96d015902e87220168d2f218c4fdeb83843 Mon Sep 17 00:00:00 2001
From: Sebastian Apel <13675545+SebastianApel@users.noreply.github.com>
Date: Mon, 3 Apr 2023 22:50:03 +0200
Subject: [PATCH 3/5] Add benchmark script

---
 Makefile                                    |  15 +-
 examples/benchmark/benchmark-q4_0-matmult.c | 270 ++++++++++++++++++++
 ggml.c                                      |   7 +-
 run_benchmarks.sh                           |  32 +++
 4 files changed, 321 insertions(+), 3 deletions(-)
 create mode 100644 examples/benchmark/benchmark-q4_0-matmult.c
 create mode 100755 run_benchmarks.sh

diff --git a/Makefile b/Makefile
index 2f828bf10d747..c7966a398594a 100644
--- a/Makefile
+++ b/Makefile
@@ -31,14 +31,17 @@ endif
 #
 
 # keep standard at C11 and C++11
-CFLAGS   = -I.              -O3 -DNDEBUG -std=c11   -fPIC
-CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
+CFLAGS   = -I.              -O3 -std=c11   -fPIC
+CXXFLAGS = -I. -I./examples -O3 -std=c++11 -fPIC
 LDFLAGS  =
 
 # warnings
 CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
 CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
 
+CFLAGS   += -D EXPERIMENT_TILESIZE_X=$(TILESIZE_X) -D EXPERIMENT_TILESIZE_Y=$(TILESIZE_Y)
+CXXFLAGS += -D EXPERIMENT_TILESIZE_X=$(TILESIZE_X) -D EXPERIMENT_TILESIZE_Y=$(TILESIZE_Y)
+
 # OS specific
 # TODO: support Windows
 ifeq ($(UNAME_S),Linux)
@@ -169,6 +172,14 @@ embedding: examples/embedding/embedding.cpp ggml.o llama.o common.o
 # Tests
 #
 
+benchmark: ggml.o
+	$(CXX) $(CXXFLAGS) examples/benchmark/benchmark-q4_0-matmult.c ggml.o -o examples/benchmark/benchmark-q4_0-matmult $(LDFLAGS)
+	examples/benchmark/benchmark-q4_0-matmult -i 100
+
+benchmark_main: main
+	./main -m /tmp/ggml-alpaca-7b-q4-ggjt.bin -p "Building a website can be done in 10 simple steps:" -n 100 -t 2 --seed 1
+
 .PHONY: tests
 tests:
 	bash ./tests/run-tests.sh
+
diff --git a/examples/benchmark/benchmark-q4_0-matmult.c b/examples/benchmark/benchmark-q4_0-matmult.c
new file mode 100644
index 0000000000000..9ca9b133a9290
--- /dev/null
+++ b/examples/benchmark/benchmark-q4_0-matmult.c
@@ -0,0 +1,270 @@
+/*
+    License: MIT License
+
+    Changelog:
+    - 2023-03-31 Initial version by Sebastian Apel (https://github.com/SebastianApel)
+
+*/
+
+#include <locale.h>
+#include "ggml.h"
+#include <assert.h>
+#include <math.h>
+#include <cstring>
+#include <cstdio>
+#include <cinttypes>
+#include <unistd.h>
+#include <ctime>
+#include <string>
+#include <vector>
+#include <algorithm>
+#include <iostream>
+#include <iterator>
+
+float tensor_sum_elements(struct ggml_tensor * tensor) {
+    float sum = 0;
+    if (tensor->type==6) {
+        for (int j = 0; j < tensor->ne[1]; j++) {
+            for (int k = 0; k < tensor->ne[0]; k++) {
+                sum += ((float *) tensor->data)[j*tensor->ne[0]+k];
+            }
+        }
+    }
+    return sum;
+}
+
+
+/*
+    These are mapping to unknown
+    GGML_TYPE_I8,
+    GGML_TYPE_I16,
+    GGML_TYPE_I32,
+    GGML_TYPE_COUNT,
+*/
+
+#define TENSOR_TYPE_AS_STR(TYPE) TYPE == GGML_TYPE_F32 ? "FP32" : TYPE == GGML_TYPE_F16 ? "FP16" : TYPE == GGML_TYPE_Q4_0 ? "Q4_0" : TYPE == GGML_TYPE_Q4_1 ? \
"Q4_1" : "UNKNOWN" + +#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", #TENSOR, \ + TENSOR->type,TENSOR_TYPE_AS_STR(TENSOR->type),\ + TENSOR->ne[0], TENSOR->ne[1], TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2]); \ + { float sum = tensor_sum_elements(TENSOR); printf("Sum of tensor %s is %6.2f\n",#TENSOR, sum); } + +struct benchmark_params_struct { + int32_t n_threads = 1; + int32_t n_iterations = 10; +}; + +void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) { + fprintf(stderr, "usage: %s [options]\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stderr, " -i N, --iter N number of iterations to use during computation (default: %d)\n", params.n_iterations); + fprintf(stderr, "\n"); +} + +int main(int argc, char ** argv) { + + + struct benchmark_params_struct benchmark_params; + + bool invalid_param = false; + std::string arg; + for (int i = 1; i < argc; i++) { + arg = argv[i]; + + if (arg == "-t" || arg == "--threads") { + if (++i >= argc) { + invalid_param = true; + break; + } + benchmark_params.n_threads = std::stoi(argv[i]); + } else if (arg == "-i" || arg == "--iter") { + if (++i >= argc) { + invalid_param = true; + break; + } + benchmark_params.n_iterations = std::stoi(argv[i]); + } else if (arg == "-h" || arg == "--help") { + print_usage(argc, argv, benchmark_params); + exit(0); + } + if (invalid_param) { + fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str()); + print_usage(argc, argv, benchmark_params); + exit(1); + } + } + + + // create the ggml context + printf("Starting Test\n"); + + + + struct ggml_context * ctx; + //const int sizex = 4096; + //const int sizey = 11008; + +#undef VERBOSE_DEBUGGING +#ifndef VERBOSE_DEBUGGING + const int sizey = 4096; + const int sizex = 11008; + const int sizez = 128; +#else + /* Working - let's increase size */ + const int sizey = 1; + const int sizex = (8*32); + const int sizez = 1; + + /*const int sizey = 1; + const int sizex = 3*(8*32); + const int sizez = 1;*/ +#endif + + //printf("Memsize required = %i\n", sizex*sizex); + ggml_type wtype = GGML_TYPE_F32; + + size_t ctx_size = 0; + ctx_size += sizex*sizey*ggml_type_sizef(wtype); + ctx_size += sizex*sizey*ggml_type_sizef(wtype); + ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); + ctx_size += sizex*sizeof(float); + ctx_size += 1024*1024*100; + + printf("Allocating Memory of size %li byes, %li MB\n",ctx_size, (ctx_size/1024/1024)); + + struct ggml_init_params params = { + /*.mem_size =*/ ctx_size, + /*.mem_buffer =*/ NULL, + /* no_alloc =*/ 0 + }; + + ctx = ggml_init(params); + if (!ctx) { + fprintf(stderr, "%s: ggml_init() failed\n", __func__); + return false; + } + + + printf("Creating new tensors\n"); + // printf("Creating new tensor m1\n"); + struct ggml_tensor * m11 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey); + ggml_set_f32(m11, 1.0f); + + // printf("Creating new tensor m1\n"); + struct ggml_tensor * m12 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey); + ggml_set_f32(m12, 1.5f); + + // printf("Creating new tensor m2\n"); + struct ggml_tensor * m2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizez); + ggml_set_f32(m2, 2.0f); + + printf("\n------ Test 1 - Matrix Mult via F32 code 
+           "------------------------------------------------------------------------------\n");
+    // printf("Creating new tensor m11xm2\n");
+    struct ggml_tensor * m11xm2 = ggml_mul_mat(ctx, m11, m2);
+
+    // printf("Creating compute graph\n");
+    struct ggml_cgraph gf = ggml_build_forward(m11xm2);
+
+    gf.n_threads=benchmark_params.n_threads;
+    printf("cgraph->n_threads=%i\n",gf.n_threads);
+
+    TENSOR_DUMP(m11);
+    TENSOR_DUMP(m2);
+
+    ggml_graph_compute(ctx, &gf);
+
+    TENSOR_DUMP(gf.nodes[0]);
+
+    printf("\n------ Test 2 - Matrix Mult via Q4_0 code ------------------------------------------------------------------------------\n");
+
+    int32_t nelements = sizex*sizey;
+    int32_t ne[2] = { sizex, sizey };
+
+    std::vector<int64_t> hist_cur(1 << 4, 0);
+
+    // Set up the benchmark matrices
+    // printf("Creating new tensor q11 & Running quantize\n");
+    struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, sizex, sizey);
+    ggml_quantize_q4_0((const float *) m11->data, q11->data, nelements, ne[0], hist_cur.data());
+
+    // Set up the compute graph
+    // printf("Creating new tensor q31\n");
+    struct ggml_tensor * q31 = ggml_mul_mat(ctx, q11, m2);
+
+    // printf("Creating compute graph\n");
+    struct ggml_cgraph gf31 = ggml_build_forward(q31);
+    gf31.n_threads=benchmark_params.n_threads;
+
+    // Set up a second graph computation to make sure we override the CPU cache lines
+    // printf("Creating new tensor q12 & Running quantize\n");
+    struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, sizex, sizey);
+    ggml_quantize_q4_0((const float *) m12->data, q12->data, nelements, ne[0], hist_cur.data());
+
+    // printf("Creating new tensor q32\n");
+    struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2);
+
+    //printf("Creating compute graph\n");
+    struct ggml_cgraph gf32 = ggml_build_forward(q32);
+    gf32.n_threads=benchmark_params.n_threads;
+    printf("cgraph->n_threads=%i\n",gf31.n_threads);
+
+    const int dimx = sizex;
+    const int dimy = sizey;
+    const int dimz = sizez;
+    long long int flops_per_dot_product = dimy + dimy; // one multiply and one add per element
+    long long int flops_per_matrix = flops_per_dot_product * dimx * dimz;
+    printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
+
+    // Let's use the F32 result from above as a reference for the Q4_0 multiplication
+    float sum_of_F32_reference = tensor_sum_elements(gf.nodes[0]);
+
+    printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; FLOPS_per_u_Second\n");
+    printf("==============================================================================================\n");
+
+    for (int i = 0; i < benchmark_params.n_iterations; i++) {
+
+        long long int start = ggml_time_us();
+        //printf("Running ggml_graph_compute\n");
+        ggml_graph_compute(ctx, &gf31);
+        long long int stop = ggml_time_us();
+        long long int usec = stop-start;
+        float flops_per_usec = (1.0f*flops_per_matrix)/usec;
+        printf("%9i;%8i;%6i;%6i;%6i;%15lli;%18lli;%19.2f\n",
+            i,
+            gf31.n_threads,
+            sizex, sizey, sizez, flops_per_matrix,
+            usec, flops_per_usec);
+
+        // Check that the matrix multiplication result is in the right ballpark
+        // We cannot use the exact value from the F32 multiplication because the quantization will be slightly different
+        float sum_of_Q4_result = tensor_sum_elements(gf31.nodes[0]);
+        float delta = fabsf(sum_of_Q4_result - sum_of_F32_reference);
+        float allowed_delta = sum_of_F32_reference / 1000 / 1000; // Let's accept an epsilon of 10^-6
+
+        if (delta > allowed_delta) {
+            printf("\nABORT - ERROR in Matrix Multiplication result - expected %6.2f, got %6.2f (delta %6.2f > allowed_delta %6.2f)\n",
+                sum_of_F32_reference,
+                sum_of_Q4_result,
+                delta,
+                allowed_delta
+            );
+            exit(0);
+        }
+
+        // Running a different graph computation to make sure we override the CPU cache lines
+        ggml_graph_compute(ctx, &gf32);
+    }
+
+}
diff --git a/ggml.c b/ggml.c
index a5f0054d1c748..afaa4f065dd04 100644
--- a/ggml.c
+++ b/ggml.c
@@ -2204,8 +2204,13 @@ static void seap_ggml_vec_dot_q4_0(const int n, float * restrict s, const void *
 
 //#if defined(__AVX2__)
 #if 1
 
+#ifndef EXPERIMENT_TILESIZE_X
 #define EXPERIMENT_TILESIZE_X 8
-#define EXPERIMENT_TILESIZE_Y 1
+#endif
+
+#ifndef EXPERIMENT_TILESIZE_Y
+#define EXPERIMENT_TILESIZE_Y 2
+#endif
 //#define EXPERIMENT_TILESIZE_X 2
 //#define EXPERIMENT_TILESIZE_Y 2
 
diff --git a/run_benchmarks.sh b/run_benchmarks.sh
new file mode 100755
index 0000000000000..a056213a62c49
--- /dev/null
+++ b/run_benchmarks.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+mkdir benchmark-results
+
+cat /proc/cpuinfo > benchmark-results/cpuinfo.txt
+
+for x in 1 2 4 8
+do
+    for y in 1 2 4 8
+    do
+        echo "Benchmarking tilesize ${x}x${y} with synthetic benchmark"
+        #make clean
+        #TILESIZE_X=$x TILESIZE_Y=$y make benchmark 2>&1 | tee benchmark-results/benchmark-threads-1-tilesize-${x}x${y}.txt
+    done
+done
+
+for x in 1 2 4 8
+do
+    for y in 1 2 4 8
+    do
+        echo "Benchmarking tilesize ${x}x${y} with llama main"
+        make clean
+        TILESIZE_X=$x TILESIZE_Y=$y make benchmark_main 2>&1 | tee benchmark-results/benchmark-main-threads-2-tilesize-${x}x${y}.txt
+    done
+done
+
+MACHINE_ID=$(cat /etc/machine-id)
+TIMESTAMP=$(date +"%Y%m%d-%H%M%S")
+
+tar -czvf benchmark-results-$MACHINE_ID-$TIMESTAMP.tgz benchmark-results/*
+
+echo "Done creating benchmark-results-$MACHINE_ID-$TIMESTAMP.tgz"
\ No newline at end of file
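The benchmark's bookkeeping deserves a note: a dot product of length dimy costs dimy multiplies plus dimy adds, which is the `dimy + dimy` above, and the full matmul therefore costs roughly 2*dimy*dimx*dimz FLOPs. A minimal sketch of that accounting, with clock_gettime standing in for ggml_time_us() (an assumption; the benchmark itself uses ggml's timer) and the sizes taken from the test above:

    #include <stdio.h>
    #include <time.h>

    /* microsecond timestamp; stand-in for ggml_time_us() */
    static long long usec_now(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec*1000000 + ts.tv_nsec/1000;
    }

    int main(void) {
        const long long dimx = 11008, dimy = 4096, dimz = 128;
        const long long flops_per_dot_product = 2*dimy;  /* dimy muls + dimy adds */
        const long long flops_per_matrix = flops_per_dot_product * dimx * dimz;

        long long start = usec_now();
        /* ... one matmul would run here ... */
        long long usec = usec_now() - start + 1;         /* +1 avoids div by zero */

        printf("%lld FLOPs in %lld us -> %.2f FLOPS per usec\n",
               flops_per_matrix, usec, (double)flops_per_matrix/usec);
        return 0;
    }

Dividing the fixed FLOP count by elapsed microseconds is what produces the FLOPS_per_u_Second column, so the number is comparable across tile sizes as long as the matrix dimensions stay fixed.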
From a33cbbe03b5bcb605adcd25cb4a828f4a389553a Mon Sep 17 00:00:00 2001
From: Sebastian Apel <13675545+SebastianApel@users.noreply.github.com>
Date: Tue, 4 Apr 2023 09:26:29 +0200
Subject: [PATCH 4/5] Makefile: Added defaults for TILESIZE_X and _Y

---
 Makefile | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/Makefile b/Makefile
index c7966a398594a..2eeb5054a6d3b 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,14 @@ LDFLAGS  =
 CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
 CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
 
+ifeq ($(TILESIZE_X),)
+    TILESIZE_X = 1
+endif
+
+ifeq ($(TILESIZE_Y),)
+    TILESIZE_Y = 1
+endif
+
 CFLAGS   += -D EXPERIMENT_TILESIZE_X=$(TILESIZE_X) -D EXPERIMENT_TILESIZE_Y=$(TILESIZE_Y)
 CXXFLAGS += -D EXPERIMENT_TILESIZE_X=$(TILESIZE_X) -D EXPERIMENT_TILESIZE_Y=$(TILESIZE_Y)
 
From 42ad59fe41e9c2903f2803fec3dc0c310cdbd324 Mon Sep 17 00:00:00 2001
From: Sebastian Apel <13675545+SebastianApel@users.noreply.github.com>
Date: Tue, 4 Apr 2023 16:23:51 +0200
Subject: [PATCH 5/5] Bugfix: We can handle the situation where matrix rows /
 thread count is not a multiple of TILESIZE_X

---
 ggml.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/ggml.c b/ggml.c
index afaa4f065dd04..d09b698f85aab 100644
--- a/ggml.c
+++ b/ggml.c
@@ -6941,15 +6941,23 @@ static void ggml_compute_forward_mul_mat_q_f32(
 #endif
 
     //void *p = (void *) src0->data;
-    assert((ir1-ir0) % EXPERIMENT_TILESIZE_X == 0);
 
     int x_stride = EXPERIMENT_TILESIZE_X;
+
+    // if the second matrix is too small, we cannot use the tiled code
     if (ne11 < EXPERIMENT_TILESIZE_Y) {
         x_stride = 1;
     }
 
     for (int ir = ir0; ir < ir1; ir+=x_stride) {
+
+        // check if we can advance with x_stride = EXPERIMENT_TILESIZE_X
+        //printf("ir0=%i -> ir1 - ir=%i\n", ir0, ir1-ir);
+        if ((ir1-ir) < EXPERIMENT_TILESIZE_X) {
+            // we do not have enough rows left - we need to go step by step
+            //printf("ir=%i - switching to stride 1\n", ir);
+            x_stride = 1;
+        }
+
         // src0 indices
         const int i03 = ir/(ne02*ne01);
         const int i02 = (ir - i03*ne02*ne01)/ne01;
@@ -6988,7 +6996,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
 
         assert(ne00 % 32 == 0);
 
-        if (ne11 < EXPERIMENT_TILESIZE_Y) {
+        if ((x_stride != EXPERIMENT_TILESIZE_X) || (ne11 < EXPERIMENT_TILESIZE_Y)) {
             //printf("using legacy tile size implementation\n");
             // existing non-tiled implementation
             for (int64_t ic = 0; ic < ne11; ++ic) {
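Patch 5's fix is the classic strip-mining remainder pattern: walk the rows in tiles of TILESIZE_X and drop to a stride of 1 once fewer than TILESIZE_X rows remain, so the per-thread row count no longer has to be a multiple of the tile size. A small self-contained sketch of just that control flow (TILE_X and process_rows are illustrative names, not the ggml.c identifiers):

    #include <stdio.h>

    #define TILE_X 8

    /* Process rows [ir0, ir1) in tiles of TILE_X, falling back to
       single rows for the tail. */
    static void process_rows(int ir0, int ir1) {
        int stride = TILE_X;
        for (int ir = ir0; ir < ir1; ir += stride) {
            if (ir1 - ir < TILE_X) {
                stride = 1;                 /* tail: go row by row */
            }
            if (stride == TILE_X) {
                printf("tiled  rows %d..%d\n", ir, ir + TILE_X - 1);
            } else {
                printf("scalar row  %d\n", ir);
            }
        }
    }

    int main(void) {
        process_rows(0, 19);  /* 19 % 8 != 0: two full tiles, then three single rows */
        return 0;
    }

Because `stride` only ever shrinks, the loop cannot overrun ir1, which is exactly why the earlier `assert((ir1-ir0) % EXPERIMENT_TILESIZE_X == 0)` could be removed.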