@@ -85,6 +85,15 @@ static __global__ void hardswish_f32(const float * x, float * dst, const int k)
     dst[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f));
 }
 
+static __global__ void exp_f32(const float * x, float * dst, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+    dst[i] = expf(x[i]);
+}
+
 static __global__ void leaky_relu_f32(const float * x, float * dst, const int k, const float negative_slope) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
     if (i >= k) {
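The new exp_f32 kernel follows the same pattern as the other element-wise kernels in this file: one thread per element, with the i >= k check discarding threads in the last, partially filled block. Below is a minimal standalone smoke test for the kernel, assuming it is copied into its own .cu file; the harness, names, and values are illustrative and not part of this change (error checking omitted).

// Hypothetical standalone harness for exp_f32; not part of this diff.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

// Kernel copied from the hunk above.
static __global__ void exp_f32(const float * x, float * dst, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;

    if (i >= k) {
        return;
    }
    dst[i] = expf(x[i]);
}

int main() {
    const int k = 4;
    const float h_x[k] = {0.0f, 1.0f, -1.0f, 2.5f};
    float h_y[k];

    float * d_x; float * d_y;
    cudaMalloc(&d_x, k*sizeof(float));
    cudaMalloc(&d_y, k*sizeof(float));
    cudaMemcpy(d_x, h_x, k*sizeof(float), cudaMemcpyHostToDevice);

    exp_f32<<<1, 256>>>(d_x, d_y, k); // one block is enough for 4 elements
    cudaMemcpy(h_y, d_y, k*sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < k; ++i) {
        printf("x=%g  host expf=%g  kernel=%g\n", h_x[i], expf(h_x[i]), h_y[i]);
    }

    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}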
@@ -174,6 +183,11 @@ static void hardswish_f32_cuda(const float * x, float * dst, const int k, cudaSt
     hardswish_f32<<<num_blocks, CUDA_HARDSWISH_BLOCK_SIZE, 0, stream>>>(x, dst, k);
 }
 
+static void exp_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_EXP_BLOCK_SIZE - 1) / CUDA_EXP_BLOCK_SIZE;
+    exp_f32<<<num_blocks, CUDA_EXP_BLOCK_SIZE, 0, stream>>>(x, dst, k);
+}
+
 static void leaky_relu_f32_cuda(const float * x, float * dst, const int k, const float negative_slope, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE;
     leaky_relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k, negative_slope);
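The launcher rounds k up to a whole number of blocks: with 256-thread blocks and, say, k = 1000 elements, num_blocks = (1000 + 255) / 256 = 4, so 1024 threads are launched and the i >= k guard in exp_f32 masks the extra 24. CUDA_EXP_BLOCK_SIZE itself is not defined in these hunks; presumably it is added to the corresponding header next to the other per-op block sizes, along the lines of:

// Assumed companion definition in the matching header; the value mirrors the other
// per-op block sizes and is an assumption, not shown in this diff.
#define CUDA_EXP_BLOCK_SIZE 256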
@@ -325,6 +339,20 @@ void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst)
     hardswish_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
 }
 
+void ggml_cuda_op_exp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const float * src0_d = (const float *)src0->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    exp_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
+}
+
 void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
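ggml_cuda_op_exp mirrors ggml_cuda_op_hardswish: it only accepts contiguous F32 tensors and forwards the flat element count from ggml_nelements to the launcher. For the op to be reachable during graph execution, the backend's dispatch switch also needs a case routing the exp unary op here; the sketch below shows what that wiring typically looks like, assuming the usual switch in the backend's compute-forward function — it is not part of the hunks above, and only ggml_cuda_op_exp comes from this diff.

// Hypothetical dispatch wiring (assumed location, e.g. ggml_cuda_compute_forward).
case GGML_OP_UNARY:
    switch (ggml_get_unary_op(dst)) {
        // ... existing unary ops ...
        case GGML_UNARY_OP_EXP:
            ggml_cuda_op_exp(ctx, dst);
            break;
        default:
            return false;
    }
    break;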