#version 450

#extension GL_EXT_shader_explicit_arithmetic_types : require
+ #extension GL_KHR_shader_subgroup_arithmetic : require
+ #extension GL_KHR_shader_subgroup_shuffle : require
+ #extension GL_EXT_shader_subgroup_extended_types_int16 : require

#include "mul_mat_vec_base.comp"
@@ -9,16 +12,11 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
layout (constant_id = 0) const uint BLOCK_SIZE = 32;
layout (constant_id = 1) const uint NUM_ROWS = 1;
- // a 32 bit cache potentially might write faster due to banking
- struct block_q6_K_32stor
- {
-     uint32_t blk[104];
-     float16_t d;
- };
+ uint16_t blk[BLOCK_SIZE/16][8];

- shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE];
- shared FLOAT_TYPE sccache[BLOCK_SIZE/16][16];
- shared block_q6_K_32stor blkcache[BLOCK_SIZE/16];
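+ // read one 16-bit word of the register-cached superblock from whichever subgroup invocation holds it (replaces the shared-memory blkcache)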
+ uint16_t get_blk_shuffle(uint fbi, uint ix, uint ofst) {
+     return subgroupShuffle(blk[ix][ofst/(104/fbi)], ofst%(104/fbi));
+ }
uint fill_blkcache_its(uint wg_size) {
// subgroup sizes are always a power of 2
@@ -38,7 +36,7 @@ void fill_blkcache(const int num_blocks, const uint ib0, const uint i0, const ui
[[unroll]] for (int l = 0; l < num_blocks; ++l) {
[[unroll]] for (int m = 0; m < fbi; ++m)
// cache full superblock into shared memory with coalesced reads
- blkcache[l].blk[tid + m*bc_t] = uint32_t(data_a_packed16[ib0 + i0 + l].blk[tid + m*bc_t]);
+ blk[l][m] = data_a_packed16[ib0 + i0 + l].blk[tid + m*bc_t];
}
}
}
@@ -64,7 +62,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
const uint ql_offset = 64*v_im + l0;
const uint qh_offset = 32*v_im + l0;
- const uint s_offset = 8*v_im + is;
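+ // the scale lookup below becomes a subgroup shuffle, so s_offset is now a lane index; 16*ix selects the group of 16 invocations handling superblock ix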
+ const uint s_offset = 16*ix + 8*v_im + is;
const uint y_offset = 128*v_im + l0;
const uint bcs_offset = (itid%2 == 1) ? 8 : 0;
@@ -93,7 +91,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
fill_blkcache(blim, ib0, i0, tid, fbi);
}
- sccache[ix][itid] = FLOAT_TYPE(int8_t(bitfieldExtract(blkcache[ix].blk[96 + itid/2], int(bcs_offset), 8)));
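+ // each invocation keeps one of the superblock's 16 scales in a register; other lanes fetch it with subgroupShuffle in the dot product below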
+ FLOAT_TYPE sccache = FLOAT_TYPE(int8_t(bitfieldExtract(get_blk_shuffle(fbi, ix, 96 + itid/2), int(bcs_offset), 8)));
barrier();
ibi += num_blocks_per_row;
@@ -102,15 +100,15 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
const FLOAT_TYPE d = FLOAT_TYPE(data_a_packed16[ib0 + i].d);
- uint32_t ql0_u32 = uint32_t(blkcache[ix].blk[ql_offset / 2]) | (uint32_t(blkcache[ix].blk[ql_offset / 2 + 1]) << 16);
- uint32_t ql32_u32 = uint32_t(blkcache[ix].blk[ql_offset / 2 + 16]) | (uint32_t(blkcache[ix].blk[ql_offset / 2 + 17]) << 16);
+ uint32_t ql0_u32 = uint32_t(get_blk_shuffle(fbi, ix, ql_offset / 2)) | (uint32_t(get_blk_shuffle(fbi, ix, ql_offset / 2 + 1)) << 16);
+ uint32_t ql32_u32 = uint32_t(get_blk_shuffle(fbi, ix, ql_offset / 2 + 16)) | (uint32_t(get_blk_shuffle(fbi, ix, ql_offset / 2 + 17)) << 16);
uint32_t ql0_u32_lo4 = ql0_u32 & 0x0F0F0F0F;
uint32_t ql0_u32_hi4 = (ql0_u32 >> 4) & 0x0F0F0F0F;
uint32_t ql32_u32_lo4 = ql32_u32 & 0x0F0F0F0F;
uint32_t ql32_u32_hi4 = (ql32_u32 >> 4) & 0x0F0F0F0F;
- uint32_t qh_u32 = uint32_t(blkcache[ix].blk[64 + qh_offset / 2]) | (uint32_t(blkcache[ix].blk[64 + qh_offset / 2 + 1]) << 16);
+ uint32_t qh_u32 = uint32_t(get_blk_shuffle(fbi, ix, 64 + qh_offset / 2)) | (uint32_t(get_blk_shuffle(fbi, ix, 64 + qh_offset / 2 + 1)) << 16);
uint32_t qh0_u32 = (qh_u32 & 0x03030303) << 4;
uint32_t qh2_u32 = (qh_u32 & 0x0C0C0C0C) << 2;
uint32_t qh4_u32 = (qh_u32 & 0x30303030);
@@ -134,28 +132,15 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
sum[3] = fma(FLOAT_TYPE(by96[l]), FLOAT_TYPE(int8_t(q3[l]) - 32), sum[3]);
}
- temp[n] = fma(fma(sum[0], sccache[ix][s_offset], fma(sum[1], sccache[ix][s_offset + 2], fma(sum[2], sccache[ix][s_offset + 4], sum[3] * sccache[ix][s_offset + 6]))), d, temp[n]);
+ temp[n] = fma(fma(sum[0], subgroupShuffle(sccache, s_offset), fma(sum[1], subgroupShuffle(sccache, s_offset + 2), fma(sum[2], subgroupShuffle(sccache, s_offset + 4), sum[3] * subgroupShuffle(sccache, s_offset + 6)))), d, temp[n]);
}
}
// sum up partial sums and write back result
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
-     tmpsh[n][tid] = temp[n];
- }
- barrier();
- [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) {
-     if (tid < s) {
-         [[unroll]] for (uint n = 0; n < num_rows; ++n) {
-             tmpsh[n][tid] += tmpsh[n][tid + s];
-         }
-     }
-     barrier();
- }
- if (tid == 0) {
-     [[unroll]] for (uint n = 0; n < num_rows; ++n) {
-         data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]);
-     }
- }
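+ // reduce each row's partial sums across the subgroup; the first num_rows invocations write the results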
+ [[unroll]] for (uint n = 0; n < num_rows; ++n)
+     temp[n] = subgroupAdd(temp[n]);
+ if (tid < num_rows)
+     data_d[d_offset + first_row + tid] = D_TYPE(temp[tid]);
}
void main() {