
Commit 815061b

formatting
1 parent cbaa80e commit 815061b

39 files changed, +464 -653 lines changed

coresimd/aarch64/crypto.rs

+22 -82
@@ -16,36 +16,36 @@ extern "C" {
     fn vsha1h_u32_(hash_e: u32) -> u32;
     #[link_name = "llvm.aarch64.crypto.sha1su0"]
     fn vsha1su0q_u32_(
-        w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t
+        w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t,
     ) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha1su1"]
     fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha1c"]
     fn vsha1cq_u32_(
-        hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+        hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
     ) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha1p"]
     fn vsha1pq_u32_(
-        hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+        hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
     ) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha1m"]
     fn vsha1mq_u32_(
-        hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+        hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
     ) -> uint32x4_t;
 
     #[link_name = "llvm.aarch64.crypto.sha256h"]
     fn vsha256hq_u32_(
-        hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t
+        hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t,
     ) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha256h2"]
     fn vsha256h2q_u32_(
-        hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t
+        hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t,
     ) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha256su0"]
     fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
     #[link_name = "llvm.aarch64.crypto.sha256su1"]
     fn vsha256su1q_u32_(
-        tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t
+        tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t,
     ) -> uint32x4_t;
 }

@@ -97,7 +97,7 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha1c))]
 pub unsafe fn vsha1cq_u32(
-    hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+    hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
 ) -> uint32x4_t {
     vsha1cq_u32_(hash_abcd, hash_e, wk)
 }
@@ -107,7 +107,7 @@ pub unsafe fn vsha1cq_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha1m))]
 pub unsafe fn vsha1mq_u32(
-    hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+    hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
 ) -> uint32x4_t {
     vsha1mq_u32_(hash_abcd, hash_e, wk)
 }
@@ -117,7 +117,7 @@ pub unsafe fn vsha1mq_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha1p))]
 pub unsafe fn vsha1pq_u32(
-    hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+    hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
 ) -> uint32x4_t {
     vsha1pq_u32_(hash_abcd, hash_e, wk)
 }
@@ -127,7 +127,7 @@ pub unsafe fn vsha1pq_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha1su0))]
 pub unsafe fn vsha1su0q_u32(
-    w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t
+    w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t,
 ) -> uint32x4_t {
     vsha1su0q_u32_(w0_3, w4_7, w8_11)
 }
@@ -137,7 +137,7 @@ pub unsafe fn vsha1su0q_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha1su1))]
 pub unsafe fn vsha1su1q_u32(
-    tw0_3: uint32x4_t, w12_15: uint32x4_t
+    tw0_3: uint32x4_t, w12_15: uint32x4_t,
 ) -> uint32x4_t {
     vsha1su1q_u32_(tw0_3, w12_15)
 }
@@ -147,7 +147,7 @@ pub unsafe fn vsha1su1q_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha256h))]
 pub unsafe fn vsha256hq_u32(
-    hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t
+    hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t,
 ) -> uint32x4_t {
     vsha256hq_u32_(hash_abcd, hash_efgh, wk)
 }
@@ -157,7 +157,7 @@ pub unsafe fn vsha256hq_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha256h2))]
 pub unsafe fn vsha256h2q_u32(
-    hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t
+    hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t,
 ) -> uint32x4_t {
     vsha256h2q_u32_(hash_efgh, hash_abcd, wk)
 }
@@ -167,7 +167,7 @@ pub unsafe fn vsha256h2q_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha256su0))]
 pub unsafe fn vsha256su0q_u32(
-    w0_3: uint32x4_t, w4_7: uint32x4_t
+    w0_3: uint32x4_t, w4_7: uint32x4_t,
 ) -> uint32x4_t {
     vsha256su0q_u32_(w0_3, w4_7)
 }
@@ -177,7 +177,7 @@ pub unsafe fn vsha256su0q_u32(
 #[target_feature(enable = "crypto")]
 #[cfg_attr(test, assert_instr(sha256su1))]
 pub unsafe fn vsha256su1q_u32(
-    tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t
+    tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t,
 ) -> uint32x4_t {
     vsha256su1q_u32_(tw0_3, w8_11, w12_15)
 }
@@ -199,22 +199,8 @@ mod tests {
         assert_eq!(
             r,
             u8x16::new(
-                124,
-                123,
-                124,
-                118,
-                124,
-                123,
-                124,
-                197,
-                124,
-                123,
-                124,
-                118,
-                124,
-                123,
-                124,
-                197
+                124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118,
+                124, 123, 124, 197
             )
         );
     }
@@ -229,22 +215,7 @@ mod tests {
         assert_eq!(
             r,
             u8x16::new(
-                9,
-                213,
-                9,
-                251,
-                9,
-                213,
-                9,
-                56,
-                9,
-                213,
-                9,
-                251,
-                9,
-                213,
-                9,
-                56
+                9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56
             )
         );
     }
@@ -256,24 +227,7 @@ mod tests {
         let r: u8x16 = vaesmcq_u8(data).into_bits();
         assert_eq!(
             r,
-            u8x16::new(
-                3,
-                4,
-                9,
-                10,
-                15,
-                8,
-                21,
-                30,
-                3,
-                4,
-                9,
-                10,
-                15,
-                8,
-                21,
-                30
-            )
+            u8x16::new(3, 4, 9, 10, 15, 8, 21, 30, 3, 4, 9, 10, 15, 8, 21, 30)
         );
     }

@@ -285,22 +239,8 @@ mod tests {
         assert_eq!(
            r,
            u8x16::new(
-                43,
-                60,
-                33,
-                50,
-                103,
-                80,
-                125,
-                70,
-                43,
-                60,
-                33,
-                50,
-                103,
-                80,
-                125,
-                70
+                43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80,
+                125, 70
            )
        );
    }
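
For orientation (not part of this commit), a minimal usage sketch of the SHA-256 schedule wrappers whose parameter lists are reflowed above, written in the same style as this file's tests. The `u32x4` portable vector type and the `into_bits` conversions are assumptions carried over from how `crypto.rs` already uses them; the function name is hypothetical.

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "crypto")]
unsafe fn sha256_schedule_update(
    w0_3: u32x4, w4_7: u32x4, w8_11: u32x4, w12_15: u32x4,
) -> u32x4 {
    // Hypothetical example only: chain the two schedule-update intrinsics
    // exactly as their signatures above suggest.
    let tw0_3 = vsha256su0q_u32(w0_3.into_bits(), w4_7.into_bits());
    vsha256su1q_u32(tw0_3, w8_11.into_bits(), w12_15.into_bits()).into_bits()
}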

coresimd/aarch64/neon.rs

+12 -49
@@ -546,7 +546,6 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     vpmaxq_f64_(a, b)
 }
 
-
 #[cfg(test)]
 mod tests {
     use coresimd::aarch64::*;
@@ -800,20 +799,11 @@ mod tests {
     #[simd_test = "neon"]
     unsafe fn test_vpminq_s8() {
         #[cfg_attr(rustfmt, skip)]
-        let a = i8x16::new(
-            1, -2, 3, -4, 5, 6, 7, 8,
-            1, 2, 3, 4, 5, 6, 7, 8
-        );
+        let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
         #[cfg_attr(rustfmt, skip)]
-        let b = i8x16::new(
-            0, 3, 2, 5, 4, 7, 6, 9,
-            0, 3, 2, 5, 4, 7, 6, 9
-        );
+        let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
         #[cfg_attr(rustfmt, skip)]
-        let e = i8x16::new(
-            -2, -4, 5, 7, 1, 3, 5, 7,
-            0, 2, 4, 6, 0, 2, 4, 6,
-        );
+        let e = i8x16::new(-2, -4, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
         let r: i8x16 = vpminq_s8(a.into_bits(), b.into_bits()).into_bits();
         assert_eq!(r, e);
     }
@@ -839,20 +829,11 @@ mod tests {
     #[simd_test = "neon"]
     unsafe fn test_vpminq_u8() {
         #[cfg_attr(rustfmt, skip)]
-        let a = u8x16::new(
-            1, 2, 3, 4, 5, 6, 7, 8,
-            1, 2, 3, 4, 5, 6, 7, 8
-        );
+        let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
         #[cfg_attr(rustfmt, skip)]
-        let b = u8x16::new(
-            0, 3, 2, 5, 4, 7, 6, 9,
-            0, 3, 2, 5, 4, 7, 6, 9
-        );
+        let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
         #[cfg_attr(rustfmt, skip)]
-        let e = u8x16::new(
-            1, 3, 5, 7, 1, 3, 5, 7,
-            0, 2, 4, 6, 0, 2, 4, 6,
-        );
+        let e = u8x16::new(1, 3, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
         let r: u8x16 = vpminq_u8(a.into_bits(), b.into_bits()).into_bits();
         assert_eq!(r, e);
     }
@@ -896,20 +877,11 @@ mod tests {
     #[simd_test = "neon"]
     unsafe fn test_vpmaxq_s8() {
         #[cfg_attr(rustfmt, skip)]
-        let a = i8x16::new(
-            1, -2, 3, -4, 5, 6, 7, 8,
-            1, 2, 3, 4, 5, 6, 7, 8
-        );
+        let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
         #[cfg_attr(rustfmt, skip)]
-        let b = i8x16::new(
-            0, 3, 2, 5, 4, 7, 6, 9,
-            0, 3, 2, 5, 4, 7, 6, 9
-        );
+        let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
         #[cfg_attr(rustfmt, skip)]
-        let e = i8x16::new(
-            1, 3, 6, 8, 2, 4, 6, 8,
-            3, 5, 7, 9, 3, 5, 7, 9,
-        );
+        let e = i8x16::new(1, 3, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
         let r: i8x16 = vpmaxq_s8(a.into_bits(), b.into_bits()).into_bits();
         assert_eq!(r, e);
     }
@@ -935,20 +907,11 @@ mod tests {
     #[simd_test = "neon"]
     unsafe fn test_vpmaxq_u8() {
         #[cfg_attr(rustfmt, skip)]
-        let a = u8x16::new(
-            1, 2, 3, 4, 5, 6, 7, 8,
-            1, 2, 3, 4, 5, 6, 7, 8
-        );
+        let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
         #[cfg_attr(rustfmt, skip)]
-        let b = u8x16::new(
-            0, 3, 2, 5, 4, 7, 6, 9,
-            0, 3, 2, 5, 4, 7, 6, 9
-        );
+        let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
         #[cfg_attr(rustfmt, skip)]
-        let e = u8x16::new(
-            2, 4, 6, 8, 2, 4, 6, 8,
-            3, 5, 7, 9, 3, 5, 7, 9,
-        );
+        let e = u8x16::new(2, 4, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
         let r: u8x16 = vpmaxq_u8(a.into_bits(), b.into_bits()).into_bits();
         assert_eq!(r, e);
     }
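
As a reading aid for the expected vectors collapsed onto single lines above, here is a scalar model of the pairwise minimum (a sketch, not code from this commit): the first eight result lanes take the minimum of adjacent pairs of `a`, the last eight the minimum of adjacent pairs of `b`.

// Hypothetical scalar reference for vpminq_s8; it reproduces the `e`
// vector used in test_vpminq_s8 above.
fn pairwise_min_i8x16(a: [i8; 16], b: [i8; 16]) -> [i8; 16] {
    let mut r = [0i8; 16];
    for i in 0..8 {
        // Lanes 0..8 come from adjacent pairs of `a`.
        r[i] = core::cmp::min(a[2 * i], a[2 * i + 1]);
        // Lanes 8..16 come from adjacent pairs of `b`.
        r[i + 8] = core::cmp::min(b[2 * i], b[2 * i + 1]);
    }
    r
}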

coresimd/arm/mod.rs

+14 -6
@@ -19,11 +19,19 @@ pub use self::v7::*;
 
 // NEON is supported on AArch64, and on ARM when built with the v7 and neon
 // features. Building ARM without neon produces incorrect codegen.
-#[cfg(any(target_arch = "aarch64",
-          all(target_feature = "v7", target_feature = "neon"),
-          dox))]
+#[cfg(
+    any(
+        target_arch = "aarch64",
+        all(target_feature = "v7", target_feature = "neon"),
+        dox
+    )
+)]
 mod neon;
-#[cfg(any(target_arch = "aarch64",
-          all(target_feature = "v7", target_feature = "neon"),
-          dox))]
+#[cfg(
+    any(
+        target_arch = "aarch64",
+        all(target_feature = "v7", target_feature = "neon"),
+        dox
+    )
+)]
 pub use self::neon::*;
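
The hunk above reflows the crate's NEON gate into a multi-line `#[cfg(...)]` attribute. For illustration, the same gate applied to another item might look like the sketch below (an assumption, not code from this commit; note that `dox` is a cfg set by this crate's own documentation builds, not a built-in flag).

#[cfg(
    any(
        target_arch = "aarch64",
        all(target_feature = "v7", target_feature = "neon"),
        dox
    )
)]
fn neon_gate_demo() {
    // Hypothetical item compiled only on AArch64, on ARM with the `v7`
    // and `neon` target features, or under the crate-internal `dox` cfg.
}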
