Commit 368f08a

Rollup merge of #100383 - fortanix:raoul/aepic_leak_mitigation, r=cuviper
Mitigate stale data reads on the SGX platform

Intel disclosed the Stale Data Read vulnerability yesterday. To mitigate this issue completely, reads of userspace memory from an SGX enclave must be 8-byte aligned and performed in 8-byte chunks. This PR implements that mitigation.

References:
- https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
- https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html

cc: ``@jethrogb``
2 parents 3cca140 + 2a23d08 commit 368f08a

2 files changed: +166 / -23 lines
library/std/src/sys/sgx/abi/usercalls/alloc.rs (+138 / -21 lines)
@@ -305,6 +305,34 @@ where
     }
 }
 
+// Split a memory region ptr..ptr + len into three parts:
+// +--------+
+// | small0 | Chunk smaller than 8 bytes
+// +--------+
+// |   big  | Chunk 8-byte aligned, and size a multiple of 8 bytes
+// +--------+
+// | small1 | Chunk smaller than 8 bytes
+// +--------+
+fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
+    let small0_size = if ptr as usize % 8 == 0 { 0 } else { 8 - ptr as usize % 8 };
+    let small1_size = (len - small0_size as usize) % 8;
+    let big_size = len - small0_size as usize - small1_size as usize;
+
+    (small0_size, big_size, small1_size)
+}
+
+unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
+    unsafe {
+        asm!(
+            "rep movsq (%rsi), (%rdi)",
+            inout("rcx") len / 8 => _,
+            inout("rdi") dst => _,
+            inout("rsi") src => _,
+            options(att_syntax, nostack, preserves_flags)
+        );
+    }
+}
+
 /// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`
 ///
 /// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
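A rough worked example of the new helpers (not part of the diff; the pointer values below are made up for illustration): `region_as_aligned_chunks` carves a possibly misaligned region into a small unaligned head, an aligned middle whose length is a multiple of 8, and a small tail, so that the middle chunk can be copied with whole quadwords (`rep movsq` in `copy_quadwords`) while the sub-8-byte head and tail get special handling.

    // Standalone sketch; mirrors the arithmetic of `region_as_aligned_chunks`.
    fn split(ptr: usize, len: usize) -> (usize, usize, usize) {
        let small0 = if ptr % 8 == 0 { 0 } else { 8 - ptr % 8 };
        let small1 = (len - small0) % 8;
        let big = len - small0 - small1;
        (small0, big, small1)
    }

    fn main() {
        // 0x1003 needs 5 bytes to reach the next 8-byte boundary (0x1008),
        // leaving 24 aligned bytes and a 1-byte tail: 5 + 24 + 1 == 30.
        assert_eq!(split(0x1003, 30), (5, 24, 1));
        // An already-aligned, multiple-of-8 region is all "big".
        assert_eq!(split(0x2000, 16), (0, 16, 0));
    }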
@@ -343,17 +371,6 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
         }
     }
 
-    unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
-        unsafe {
-            asm!(
-                "rep movsq (%rsi), (%rdi)",
-                inout("rcx") len / 8 => _,
-                inout("rdi") dst => _,
-                inout("rsi") src => _,
-                options(att_syntax, nostack, preserves_flags)
-            );
-        }
-    }
     assert!(!src.is_null());
     assert!(!dst.is_null());
     assert!(is_enclave_range(src, len));
@@ -370,7 +387,7 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
     } else if len % 8 == 0 && dst as usize % 8 == 0 {
         // Copying 8-byte aligned quadwords: copy quad word per quad word
         unsafe {
-            copy_aligned_quadwords_to_userspace(src, dst, len);
+            copy_quadwords(src, dst, len);
         }
     } else {
         // Split copies into three parts:
@@ -381,20 +398,16 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
         // +--------+
         // | small1 | Chunk smaller than 8 bytes
        // +--------+
+        let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
 
         unsafe {
             // Copy small0
-            let small0_size = (8 - dst as usize % 8) as u8;
-            let small0_src = src;
-            let small0_dst = dst;
-            copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+            copy_bytewise_to_userspace(src, dst, small0_size as _);
 
             // Copy big
-            let small1_size = ((len - small0_size as usize) % 8) as u8;
-            let big_size = len - small0_size as usize - small1_size as usize;
             let big_src = src.offset(small0_size as _);
             let big_dst = dst.offset(small0_size as _);
-            copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+            copy_quadwords(big_src as _, big_dst, big_size);
 
             // Copy small1
             let small1_src = src.offset(big_size as isize + small0_size as isize);
@@ -404,6 +417,106 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
     }
 }
 
+/// Copies `len` bytes of data from userspace pointer `src` to enclave pointer `dst`
+///
+/// This function mitigates AEPIC leak vulnerabilities by ensuring all reads from untrusted memory are 8-byte aligned
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in user memory
+/// * The `dst` memory range is not in enclave memory
+///
+/// # References
+/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
+/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html
+pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
+    // Copies memory region `src..src + len` to the enclave at `dst`. The source memory region
+    // is:
+    // - strictly less than 8 bytes in size and may be
+    // - located at a misaligned memory location
+    fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) {
+        let mut tmp_buff = [0u8; 16];
+
+        unsafe {
+            // Compute an aligned memory region to read from
+            // +--------+ <-- aligned_src + aligned_len (8B-aligned)
+            // |  pad1  |
+            // +--------+ <-- src + len (misaligned)
+            // |        |
+            // |        |
+            // |        |
+            // +--------+ <-- src (misaligned)
+            // |  pad0  |
+            // +--------+ <-- aligned_src (8B-aligned)
+            let pad0_size = src as usize % 8;
+            let aligned_src = src.sub(pad0_size);
+
+            let pad1_size = 8 - (src.add(len) as usize % 8);
+            let aligned_len = pad0_size + len + pad1_size;
+
+            debug_assert!(len < 8);
+            debug_assert_eq!(aligned_src as usize % 8, 0);
+            debug_assert_eq!(aligned_len % 8, 0);
+            debug_assert!(aligned_len <= 16);
+
+            // Copy the aligned buffer to a temporary buffer
+            // Note: copying from a slightly different memory location is a bit odd. In this case it
+            // can't lead to page faults or inadvertent copying from the enclave as we only ensured
+            // that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes
+            // aligned, `aligned_src` must be on the same page as `src`. A similar argument can be made
+            // for `src + len`
+            copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len);
+
+            // Copy the correct parts of the temporary buffer to the destination
+            ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len);
+        }
+    }
+
+    assert!(!src.is_null());
+    assert!(!dst.is_null());
+    assert!(is_user_range(src, len));
+    assert!(is_enclave_range(dst, len));
+    assert!(!(src as usize).overflowing_add(len + 8).1);
+    assert!(!(dst as usize).overflowing_add(len + 8).1);
+
+    if len < 8 {
+        copy_misaligned_chunk_to_enclave(src, dst, len);
+    } else if len % 8 == 0 && src as usize % 8 == 0 {
+        // Copying 8-byte aligned quadwords: copy quad word per quad word
+        unsafe {
+            copy_quadwords(src, dst, len);
+        }
+    } else {
+        // Split copies into three parts:
+        // +--------+
+        // | small0 | Chunk smaller than 8 bytes
+        // +--------+
+        // |   big  | Chunk 8-byte aligned, and size a multiple of 8 bytes
+        // +--------+
+        // | small1 | Chunk smaller than 8 bytes
+        // +--------+
+        let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
+
+        unsafe {
+            // Copy small0
+            copy_misaligned_chunk_to_enclave(src, dst, small0_size);
+
+            // Copy big
+            let big_src = src.add(small0_size);
+            let big_dst = dst.add(small0_size);
+            copy_quadwords(big_src, big_dst, big_size);
+
+            // Copy small1
+            let small1_src = src.add(big_size + small0_size);
+            let small1_dst = dst.add(big_size + small0_size);
+            copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size);
+        }
+    }
+}
+
 #[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: ?Sized> UserRef<T>
 where
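To make the padding arithmetic in `copy_misaligned_chunk_to_enclave` concrete, here is a rough trace with made-up addresses (illustrative only, not code from this commit). For a sub-8-byte read, the enclosing 8-byte-aligned window never exceeds the 16-byte temporary buffer:

    fn main() {
        // Hypothetical misaligned user pointer and a 6-byte read (len < 8).
        let (src, len) = (0x1005usize, 6usize);

        let pad0_size = src % 8;                       // 5 bytes down to 0x1000
        let aligned_src = src - pad0_size;             // 0x1000, 8-byte aligned
        let pad1_size = 8 - ((src + len) % 8);         // 5 bytes up to 0x1010
        let aligned_len = pad0_size + len + pad1_size; // 16, i.e. two quadwords

        assert_eq!(aligned_src, 0x1000);
        assert_eq!(aligned_len, 16);
        // Only bytes [pad0_size..pad0_size + len] of the temporary buffer are
        // then copied on into the enclave destination.
    }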
@@ -468,7 +581,7 @@ where
     pub fn copy_to_enclave(&self, dest: &mut T) {
         unsafe {
             assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
-            ptr::copy(
+            copy_from_userspace(
                 self.0.get() as *const T as *const u8,
                 dest as *mut T as *mut u8,
                 mem::size_of_val(dest),
@@ -494,7 +607,11 @@ where
 {
     /// Copies the value from user memory into enclave memory.
     pub fn to_enclave(&self) -> T {
-        unsafe { ptr::read(self.0.get()) }
+        unsafe {
+            let mut data: T = mem::MaybeUninit::uninit().assume_init();
+            copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>());
+            data
+        }
     }
 }
 
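As a hypothetical usage sketch (not part of the diff, and only meaningful on the x86_64-fortanix-unknown-sgx target), a caller of `to_enclave` on a user-space value now goes through `copy_from_userspace`, so the read from user memory happens in aligned 8-byte chunks rather than via a raw `ptr::read`:

    // `new_from_enclave` allocates user memory and copies the value out;
    // `to_enclave` copies it back in through `copy_from_userspace`.
    let user_val = User::<u64>::new_from_enclave(&42);
    let enclave_copy: u64 = user_val.to_enclave();
    assert_eq!(enclave_copy, 42);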

library/std/src/sys/sgx/abi/usercalls/tests.rs (+28 / -2 lines)
@@ -1,8 +1,8 @@
-use super::alloc::copy_to_userspace;
 use super::alloc::User;
+use super::alloc::{copy_from_userspace, copy_to_userspace};
 
 #[test]
-fn test_copy_function() {
+fn test_copy_to_userspace_function() {
     let mut src = [0u8; 100];
     let mut dst = User::<[u8]>::uninitialized(100);
 
@@ -28,3 +28,29 @@ fn test_copy_function() {
         }
     }
 }
+
+#[test]
+fn test_copy_from_userspace_function() {
+    let mut dst = [0u8; 100];
+    let mut src = User::<[u8]>::uninitialized(100);
+
+    src.copy_from_enclave(&[0u8; 100]);
+
+    for size in 0..48 {
+        // For all possible alignment
+        for offset in 0..8 {
+            // overwrite complete dst
+            dst = [0u8; 100];
+
+            // Copy src[0..size] to dst + offset
+            unsafe { copy_from_userspace(src.as_ptr().offset(offset), dst.as_mut_ptr(), size) };
+
+            // Verify copy
+            for byte in 0..size {
+                unsafe {
+                    assert_eq!(dst[byte as usize], *src.as_ptr().offset(offset + byte as isize));
+                }
+            }
+        }
+    }
+}
