Skip to content

Commit d2f3f6d

Browse files
committed
Somewhat optimize the generic Features::requires_unknown_bits
It turns out we spend several percent of our route-finding time just checking, byte-by-byte, whether nodes and channels require unknown features. While the cost is almost certainly dominated by memory-read latency, avoiding the byte-by-byte checks should reduce the branch count slightly, which may reduce the overhead.
1 parent 7b3d73e commit d2f3f6d

File tree

1 file changed

+17
-10
lines changed

1 file changed

+17
-10
lines changed

lightning/src/ln/features.rs

+17-10
Original file line numberDiff line numberDiff line change
@@ -775,16 +775,23 @@ impl<T: sealed::Context> Features<T> {
775775
pub fn requires_unknown_bits(&self) -> bool {
776776
// Bitwise AND-ing with all even bits set except for known features will select required
777777
// unknown features.
778-
let byte_count = T::KNOWN_FEATURE_MASK.len();
779-
self.flags.iter().enumerate().any(|(i, &byte)| {
780-
let required_features = 0b01_01_01_01;
781-
let unknown_features = if i < byte_count {
782-
!T::KNOWN_FEATURE_MASK[i]
783-
} else {
784-
0b11_11_11_11
785-
};
786-
(byte & (required_features & unknown_features)) != 0
787-
})
778+
let mut known_chunks = T::KNOWN_FEATURE_MASK.chunks(8);
779+
for chunk in self.flags.chunks(8) {
780+
let mut flag_bytes = [0; 8];
781+
flag_bytes[..chunk.len()].copy_from_slice(&chunk);
782+
let flag_int = u64::from_le_bytes(flag_bytes);
783+
784+
let known_chunk = known_chunks.next().unwrap_or(&[0; 0]);
785+
let mut known_bytes = [0; 8];
786+
known_bytes[..known_chunk.len()].copy_from_slice(&known_chunk);
787+
let known_int = u64::from_le_bytes(known_bytes);
788+
789+
const REQ_MASK: u64 = 0x55555555_55555555;
790+
if flag_int & (REQ_MASK & !known_int) != 0 {
791+
return true;
792+
}
793+
}
794+
false
788795
}
789796

790797
pub(crate) fn supports_unknown_bits(&self) -> bool {

0 commit comments

Comments
 (0)