Auto merge of #106081 - mina86:c, r=Mark-Simulacrum

char: µoptimise UTF-16 surrogates decoding

According to Godbolt¹, on x86_64 using a bitwise AND produces slightly
better code than using subtraction.  Readability of both is pretty
much equivalent, so we might just as well use the shorter option.

¹ https://rust.godbolt.org/z/9jM3ejbMx
Committed by bors on 2022-12-24 07:35:23 +00:00
Commit 5e8bab91d3
2 changed files with 5 additions and 1 deletion
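
Why the AND is a drop-in replacement for the subtraction (a small standalone check, not part of the commit): every high surrogate lies in 0xD800..=0xDBFF and every low surrogate in 0xDC00..=0xDFFF, so within each range the upper six bits are fixed and only the low 10 bits vary. Both range bases have all-zero low 10 bits, so subtracting the base clears exactly the bits that `& 0x3FF` masks off:

fn main() {
    // High surrogates: for u = 0xD800 + k with k < 0x400,
    // u - 0xD800 == u & 0x3FF == k, because 0xD800's low 10 bits are zero.
    for u in 0xD800u16..=0xDBFF {
        assert_eq!(u - 0xD800, u & 0x3FF);
    }
    // Low surrogates: the same argument with base 0xDC00.
    for u2 in 0xDC00u16..=0xDFFF {
        assert_eq!(u2 - 0xDC00, u2 & 0x3FF);
    }
    println!("subtraction and masking agree on every surrogate");
}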


@@ -67,7 +67,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> {
             }
 
             // all ok, so lets decode it.
-            let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000;
+            let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000;
             // SAFETY: we checked that it's a legal unicode value
             Some(Ok(unsafe { from_u32_unchecked(c) }))
         }
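
A worked example of the patched formula (a standalone sketch, not part of the commit; it uses the public std::char::decode_utf16 wrapper over this iterator): for the pair [0xD840, 0xDC00] the lead surrogate contributes 0xD840 & 0x3FF = 0x40 as the high 10 bits, the trail surrogate contributes 0xDC00 & 0x3FF = 0 as the low 10 bits, and (0x40 << 10 | 0) + 0x1_0000 = 0x20000, i.e. '\u{20000}', matching the new test below.

fn main() {
    let (u, u2) = (0xD840u16, 0xDC00u16);
    // Manual computation with the patched formula.
    let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000;
    assert_eq!(c, 0x20000);
    // The safe public API reaches the same scalar value; errors are
    // mapped to the unpaired code unit, as the test helper does.
    let decoded: Vec<Result<char, u16>> = std::char::decode_utf16([u, u2])
        .map(|r| r.map_err(|e| e.unpaired_surrogate()))
        .collect();
    assert_eq!(decoded, [Ok('\u{20000}')]);
}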


@@ -306,6 +306,10 @@ fn test_decode_utf16() {
     }
     check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]);
     check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]);
+    check(&[0xD800], &[Err(0xD800)]);
+    check(&[0xD840, 0xDC00], &[Ok('\u{20000}')]);
+    check(&[0xD840, 0xD840, 0xDC00], &[Err(0xD840), Ok('\u{20000}')]);
+    check(&[0xDC00, 0xD840], &[Err(0xDC00), Err(0xD840)]);
 }
 
 #[test]