Restore HashMap performance by allowing some functions to be inlined

Since the hashmap and its hasher are implemented in different crates, we
currently can't benefit from inlining, which means that especially for
small, fixed-size keys there is a huge overhead in hash calculations,
because the compiler can't apply optimizations that are only valid for
such keys.

Fixes the brainfuck benchmark in #24014.
This commit is contained in:
Björn Steinbrink 2015-05-03 14:02:25 +02:00
parent 0d7d3ec9d2
commit f4176b52d3
2 changed files with 4 additions and 0 deletions

View file

@@ -111,6 +111,7 @@ impl SipHasher {
state
}
#[inline]
fn reset(&mut self) {
self.length = 0;
self.v0 = self.k0 ^ 0x736f6d6570736575;
@@ -120,6 +121,7 @@ impl SipHasher {
self.ntail = 0;
}
#[inline]
fn write(&mut self, msg: &[u8]) {
let length = msg.len();
self.length += length;
@@ -173,6 +175,7 @@ impl Hasher for SipHasher {
self.write(msg)
}
#[inline]
fn finish(&self) -> u64 {
let mut v0 = self.v0;
let mut v1 = self.v1;

View file

@@ -1600,6 +1600,7 @@ impl RandomState {
reason = "hashing an hash maps may be altered")]
impl HashState for RandomState {
type Hasher = SipHasher;
#[inline]
fn hasher(&self) -> SipHasher {
SipHasher::new_with_keys(self.k0, self.k1)
}