diff options
author | Brian Cully <bjc@kublai.com> | 2019-08-13 19:04:55 -0400 |
---|---|---|
committer | Brian Cully <bjc@kublai.com> | 2019-08-13 19:19:43 -0400 |
commit | 680a95006deeb98e7c79fa30e3a264cfd6278fe9 (patch) | |
tree | 15e1e6d1cd27f0c0394b486fc9b6a59ea935da8e /src | |
parent | 9be841ec59fa717b7adae8b550d0625a8cd0bab4 (diff) | |
download | starb-680a95006deeb98e7c79fa30e3a264cfd6278fe9.tar.gz starb-680a95006deeb98e7c79fa30e3a264cfd6278fe9.zip |
Add Reader::shift_into function for batch copying.
This function can be used when you need to copy many items out of
the buffer quickly, as it performs its safety checks only once
rather than on every item extraction.
Diffstat (limited to 'src')
-rwxr-xr-x | src/lib.rs | 70 |
1 file changed, 67 insertions(+), 3 deletions(-)
@@ -8,7 +8,9 @@ use core::{ cell::UnsafeCell, + cmp, marker::PhantomData, + mem, sync::atomic::{AtomicUsize, Ordering}, }; @@ -105,7 +107,7 @@ where /// *minimum* of what may actually be available by the time the /// reading takes place. pub fn len(&self) -> usize { - let rb: &mut RingBuffer<T> = unsafe { core::mem::transmute(self.rb) }; + let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) }; let h = rb.head.load(Ordering::SeqCst); let t = rb.tail.load(Ordering::SeqCst); let rc = (t + CAPACITY - h) % CAPACITY; @@ -122,7 +124,7 @@ where /// /// If nothing is available in the buffer, returns `None` pub fn shift(&mut self) -> Option<T> { - let rb: &mut RingBuffer<T> = unsafe { core::mem::transmute(self.rb) }; + let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) }; let h = rb.head.load(Ordering::SeqCst); let t = rb.tail.load(Ordering::SeqCst); if h == t { @@ -137,6 +139,31 @@ where rc } } + + /// Shift all available data into `buf` up to the size of `buf`. + /// + /// Returns the number of items written into `buf`. + pub fn shift_into(&mut self, buf: &mut [T]) -> usize { + let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) }; + + let mut h = rb.head.load(Ordering::SeqCst); + let t = rb.tail.load(Ordering::SeqCst); + + let mylen = (t + CAPACITY - h) % CAPACITY; + let buflen = buf.len(); + let len = cmp::min(mylen, buflen); + + unsafe { + let rbuf = &mut *rb.buf.get(); + for i in 0..len { + *buf.get_unchecked_mut(i) = *rbuf.get_unchecked(h); + h = (h + 1) % CAPACITY; + } + } + + rb.head.store(h, Ordering::SeqCst); + len + } } impl<T> Iterator for Reader<'_, T> @@ -158,7 +185,7 @@ where /// Returns `BufferFull` if appending `v` would overlap with the /// start of the buffer. 
pub fn unshift(&mut self, v: T) -> Result<(), Error> { - let rb: &mut RingBuffer<T> = unsafe { core::mem::transmute(self.rb) }; + let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) }; let h = rb.head.load(Ordering::SeqCst); let t = rb.tail.load(Ordering::SeqCst); let nt = (t + 1) % CAPACITY; @@ -265,4 +292,41 @@ mod test { i += 1; } } + + #[test] + fn shift_into_smaller() { + let rb = RingBuffer::<usize>::new(0); + let (mut rbr, mut rbw) = rb.split(); + for i in 0..CAPACITY - 1 { + assert_eq!(rbw.unshift(i), Ok(())); + } + + let mut buf: [usize; CAPACITY / 2] = [0; CAPACITY / 2]; + assert_eq!(rbr.shift_into(&mut buf), CAPACITY / 2, "return len wrong"); + for i in 0..CAPACITY / 2 { + assert_eq!(buf[i], i, "slot {} wrong", i) + } + + assert!(!rbr.shift().is_none()); + } + + #[test] + fn shift_into_bigger() { + let rb = RingBuffer::<usize>::new(0); + let (mut rbr, mut rbw) = rb.split(); + for i in 0..CAPACITY - 1 { + assert_eq!(rbw.unshift(i), Ok(())); + } + + let mut buf: [usize; CAPACITY * 2] = [0; CAPACITY * 2]; + assert_eq!(rbr.shift_into(&mut buf), CAPACITY - 1, "return len wrong"); + for i in 0..CAPACITY - 1 { + assert_eq!(buf[i], i, "first half") + } + for i in CAPACITY - 1..CAPACITY * 2 { + assert_eq!(buf[i], 0, "second half") + } + + assert!(rbr.shift().is_none()); + } } |