From 680a95006deeb98e7c79fa30e3a264cfd6278fe9 Mon Sep 17 00:00:00 2001
From: Brian Cully
Date: Tue, 13 Aug 2019 19:04:55 -0400
Subject: Add Reader::shift_into function for batch copying.

This function can be used if you need to quickly copy a bunch of things
from the buffer, as it only does its safety checks once, rather than on
every item extraction.
---
 src/lib.rs | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 67 insertions(+), 3 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index a4a5be8..2089cfd 100755
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -8,7 +8,9 @@
 
 use core::{
     cell::UnsafeCell,
+    cmp,
     marker::PhantomData,
+    mem,
     sync::atomic::{AtomicUsize, Ordering},
 };
 
@@ -105,7 +107,7 @@ where
     /// *minimum* of what may actually be available by the time the
     /// reading takes place.
     pub fn len(&self) -> usize {
-        let rb: &mut RingBuffer<T> = unsafe { core::mem::transmute(self.rb) };
+        let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) };
         let h = rb.head.load(Ordering::SeqCst);
         let t = rb.tail.load(Ordering::SeqCst);
         let rc = (t + CAPACITY - h) % CAPACITY;
@@ -122,7 +124,7 @@
     ///
     /// If nothing is available in the buffer, returns `None`
     pub fn shift(&mut self) -> Option<T> {
-        let rb: &mut RingBuffer<T> = unsafe { core::mem::transmute(self.rb) };
+        let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) };
         let h = rb.head.load(Ordering::SeqCst);
         let t = rb.tail.load(Ordering::SeqCst);
         if h == t {
@@ -137,6 +139,31 @@
             rc
         }
     }
+
+    /// Shift all available data into `buf` up to the size of `buf`.
+    ///
+    /// Returns the number of items written into `buf`.
+    pub fn shift_into(&mut self, buf: &mut [T]) -> usize {
+        let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) };
+
+        let mut h = rb.head.load(Ordering::SeqCst);
+        let t = rb.tail.load(Ordering::SeqCst);
+
+        let mylen = (t + CAPACITY - h) % CAPACITY;
+        let buflen = buf.len();
+        let len = cmp::min(mylen, buflen);
+
+        unsafe {
+            let rbuf = &mut *rb.buf.get();
+            for i in 0..len {
+                *buf.get_unchecked_mut(i) = *rbuf.get_unchecked(h);
+                h = (h + 1) % CAPACITY;
+            }
+        }
+
+        rb.head.store(h, Ordering::SeqCst);
+        len
+    }
 }
 impl<T> Iterator for Reader<'_, T>
 where
@@ -158,7 +185,7 @@
     /// Returns `BufferFull` if appending `v` would overlap with the
     /// start of the buffer.
     pub fn unshift(&mut self, v: T) -> Result<(), Error> {
-        let rb: &mut RingBuffer<T> = unsafe { core::mem::transmute(self.rb) };
+        let rb: &mut RingBuffer<T> = unsafe { mem::transmute(self.rb) };
         let h = rb.head.load(Ordering::SeqCst);
         let t = rb.tail.load(Ordering::SeqCst);
         let nt = (t + 1) % CAPACITY;
@@ -265,4 +292,41 @@ mod test {
             i += 1;
         }
     }
+
+    #[test]
+    fn shift_into_smaller() {
+        let rb = RingBuffer::<usize>::new(0);
+        let (mut rbr, mut rbw) = rb.split();
+        for i in 0..CAPACITY - 1 {
+            assert_eq!(rbw.unshift(i), Ok(()));
+        }
+
+        let mut buf: [usize; CAPACITY / 2] = [0; CAPACITY / 2];
+        assert_eq!(rbr.shift_into(&mut buf), CAPACITY / 2, "return len wrong");
+        for i in 0..CAPACITY / 2 {
+            assert_eq!(buf[i], i, "slot {} wrong", i)
+        }
+
+        assert!(!rbr.shift().is_none());
+    }
+
+    #[test]
+    fn shift_into_bigger() {
+        let rb = RingBuffer::<usize>::new(0);
+        let (mut rbr, mut rbw) = rb.split();
+        for i in 0..CAPACITY - 1 {
+            assert_eq!(rbw.unshift(i), Ok(()));
+        }
+
+        let mut buf: [usize; CAPACITY * 2] = [0; CAPACITY * 2];
+        assert_eq!(rbr.shift_into(&mut buf), CAPACITY - 1, "return len wrong");
+        for i in 0..CAPACITY - 1 {
+            assert_eq!(buf[i], i, "first half")
+        }
+        for i in CAPACITY - 1..CAPACITY * 2 {
+            assert_eq!(buf[i], 0, "second half")
+        }
+
+        assert!(rbr.shift().is_none());
+    }
 }
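For readers coming to this change from the API side, here is a minimal usage
sketch of the new call. It only uses items that already appear in this patch
(RingBuffer::new, split, unshift, shift_into, and the crate's CAPACITY
constant); the function name drain_in_one_pass and the scratch-buffer size are
illustrative, not part of the patch:

fn drain_in_one_pass() {
    // Build a buffer and split it into reader/writer halves, as the tests
    // above do.
    let rb = RingBuffer::<usize>::new(0);
    let (mut rbr, mut rbw) = rb.split();

    // Producer side: enqueue items one at a time.
    for i in 0..CAPACITY - 1 {
        assert_eq!(rbw.unshift(i), Ok(()));
    }

    // Consumer side: a single shift_into() call copies everything out,
    // doing the head/tail bookkeeping once instead of once per item as
    // repeated shift() calls would.
    let mut scratch = [0usize; CAPACITY];
    let n = rbr.shift_into(&mut scratch);
    assert_eq!(n, CAPACITY - 1);
    for (i, v) in scratch[..n].iter().enumerate() {
        assert_eq!(*v, i);
    }
}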