@@ -0,0 +1,63 @@
1 +
/*! The `PartialEq` and `PartialOrd` trait implementations are *surprisingly*
2 +
hard to implement correctly. This test suite is built to ensure that all
3 +
combinations of equality and comparison are present and correct.
4 +
!*/
5 +
6 +
use bitvec::prelude::*;
7 +
8 +
#[test]
9 +
fn no_alloc() {
10 +
	let a = bits![mut Msb0, u8; 0, 1];
11 +
	let b = bits![mut Lsb0, u16; 0, 1];
12 +
	let c = bits![mut 1, 0];
13 +
14 +
	//  BitSlice as PartialEq<BitSlice>
15 +
	assert_eq!(*a, *b);
16 +
	//  BitSlice as PartialEq<&mut BitSlice>
17 +
	assert_eq!(*a, b);
18 +
	//  &mut BitSlice as PartialEq<BitSlice>
19 +
	assert_eq!(a, *b);
20 +
	//  &mut BitSlice as PartialEq<&mut BitSlice>
21 +
	assert_eq!(a, b);
22 +
23 +
	//  &BitSlice as PartialEq<&BitSlice>
24 +
	assert_eq!(&*a, &*b);
25 +
	//  &BitSlice as PartialEq<BitSlice>
26 +
	assert_eq!(&*a, *b);
27 +
	//  BitSlice as PartialEq<&BitSlice>
28 +
	assert_eq!(*a, &*b);
29 +
30 +
	//  &mut BitSlice as PartialEq<&BitSlice>
31 +
	assert_eq!(a, &*b);
32 +
	//  &BitSlice as PartialEq<&mut BitSlice>
33 +
	assert_eq!(&*a, b);
34 +
35 +
	//  BitSlice as PartialOrd<BitSlice>
36 +
	assert!(*b < *c);
37 +
	//  BitSlice as PartialOrd<&mut BitSlice>
38 +
	assert!(*b < c);
39 +
	//  &mut BitSlice as PartialOrd<BitSlice>
40 +
	assert!(b < *c);
41 +
	//  &mut BitSlice as PartialOrd<&mut BitSlice>
42 +
	assert!(b < c);
43 +
44 +
	//  &BitSlice as PartialOrd<&BitSlice>
45 +
	assert!(&*b < &*c);
46 +
	//  &BitSlice as PartialOrd<&mut BitSlice>
47 +
	assert!(&*b < c);
48 +
	//  &mut BitSlice as PartialOrd<&BitSlice>
49 +
	assert!(b < &*c);
50 +
}
51 +
52 +
#[test]
53 +
#[rustfmt::skip]
54 +
#[cfg(feature = "alloc")]
55 +
fn with_alloc() {
56 +
	let a = bits![Msb0, u8; 0, 1];
57 +
	let b = bitbox![Lsb0, u16; 0, 1];
58 +
	let c = bitvec![0, 1];
59 +
60 +
	assert_eq!(a, a); assert_eq!(a, b); assert_eq!(a, c);
61 +
	assert_eq!(b, a); assert_eq!(b, b); assert_eq!(b, c);
62 +
	assert_eq!(c, a); assert_eq!(c, b); assert_eq!(c, c);
63 +
}
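
The combinations exercised above all reduce to one forwarding pattern in the trait implementations: a single value-to-value impl performs the comparison, and the reference impls dereference and delegate to it. A minimal sketch of that shape, using a hypothetical `Bits` newtype rather than the real generic `BitSlice` impls that appear later in this diff:

//  Illustrative sketch only: `Bits` stands in for the real generic types.
#[derive(PartialEq)]
struct Bits(Vec<bool>);

//  val-to-ref forwarding: dereference the right-hand side and reuse the
//  value-to-value comparison.
impl PartialEq<&Bits> for Bits {
	fn eq(&self, rhs: &&Bits) -> bool {
		*self == **rhs
	}
}

//  ref-to-val forwarding: dereference the left-hand side instead.
impl PartialEq<Bits> for &Bits {
	fn eq(&self, rhs: &Bits) -> bool {
		**self == *rhs
	}
}

fn main() {
	let (a, b) = (Bits(vec![false, true]), Bits(vec![false, true]));
	assert!(a == b); //  value == value
	assert!(a == &b); //  value == reference
	assert!(&a == b); //  reference == value
}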

@@ -1734,7 +1734,8 @@
1734 1734
	where R: RangeBounds<usize> {
1735 1735
		let len = self.len();
1736 1736
		let rev = src.contains(&dest);
1737 -
		let iter = dvl::normalize_range(src, len).zip(dest .. len);
1737 +
		let source = dvl::normalize_range(src, len);
1738 +
		let iter = source.zip(dest .. len);
1738 1739
		if rev {
1739 1740
			for (from, to) in iter.rev() {
1740 1741
				self.copy_unchecked(from, to);

@@ -79,6 +79,8 @@
79 79
	}
80 80
}
81 81
82 +
//  ref-to-val equality
83 +
82 84
#[cfg(not(tarpaulin_include))]
83 85
impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for &BitSlice<O1, T1>
84 86
where
@@ -89,7 +91,7 @@
89 91
{
90 92
	#[inline]
91 93
	fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
92 -
		*self == rhs
94 +
		**self == rhs
93 95
	}
94 96
}
95 97
@@ -103,10 +105,12 @@
103 105
{
104 106
	#[inline]
105 107
	fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
106 -
		*self == rhs
108 +
		**self == rhs
107 109
	}
108 110
}
109 111
112 +
//  val-to-ref equality
113 +
110 114
#[cfg(not(tarpaulin_include))]
111 115
impl<O1, O2, T1, T2> PartialEq<&BitSlice<O2, T2>> for BitSlice<O1, T1>
112 116
where
@@ -161,7 +165,8 @@
161 165
	}
162 166
}
163 167
164 -
#[cfg(not(tarpaulin_include))]
168 +
//  ref-to-val ordering
169 +
165 170
impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for &BitSlice<O1, T1>
166 171
where
167 172
	O1: BitOrder,
@@ -175,7 +180,21 @@
175 180
	}
176 181
}
177 182
178 -
#[cfg(not(tarpaulin_include))]
183 +
impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
184 +
where
185 +
	O1: BitOrder,
186 +
	O2: BitOrder,
187 +
	T1: BitStore,
188 +
	T2: BitStore,
189 +
{
190 +
	#[inline]
191 +
	fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
192 +
		(**self).partial_cmp(rhs)
193 +
	}
194 +
}
195 +
196 +
//  val-to-ref ordering
197 +
179 198
impl<O1, O2, T1, T2> PartialOrd<&BitSlice<O2, T2>> for BitSlice<O1, T1>
180 199
where
181 200
	O1: BitOrder,
@@ -185,7 +204,48 @@
185 204
{
186 205
	#[inline]
187 206
	fn partial_cmp(&self, rhs: &&BitSlice<O2, T2>) -> Option<cmp::Ordering> {
188 -
		(*self).partial_cmp(*rhs)
207 +
		(*self).partial_cmp(&**rhs)
208 +
	}
209 +
}
210 +
211 +
impl<O1, O2, T1, T2> PartialOrd<&mut BitSlice<O2, T2>> for BitSlice<O1, T1>
212 +
where
213 +
	O1: BitOrder,
214 +
	O2: BitOrder,
215 +
	T1: BitStore,
216 +
	T2: BitStore,
217 +
{
218 +
	#[inline]
219 +
	fn partial_cmp(&self, rhs: &&mut BitSlice<O2, T2>) -> Option<cmp::Ordering> {
220 +
		(*self).partial_cmp(&**rhs)
221 +
	}
222 +
}
223 +
224 +
//  &mut-to-& ordering
225 +
226 +
impl<O1, O2, T1, T2> PartialOrd<&mut BitSlice<O2, T2>> for &BitSlice<O1, T1>
227 +
where
228 +
	O1: BitOrder,
229 +
	O2: BitOrder,
230 +
	T1: BitStore,
231 +
	T2: BitStore,
232 +
{
233 +
	#[inline]
234 +
	fn partial_cmp(&self, rhs: &&mut BitSlice<O2, T2>) -> Option<cmp::Ordering> {
235 +
		(**self).partial_cmp(&**rhs)
236 +
	}
237 +
}
238 +
239 +
impl<O1, O2, T1, T2> PartialOrd<&BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
240 +
where
241 +
	O1: BitOrder,
242 +
	O2: BitOrder,
243 +
	T1: BitStore,
244 +
	T2: BitStore,
245 +
{
246 +
	#[inline]
247 +
	fn partial_cmp(&self, rhs: &&BitSlice<O2, T2>) -> Option<cmp::Ordering> {
248 +
		(**self).partial_cmp(&**rhs)
189 249
	}
190 250
}
191 251
@@ -197,7 +257,7 @@
197 257
{
198 258
	type Error = &'a [T];
199 259
200 -
	#[inline(always)]
260 +
	#[inline]
201 261
	fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> {
202 262
		BitSlice::from_slice(slice).ok_or(slice)
203 263
	}
@@ -247,7 +307,7 @@
247 307
	O: BitOrder,
248 308
	T: BitStore,
249 309
{
250 -
	#[inline]
310 +
	#[inline(always)]
251 311
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
252 312
		Binary::fmt(self, fmt)
253 313
	}

@@ -120,28 +120,57 @@
120 120
}
121 121
122 122
#[cfg(not(tarpaulin_include))]
123 -
impl<O, T> PartialEq<BitVec<O, T>> for BitSlice<O, T>
123 +
impl<O1, O2, T1, T2> PartialEq<BitVec<O2, T2>> for BitSlice<O1, T1>
124 124
where
125 -
	O: BitOrder,
126 -
	T: BitStore,
125 +
	O1: BitOrder,
126 +
	O2: BitOrder,
127 +
	T1: BitStore,
128 +
	T2: BitStore,
127 129
{
128 130
	#[inline]
129 -
	fn eq(&self, other: &BitVec<O, T>) -> bool {
131 +
	fn eq(&self, other: &BitVec<O2, T2>) -> bool {
130 132
		self == other.as_bitslice()
131 133
	}
132 134
}
133 135
136 +
#[cfg(not(tarpaulin_include))]
137 +
impl<O1, O2, T1, T2> PartialEq<BitVec<O2, T2>> for &BitSlice<O1, T1>
138 +
where
139 +
	O1: BitOrder,
140 +
	O2: BitOrder,
141 +
	T1: BitStore,
142 +
	T2: BitStore,
143 +
{
144 +
	#[inline]
145 +
	fn eq(&self, other: &BitVec<O2, T2>) -> bool {
146 +
		*self == other.as_bitslice()
147 +
	}
148 +
}
149 +
150 +
#[cfg(not(tarpaulin_include))]
151 +
impl<O1, O2, T1, T2> PartialEq<BitVec<O2, T2>> for &mut BitSlice<O1, T1>
152 +
where
153 +
	O1: BitOrder,
154 +
	O2: BitOrder,
155 +
	T1: BitStore,
156 +
	T2: BitStore,
157 +
{
158 +
	#[inline]
159 +
	fn eq(&self, other: &BitVec<O2, T2>) -> bool {
160 +
		**self == other.as_bitslice()
161 +
	}
162 +
}
163 +
134 164
#[cfg(not(tarpaulin_include))]
135 165
impl<O, T, Rhs> PartialEq<Rhs> for BitVec<O, T>
136 166
where
137 167
	O: BitOrder,
138 168
	T: BitStore,
139 -
	Rhs: ?Sized,
140 -
	BitSlice<O, T>: PartialEq<Rhs>,
169 +
	Rhs: ?Sized + PartialEq<BitSlice<O, T>>,
141 170
{
142 171
	#[inline]
143 172
	fn eq(&self, other: &Rhs) -> bool {
144 -
		self.as_bitslice() == other
173 +
		other == self.as_bitslice()
145 174
	}
146 175
}
147 176
@@ -162,12 +191,11 @@
162 191
where
163 192
	O: BitOrder,
164 193
	T: BitStore,
165 -
	Rhs: ?Sized,
166 -
	BitSlice<O, T>: PartialOrd<Rhs>,
194 +
	Rhs: ?Sized + PartialOrd<BitSlice<O, T>>,
167 195
{
168 196
	#[inline]
169 197
	fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
170 -
		self.as_bitslice().partial_cmp(other)
198 +
		other.partial_cmp(self.as_bitslice())
171 199
	}
172 200
}
173 201
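
Reversing the blanket bound here (from `BitSlice<O, T>: PartialEq<Rhs>` to `Rhs: PartialEq<BitSlice<O, T>>`, and likewise for `PartialOrd`) routes `BitVec` comparisons through the bit-slice impls above, which are generic over both orderings and both storage types. A small usage sketch of what this enables, in the same shape as the `with_alloc` test at the top of this diff:

use bitvec::prelude::*;

fn main() {
	let bv = bitvec![Lsb0, u16; 0, 1];
	//  Both operands may differ in ordering and storage, because `BitVec`
	//  defers to the cross-type `BitSlice` equality impls.
	assert_eq!(bv, bits![Msb0, u8; 0, 1]);
	assert_eq!(bits![Msb0, u8; 0, 1], bv);
}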

@@ -64,6 +64,13 @@
64 64
	*bits.first_mut().unwrap() = false;
65 65
	assert_eq!(bits.last(), Some(&true));
66 66
	*bits.last_mut().unwrap() = false;
67 +
68 +
	let (a, b) = (bits![mut Msb0, u8; 0, 1], bits![mut Lsb0, u16; 1, 0]);
69 +
	assert_eq!(a, bits![0, 1]);
70 +
	assert_eq!(b, bits![1, 0]);
71 +
	a.swap_with_bitslice(b);
72 +
	assert_eq!(a, bits![1, 0]);
73 +
	assert_eq!(b, bits![0, 1]);
67 74
}
68 75
69 76
#[test]
@@ -83,16 +90,26 @@
83 90
	assert_eq!(bits[1 .. 7].count_zeros(), 3);
84 91
	assert_eq!(bits[.. 24].count_ones(), 16);
85 92
	assert_eq!(bits[16 ..].count_zeros(), 17);
93 +
94 +
	assert!(!bits![0].contains(bits![0, 1]));
95 +
	assert!(bits![0, 1, 0].contains(bits![1, 0]));
96 +
	assert!(bits![0, 1, 0].starts_with(bits![0, 1]));
97 +
	assert!(bits![0, 1, 0].ends_with(bits![1, 0]));
86 98
}
87 99
88 100
#[test]
89 101
fn modify() {
90 102
	let mut data = 0b0000_1111u8;
91 -
	let bits = data.view_bits_mut::<Local>();
92 103
104 +
	let bits = data.view_bits_mut::<Local>();
93 105
	bits.swap(3, 4);
94 106
	assert_eq!(data, 0b0001_0111);
95 107
108 +
	let bits = data.view_bits_mut::<Lsb0>();
109 +
	bits[1 .. 7].reverse();
110 +
	assert_eq!(data, 0b0110_1001);
111 +
	data.view_bits_mut::<Msb0>()[1 .. 7].reverse();
112 +
96 113
	let bits = data.view_bits_mut::<Msb0>();
97 114
	bits.copy_within(2 .. 4, 0);
98 115
	assert_eq!(data, 0b0101_0111);
@@ -101,3 +118,140 @@
101 118
	bits.copy_within(5 .., 2);
102 119
	assert_eq!(data, 0b0111_1111);
103 120
}
121 +
122 +
#[test]
123 +
fn split() {
124 +
	assert!(BitSlice::<Local, usize>::empty().split_first().is_none());
125 +
	assert_eq!(
126 +
		1u8.view_bits::<Lsb0>().split_first(),
127 +
		Some((&true, bits![Lsb0, u8; 0; 7]))
128 +
	);
129 +
130 +
	assert!(
131 +
		BitSlice::<Local, usize>::empty_mut()
132 +
			.split_first_mut()
133 +
			.is_none()
134 +
	);
135 +
	let mut data = 0u8;
136 +
	let (head, _) = data.view_bits_mut::<Lsb0>().split_first_mut().unwrap();
137 +
	head.set(true);
138 +
	assert_eq!(data, 1);
139 +
140 +
	assert!(BitSlice::<Local, usize>::empty().split_last().is_none());
141 +
	assert_eq!(
142 +
		1u8.view_bits::<Msb0>().split_last(),
143 +
		Some((&true, bits![Msb0, u8; 0; 7]))
144 +
	);
145 +
146 +
	assert!(
147 +
		BitSlice::<Local, usize>::empty_mut()
148 +
			.split_first_mut()
149 +
			.is_none()
150 +
	);
151 +
	let mut data = 0u8;
152 +
	let (head, _) = data.view_bits_mut::<Msb0>().split_last_mut().unwrap();
153 +
	head.set(true);
154 +
	assert_eq!(data, 1);
155 +
156 +
	let mut data = 0b0000_1111u8;
157 +
158 +
	let bits = data.view_bits::<Msb0>();
159 +
	let (left, right) = bits.split_at(4);
160 +
	assert!(left.not_any());
161 +
	assert!(right.all());
162 +
163 +
	let bits = data.view_bits_mut::<Msb0>();
164 +
	let (left, right) = bits.split_at_mut(4);
165 +
	left.set_all(true);
166 +
	right.set_all(false);
167 +
	assert_eq!(data, 0b1111_0000u8);
168 +
}
169 +
170 +
#[test]
171 +
fn iterators() {
172 +
	0b0100_1000u8
173 +
		.view_bits::<Msb0>()
174 +
		.split(|_, bit| *bit)
175 +
		.zip([1usize, 2, 3].iter())
176 +
		.for_each(|(bits, len)| assert_eq!(bits.len(), *len));
177 +
178 +
	let mut data = 0b0100_1000u8;
179 +
	data.view_bits_mut::<Msb0>()
180 +
		.split_mut(|_, bit| *bit)
181 +
		.zip([1usize, 2, 3].iter())
182 +
		.for_each(|(bits, len)| {
183 +
			assert_eq!(bits.len(), *len);
184 +
			bits.set_all(true)
185 +
		});
186 +
	assert_eq!(data, !0);
187 +
188 +
	0b0100_1000u8
189 +
		.view_bits::<Msb0>()
190 +
		.rsplit(|_, bit| *bit)
191 +
		.zip([3usize, 2, 1].iter())
192 +
		.for_each(|(bits, len)| assert_eq!(bits.len(), *len));
193 +
194 +
	let mut data = 0b0100_1000u8;
195 +
	data.view_bits_mut::<Msb0>()
196 +
		.rsplit_mut(|_, bit| *bit)
197 +
		.zip([3usize, 2, 1].iter())
198 +
		.for_each(|(bits, len)| {
199 +
			assert_eq!(bits.len(), *len);
200 +
			bits.set_all(true)
201 +
		});
202 +
	assert_eq!(data, !0);
203 +
204 +
	0b0100_1000u8
205 +
		.view_bits::<Msb0>()
206 +
		.splitn(2, |_, bit| *bit)
207 +
		.zip([1usize, 6].iter())
208 +
		.for_each(|(bits, len)| assert_eq!(bits.len(), *len));
209 +
210 +
	let mut data = 0b0100_1000u8;
211 +
	data.view_bits_mut::<Msb0>()
212 +
		.splitn_mut(2, |_, bit| *bit)
213 +
		.zip([1usize, 6].iter())
214 +
		.for_each(|(bits, len)| {
215 +
			assert_eq!(bits.len(), *len);
216 +
			bits.set_all(true)
217 +
		});
218 +
	assert_eq!(data, !0);
219 +
220 +
	0b0100_1000u8
221 +
		.view_bits::<Msb0>()
222 +
		.rsplitn(2, |_, bit| *bit)
223 +
		.zip([3usize, 4].iter())
224 +
		.for_each(|(bits, len)| assert_eq!(bits.len(), *len));
225 +
226 +
	let mut data = 0b0100_1000u8;
227 +
	data.view_bits_mut::<Msb0>()
228 +
		.rsplitn_mut(2, |_, bit| *bit)
229 +
		.zip([3usize, 4].iter())
230 +
		.for_each(|(bits, len)| {
231 +
			assert_eq!(bits.len(), *len);
232 +
			bits.set_all(true)
233 +
		});
234 +
	assert_eq!(data, !0);
235 +
}
236 +
237 +
#[test]
238 +
fn alignment() {
239 +
	let mut data = [0u16; 5];
240 +
	let addr = &data as *const [u16; 5] as *const u16 as usize;
241 +
	let bits = data.view_bits_mut::<Local>();
242 +
243 +
	let (head, body, tail) = unsafe { bits[5 .. 75].align_to_mut::<u32>() };
244 +
245 +
	//  `data` is aligned to the back half of a `u32`
246 +
	if addr % 4 == 2 {
247 +
		assert_eq!(head.len(), 11);
248 +
		assert_eq!(body.len(), 59);
249 +
		assert!(tail.is_empty());
250 +
	}
251 +
	//  `data` is aligned to the front half of a `u32`
252 +
	else {
253 +
		assert!(head.is_empty());
254 +
		assert_eq!(body.len(), 64);
255 +
		assert_eq!(tail.len(), 6);
256 +
	}
257 +
}

@@ -912,138 +912,12 @@
912 912
	env!(CARGO_PKG_REPOSITORY)
913 913
));
914 914
915 -
#[cfg(test)]
916 -
mod tests {
917 -
	use super::*;
918 -
919 -
	#[test]
920 -
	fn get_value() {
921 -
		let data = [5u32 << 3, 0x01234567, !5];
922 -
		let bits = data.view_bits::<Lsb0>();
923 -
924 -
		if let Domain::Enclave { head, elem, tail } = bits[3 .. 6].domain() {
925 -
			let byte = get::<u32, u8>(elem, Lsb0::mask(head, tail), 3);
926 -
			assert_eq!(byte, 5u8);
927 -
		}
928 -
		else {
929 -
			unreachable!("it does");
930 -
		}
931 -
932 -
		if let Domain::Region {
933 -
			head: None,
934 -
			body: &[],
935 -
			tail: Some((elem, tail)),
936 -
		} = bits[32 .. 48].domain()
937 -
		{
938 -
			let short = get::<u32, u16>(elem, Lsb0::mask(None, tail), 0);
939 -
			assert_eq!(short, 0x4567u16);
940 -
		}
941 -
		else {
942 -
			unreachable!("it does");
943 -
		}
944 -
945 -
		if let Domain::Region {
946 -
			head: Some((head, elem)),
947 -
			body: &[],
948 -
			tail: None,
949 -
		} = bits[48 .. 64].domain()
950 -
		{
951 -
			let short = get::<u32, u16>(elem, Lsb0::mask(head, None), 16);
952 -
			assert_eq!(short, 0x0123u16);
953 -
		}
954 -
		else {
955 -
			unreachable!("it does");
956 -
		}
957 -
958 -
		if let Domain::Region {
959 -
			head: None,
960 -
			body,
961 -
			tail: None,
962 -
		} = bits[64 .. 96].domain()
963 -
		{
964 -
			assert_eq!(body, &[!5]);
965 -
		}
966 -
		else {
967 -
			unreachable!("it does");
968 -
		}
969 -
	}
970 -
971 -
	#[test]
972 -
	fn set_value() {
973 -
		let mut data = [0u32; 3];
974 -
		let bits = data.view_bits_mut::<Lsb0>();
975 -
976 -
		if let DomainMut::Enclave { head, elem, tail } =
977 -
			bits[3 .. 6].domain_mut()
978 -
		{
979 -
			set::<u32, u16>(elem, 13u16, Lsb0::mask(head, tail), 3);
980 -
		}
981 -
		else {
982 -
			unreachable!("it does");
983 -
		}
984 -
985 -
		if let DomainMut::Region {
986 -
			head: None,
987 -
			body: &mut [],
988 -
			tail: Some((elem, tail)),
989 -
		} = bits[32 .. 48].domain_mut()
990 -
		{
991 -
			set::<u32, u16>(elem, 0x4567u16, Lsb0::mask(None, tail), 0);
992 -
		}
993 -
		else {
994 -
			unreachable!("it does");
995 -
		}
996 -
997 -
		if let DomainMut::Region {
998 -
			head: Some((head, elem)),
999 -
			body: &mut [],
1000 -
			tail: None,
1001 -
		} = bits[48 .. 64].domain_mut()
1002 -
		{
1003 -
			set::<u32, u16>(elem, 0x0123u16, Lsb0::mask(head, None), 16);
1004 -
		}
1005 -
		else {
1006 -
			unreachable!("it does");
1007 -
		}
1008 -
1009 -
		assert_eq!(data[0], 5 << 3);
1010 -
		assert_eq!(data[1], 0x01234567u32);
1011 -
	}
1012 -
1013 -
	#[test]
1014 -
	fn byte_fields() {
1015 -
		let mut data = [0u8; 3];
1016 -
1017 -
		data.view_bits_mut::<Msb0>()[4 .. 20].store_be(0xABCDu16);
1018 -
		assert_eq!(data, [0x0A, 0xBC, 0xD0]);
1019 -
		assert_eq!(data.view_bits::<Msb0>()[4 .. 20].load_be::<u16>(), 0xABCD);
1020 -
1021 -
		data.view_bits_mut::<Msb0>()[2 .. 6].store_be(9u8);
1022 -
		assert_eq!(data, [0x26, 0xBC, 0xD0]);
1023 -
		assert_eq!(data.view_bits::<Msb0>()[2 .. 6].load_be::<u8>(), 9);
1024 -
1025 -
		data = [0; 3];
1026 -
1027 -
		data.view_bits_mut::<Lsb0>()[4 .. 20].store_be(0xABCDu16);
1028 -
		assert_eq!(data, [0xA0, 0xBC, 0x0D]);
1029 -
		assert_eq!(data.view_bits::<Lsb0>()[4 .. 20].load_be::<u16>(), 0xABCD);
1030 -
1031 -
		data.view_bits_mut::<Lsb0>()[2 .. 6].store_be(9u8);
1032 -
		//  0b1010_0000 | 0b00_1001_00
1033 -
		assert_eq!(data, [0xA4, 0xBC, 0x0D]);
1034 -
		assert_eq!(data.view_bits::<Lsb0>()[2 .. 6].load_be::<u8>(), 9);
1035 -
	}
915 +
#[cfg(feature = "std")]
916 +
mod io;
1036 917
1037 -
	#[test]
1038 -
	#[should_panic]
1039 -
	fn check_panic() {
1040 -
		check("fail", 10, 8);
1041 -
	}
1042 -
}
918 +
#[cfg(test)]
919 +
mod tests;
1043 920
1044 921
// These tests are purely mathematical, and do not need to run more than once.
1045 -
#[cfg(all(feature = "std", not(tarpaulin)))]
922 +
#[cfg(all(test, feature = "std", not(tarpaulin)))]
1046 923
mod permutation_tests;
1047 -
1048 -
#[cfg(feature = "std")]
1049 -
mod io;

@@ -2,10 +2,20 @@
2 2
3 3
use crate::prelude::*;
4 4
5 -
use core::ptr;
5 +
use core::{
6 +
	iter,
7 +
	ptr,
8 +
};
6 9
7 10
use std::panic::catch_unwind;
8 11
12 +
#[test]
13 +
fn from_vec() {
14 +
	let bv = BitVec::<Msb0, u8>::from_vec(vec![0, 1, 2, 3]);
15 +
	assert_eq!(bv.len(), 32);
16 +
	assert_eq!(bv.count_ones(), 4);
17 +
}
18 +
9 19
#[test]
10 20
fn push() {
11 21
	let mut bvm08 = BitVec::<Msb0, u8>::new();
@@ -119,7 +129,7 @@
119 129
#[test]
120 130
fn iterators() {
121 131
	let data = 0x35u8.view_bits::<Msb0>();
122 -
	let bv: BitVec = data.iter().collect();
132 +
	let bv: BitVec<Msb0, u8> = data.iter().collect();
123 133
	assert_eq!(bv.count_ones(), 4);
124 134
125 135
	for (l, r) in (&bv).into_iter().zip(bits![0, 0, 1, 1, 0, 1, 0, 1]) {
@@ -138,7 +148,20 @@
138 148
	let mut iter = bv.clone().into_iter();
139 149
	assert!(!iter.next().unwrap());
140 150
	assert_eq!(iter.as_bitslice(), data[1 ..]);
141 -
	assert_eq!(iter.as_slice(), &[0x35]);
151 +
152 +
	let mut bv = bitvec![0, 0, 1, 0, 0, 1, 0, 0];
153 +
	let mut splice = bv.splice(2 .. 6, bits![0; 4].iter().copied());
154 +
	assert!(splice.next().unwrap());
155 +
	assert!(splice.next_back().unwrap());
156 +
	assert!(!splice.nth(0).unwrap());
157 +
	assert!(!splice.nth_back(0).unwrap());
158 +
	drop(splice);
159 +
	assert_eq!(bv, bits![0; 8]);
160 +
161 +
	let mut bv = bitvec![0, 1, 1, 1, 1, 0];
162 +
	let splice = bv.splice(1 .. 5, iter::once(false));
163 +
	drop(splice);
164 +
	assert_eq!(bv, bits![0; 3]);
142 165
}
143 166
144 167
#[test]
@@ -187,3 +210,13 @@
187 210
	bv.extend_from_slice(&[false, false, true, true, false, true]);
188 211
	assert_eq!(bv, bits![0, 0, 1, 1, 0, 1]);
189 212
}
213 +
214 +
#[test]
215 +
fn cloning() {
216 +
	let mut a = bitvec![0];
217 +
	let b = bitvec![1; 20];
218 +
219 +
	assert_ne!(a, b);
220 +
	a.clone_from(&b);
221 +
	assert_eq!(a, b);
222 +
}

@@ -25,6 +25,7 @@
25 25
	},
26 26
};
27 27
28 +
#[cfg(not(tarpaulin_include))]
28 29
impl<O, T, Rhs> BitAnd<Rhs> for BitVec<O, T>
29 30
where
30 31
	O: BitOrder,
@@ -40,6 +41,7 @@
40 41
	}
41 42
}
42 43
44 +
#[cfg(not(tarpaulin_include))]
43 45
impl<O, T, Rhs> BitAndAssign<Rhs> for BitVec<O, T>
44 46
where
45 47
	O: BitOrder,
@@ -52,6 +54,7 @@
52 54
	}
53 55
}
54 56
57 +
#[cfg(not(tarpaulin_include))]
55 58
impl<O, T, Rhs> BitOr<Rhs> for BitVec<O, T>
56 59
where
57 60
	O: BitOrder,
@@ -67,6 +70,7 @@
67 70
	}
68 71
}
69 72
73 +
#[cfg(not(tarpaulin_include))]
70 74
impl<O, T, Rhs> BitOrAssign<Rhs> for BitVec<O, T>
71 75
where
72 76
	O: BitOrder,
@@ -79,6 +83,7 @@
79 83
	}
80 84
}
81 85
86 +
#[cfg(not(tarpaulin_include))]
82 87
impl<O, T, Rhs> BitXor<Rhs> for BitVec<O, T>
83 88
where
84 89
	O: BitOrder,
@@ -94,6 +99,7 @@
94 99
	}
95 100
}
96 101
102 +
#[cfg(not(tarpaulin_include))]
97 103
impl<O, T, Rhs> BitXorAssign<Rhs> for BitVec<O, T>
98 104
where
99 105
	O: BitOrder,
@@ -174,6 +180,11 @@
174 180
	}
175 181
}
176 182
183 +
/** This implementation inverts all elements in the live buffer. You cannot rely
184 +
on the value of bits in the buffer that are outside the domain of
185 +
`BitVec::as_mut_bitslice`.
186 +
**/
187 +
#[cfg(not(tarpaulin_include))]
177 188
impl<O, T> Not for BitVec<O, T>
178 189
where
179 190
	O: BitOrder,
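
Per the note above, only the live bits of the inverted buffer are specified, so callers should compare the result as a bit-slice rather than inspect the raw elements. A hedged usage sketch, assuming the existing `Output = Self` signature of this impl:

use bitvec::prelude::*;

fn main() {
	let bv = bitvec![Msb0, u8; 0, 1];
	let inv = !bv;
	//  The two live bits are inverted; the six dead bits of the backing `u8`
	//  carry no guarantee either way.
	assert_eq!(inv, bits![1, 0]);
}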

@@ -55,7 +55,7 @@
55 55
			$order, $store; $($val),*
56 56
		);
57 57
		unsafe { $crate::__bits_from_slice!(
58 -
			mut $order, $store, LEN, DATA
58 +
			mut $order, $store, $crate::__count!($($val),*), DATA
59 59
		)}
60 60
	}};
61 61
@@ -73,7 +73,7 @@
73 73
			$order, $store; $($val),*
74 74
		);
75 75
		unsafe { $crate::__bits_from_slice!(
76 -
			mut $order, $store, LEN, DATA
76 +
			mut $order, $store, $crate::__count!($($val),*), DATA
77 77
		)}
78 78
	}};
79 79

@@ -0,0 +1,134 @@
1 +
//! Tests for the `field` module.
2 +
3 +
use super::*;
4 +
5 +
#[test]
6 +
fn get_value() {
7 +
	let data = [5u32 << 3, 0x01234567, !5];
8 +
	let bits = data.view_bits::<Lsb0>();
9 +
10 +
	if let Domain::Enclave { head, elem, tail } = bits[3 .. 6].domain() {
11 +
		let byte = get::<u32, u8>(elem, Lsb0::mask(head, tail), 3);
12 +
		assert_eq!(byte, 5u8);
13 +
	}
14 +
	else {
15 +
		unreachable!("it does");
16 +
	}
17 +
18 +
	if let Domain::Region {
19 +
		head: None,
20 +
		body: &[],
21 +
		tail: Some((elem, tail)),
22 +
	} = bits[32 .. 48].domain()
23 +
	{
24 +
		let short = get::<u32, u16>(elem, Lsb0::mask(None, tail), 0);
25 +
		assert_eq!(short, 0x4567u16);
26 +
	}
27 +
	else {
28 +
		unreachable!("it does");
29 +
	}
30 +
31 +
	if let Domain::Region {
32 +
		head: Some((head, elem)),
33 +
		body: &[],
34 +
		tail: None,
35 +
	} = bits[48 .. 64].domain()
36 +
	{
37 +
		let short = get::<u32, u16>(elem, Lsb0::mask(head, None), 16);
38 +
		assert_eq!(short, 0x0123u16);
39 +
	}
40 +
	else {
41 +
		unreachable!("it does");
42 +
	}
43 +
44 +
	if let Domain::Region {
45 +
		head: None,
46 +
		body,
47 +
		tail: None,
48 +
	} = bits[64 .. 96].domain()
49 +
	{
50 +
		assert_eq!(body, &[!5]);
51 +
	}
52 +
	else {
53 +
		unreachable!("it does");
54 +
	}
55 +
}
56 +
57 +
#[test]
58 +
fn set_value() {
59 +
	let mut data = [0u32; 3];
60 +
	let bits = data.view_bits_mut::<Lsb0>();
61 +
62 +
	if let DomainMut::Enclave { head, elem, tail } = bits[3 .. 6].domain_mut() {
63 +
		set::<u32, u16>(elem, 13u16, Lsb0::mask(head, tail), 3);
64 +
	}
65 +
	else {
66 +
		unreachable!("it does");
67 +
	}
68 +
69 +
	if let DomainMut::Region {
70 +
		head: None,
71 +
		body: &mut [],
72 +
		tail: Some((elem, tail)),
73 +
	} = bits[32 .. 48].domain_mut()
74 +
	{
75 +
		set::<u32, u16>(elem, 0x4567u16, Lsb0::mask(None, tail), 0);
76 +
	}
77 +
	else {
78 +
		unreachable!("it does");
79 +
	}
80 +
81 +
	if let DomainMut::Region {
82 +
		head: Some((head, elem)),
83 +
		body: &mut [],
84 +
		tail: None,
85 +
	} = bits[48 .. 64].domain_mut()
86 +
	{
87 +
		set::<u32, u16>(elem, 0x0123u16, Lsb0::mask(head, None), 16);
88 +
	}
89 +
	else {
90 +
		unreachable!("it does");
91 +
	}
92 +
93 +
	assert_eq!(data[0], 5 << 3);
94 +
	assert_eq!(data[1], 0x01234567u32);
95 +
}
96 +
97 +
#[test]
98 +
fn byte_fields() {
99 +
	let mut data = [0u8; 3];
100 +
101 +
	data.view_bits_mut::<Msb0>()[4 .. 20].store_be(0xABCDu16);
102 +
	assert_eq!(data, [0x0A, 0xBC, 0xD0]);
103 +
	assert_eq!(data.view_bits::<Msb0>()[4 .. 20].load_be::<u16>(), 0xABCD);
104 +
105 +
	data.view_bits_mut::<Msb0>()[2 .. 6].store_be(9u8);
106 +
	assert_eq!(data, [0x26, 0xBC, 0xD0]);
107 +
	assert_eq!(data.view_bits::<Msb0>()[2 .. 6].load_be::<u8>(), 9);
108 +
109 +
	data = [0; 3];
110 +
	data.view_bits_mut::<Lsb0>()[4 .. 20].store_be(0xABCDu16);
111 +
	assert_eq!(data, [0xA0, 0xBC, 0x0D]);
112 +
	assert_eq!(data.view_bits::<Lsb0>()[4 .. 20].load_be::<u16>(), 0xABCD);
113 +
114 +
	data.view_bits_mut::<Lsb0>()[2 .. 6].store_be(9u8);
115 +
	//  0b1010_0000 | 0b00_1001_00
116 +
	assert_eq!(data, [0xA4, 0xBC, 0x0D]);
117 +
	assert_eq!(data.view_bits::<Lsb0>()[2 .. 6].load_be::<u8>(), 9);
118 +
119 +
	data = [0; 3];
120 +
	data.view_bits_mut::<Msb0>()[4 .. 20].store_le(0xABCDu16);
121 +
	assert_eq!(data, [0x0D, 0xBC, 0xA0]);
122 +
	assert_eq!(data.view_bits::<Msb0>()[4 .. 20].load_le::<u16>(), 0xABCD);
123 +
124 +
	data = [0; 3];
125 +
	data.view_bits_mut::<Lsb0>()[4 .. 20].store_le(0xABCDu16);
126 +
	assert_eq!(data, [0xD0, 0xBC, 0x0A]);
127 +
	assert_eq!(data.view_bits::<Lsb0>()[4 .. 20].load_le::<u16>(), 0xABCD);
128 +
}
129 +
130 +
#[test]
131 +
#[should_panic]
132 +
fn check_panic() {
133 +
	check("fail", 10, 8);
134 +
}

@@ -713,6 +713,7 @@
713 713
	///
714 714
	/// Increments `.head` by one. If the increment resulted in a rollover to
715 715
	/// `0`, then the `.addr` field is increased to the next `T::Mem` stepping.
716 +
	#[inline]
716 717
	pub(crate) unsafe fn incr_head(&mut self) {
717 718
		//  Increment the cursor, permitting rollover to `T::Mem::BITS`.
718 719
		let head = self.head().value() as usize + 1;

@@ -231,6 +231,7 @@
231 231
232 232
	/// Views the array as a slice of its raw underlying memory type.
233 233
	#[inline(always)]
234 +
	#[cfg(not(tarpaulin_include))]
234 235
	pub fn as_raw_slice(&self) -> &[V::Mem] {
235 236
		unsafe {
236 237
			slice::from_raw_parts(
@@ -242,6 +243,7 @@
242 243
243 244
	/// Views the array as a mutable slice of its raw underlying memory type.
244 245
	#[inline(always)]
246 +
	#[cfg(not(tarpaulin_include))]
245 247
	pub fn as_raw_mut_slice(&mut self) -> &mut [V::Mem] {
246 248
		unsafe {
247 249
			slice::from_raw_parts_mut(

@@ -12,16 +12,21 @@
12 12
};
13 13
14 14
use core::{
15 +
	fmt::{
16 +
		self,
17 +
		Debug,
18 +
		Formatter,
19 +
	},
15 20
	iter::{
16 21
		FromIterator,
17 22
		FusedIterator,
18 23
	},
24 +
	mem,
19 25
	ops::{
20 26
		Range,
21 27
		RangeBounds,
22 28
	},
23 29
	ptr::NonNull,
24 -
	slice,
25 30
};
26 31
27 32
use wyz::{
@@ -40,14 +45,11 @@
40 45
		let mut iter = iter.into_iter();
41 46
		match iter.size_hint() {
42 47
			(n, None) | (_, Some(n)) => {
43 -
				// This body exists to try to accelerate
48 +
				// This body exists to try to accelerate the push-per-bit loop.
44 49
				self.reserve(n);
45 50
				let len = self.len();
46 51
				let new_len = len + n;
47 -
				let new = unsafe {
48 -
					self.set_len(new_len);
49 -
					self.get_unchecked_mut(len .. new_len)
50 -
				};
52 +
				let new = unsafe { self.get_unchecked_mut(len .. new_len) };
51 53
				let mut pulled = 0;
52 54
				for (slot, bit) in new.iter_mut().zip(iter.by_ref()) {
53 55
					slot.set(bit);
@@ -209,24 +211,14 @@
209 211
		self.iter.as_bitslice()
210 212
	}
211 213
212 -
	/// Returns the remaining elements of this iterator as a slice.
213 -
	///
214 -
	/// # Original
215 -
	///
216 -
	/// [`vec::IntoIter::as_slice`](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#method.as_slice)
217 -
	///
218 -
	/// # Notes
219 -
	///
220 -
	/// You almost certainly want [`.as_bitslice()`].
221 -
	///
222 -
	/// [`.as_bitslice()`]: #method.as_bitslice
223 -
	#[inline]
214 +
	#[doc(hidden)]
215 +
	#[inline(always)]
224 216
	#[cfg(not(tarpaulin_include))]
225 -
	pub fn as_slice(&self) -> &[T] {
226 -
		let bitptr = self.as_bitslice().bitptr();
227 -
		unsafe {
228 -
			slice::from_raw_parts(bitptr.pointer().to_const(), bitptr.elements())
229 -
		}
217 +
	#[deprecated(
218 +
		note = "Use `.as_bitslice()` on iterators to view the remaining data."
219 +
	)]
220 +
	pub fn as_slice(&self) -> &BitSlice<O, T> {
221 +
		self.as_bitslice()
230 222
	}
231 223
232 224
	/// Returns the remaining bits of this iterator as a mutable slice.
@@ -249,21 +241,22 @@
249 241
	/// assert!(into_iter.next().unwrap());
250 242
	/// ```
251 243
	#[inline]
244 +
	#[cfg(not(tarpaulin_include))]
252 245
	pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<O, T> {
253 246
		self.iter.as_bitslice().bitptr().to_bitslice_mut()
254 247
	}
255 248
256 -
	#[inline]
249 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
257 250
	#[doc(hidden)]
258 -
	#[deprecated(
259 -
		note = "Use `.as_mut_bitslice` on iterators to view the remaining data"
260 -
	)]
251 +
	#[deprecated(note = "Use `.as_mut_bitslice()` on iterators to view the \
252 +
	                     remaining data.")]
261 253
	#[cfg(not(tarpaulin_include))]
262 254
	pub fn as_mut_slice(&mut self) -> &mut BitSlice<O, T> {
263 255
		self.as_mut_bitslice()
264 256
	}
265 257
}
266 258
259 +
#[cfg(not(tarpaulin_include))]
267 260
impl<O, T> Iterator for IntoIter<O, T>
268 261
where
269 262
	O: BitOrder,
@@ -271,53 +264,56 @@
271 264
{
272 265
	type Item = bool;
273 266
274 -
	#[inline]
267 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
275 268
	fn next(&mut self) -> Option<Self::Item> {
276 269
		self.iter.next().copied()
277 270
	}
278 271
279 -
	#[inline]
272 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
280 273
	fn size_hint(&self) -> (usize, Option<usize>) {
281 274
		self.iter.size_hint()
282 275
	}
283 276
284 -
	#[inline]
277 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
285 278
	fn count(self) -> usize {
286 279
		self.len()
287 280
	}
288 281
289 -
	#[inline]
282 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
290 283
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
291 284
		self.iter.nth(n).copied()
292 285
	}
293 286
294 -
	#[inline]
287 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
295 288
	fn last(mut self) -> Option<Self::Item> {
296 289
		self.next_back()
297 290
	}
298 291
}
299 292
293 +
#[cfg(not(tarpaulin_include))]
300 294
impl<O, T> DoubleEndedIterator for IntoIter<O, T>
301 295
where
302 296
	O: BitOrder,
303 297
	T: BitStore,
304 298
{
305 -
	#[inline]
299 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
306 300
	fn next_back(&mut self) -> Option<Self::Item> {
307 301
		self.iter.next_back().copied()
308 302
	}
309 303
310 -
	#[inline]
304 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
311 305
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
312 306
		self.iter.nth_back(n).copied()
313 307
	}
314 308
}
315 309
310 +
#[cfg(not(tarpaulin_include))]
316 311
impl<O, T> ExactSizeIterator for IntoIter<O, T>
317 312
where
318 313
	O: BitOrder,
319 314
	T: BitStore,
320 315
{
316 +
	#[cfg_attr(not(tarpaulin_include), inline(always))]
321 317
	fn len(&self) -> usize {
322 318
		self.iter.len()
323 319
	}
@@ -341,11 +337,10 @@
341 337
[`BitVec`]: struct.BitVec.html
342 338
[`drain`]: struct.BitVec.html#method.drain
343 339
**/
344 -
#[derive(Debug)]
345 340
pub struct Drain<'a, O, T>
346 341
where
347 342
	O: BitOrder,
348 -
	T: BitStore,
343 +
	T: 'a + BitStore,
349 344
{
350 345
	/// Exclusive reference to the vector this drains.
351 346
	source: NonNull<BitVec<O, T>>,
@@ -359,125 +354,179 @@
359 354
impl<'a, O, T> Drain<'a, O, T>
360 355
where
361 356
	O: BitOrder,
362 -
	T: BitStore,
357 +
	T: 'a + BitStore,
363 358
{
359 +
	#[inline]
364 360
	pub(super) fn new<R>(source: &'a mut BitVec<O, T>, range: R) -> Self
365 361
	where R: RangeBounds<usize> {
362 +
		//  Hold the current vector size for bounds comparison.
366 363
		let len = source.len();
364 +
		//  Normalize the input range and assert that it is within bounds.
367 365
		let drain = dvl::normalize_range(range, len);
368 366
		dvl::assert_range(drain.clone(), len);
369 367
368 +
		//  The tail region is everything after the drain, before the real end.
370 369
		let tail = drain.end .. len;
370 +
		//  The drain span is an iterator over the provided range.
371 371
		let drain = unsafe {
372 -
			//  Truncate the source vector to the beginning of the drain.
372 +
			//  Set the source vector to end before the drain.
373 373
			source.set_len(drain.start);
374 +
			//  Grab the drain range and produce an iterator over it.
374 375
			source
375 376
				.as_bitslice()
376 377
				.get_unchecked(drain)
377 -
				//  Remove the lifetime and borrow information
378 +
				//  Detach the region from the `source` borrow.
378 379
				.bitptr()
379 -
				.to_bitslice_ref().iter()
380 +
				.to_bitslice_ref()
381 +
				.iter()
380 382
		};
383 +
		let source = source.into();
381 384
		Self {
382 -
			source: source.into(),
385 +
			source,
383 386
			drain,
384 387
			tail,
385 388
		}
386 389
	}
387 390
388 -
	#[inline]
389 -
	fn tail_len(&self) -> usize {
390 -
		self.tail.end - self.tail.start
391 +
	/// Returns the remaining bits of this iterator as a bit-slice.
392 +
	///
393 +
	/// # Original
394 +
	///
395 +
	/// [`Drain::as_slice`](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#method.as_slice)
396 +
	///
397 +
	/// # API Differences
398 +
	///
399 +
	/// This method is renamed, as it operates on a bit-slice rather than an
400 +
	/// element slice.
401 +
	#[inline(always)]
402 +
	#[cfg(not(tarpaulin_include))]
403 +
	pub fn as_bitslice(&self) -> &'a BitSlice<O, T> {
404 +
		self.drain.as_bitslice()
391 405
	}
392 406
393 407
	/// Attempts to overwrite the drained region with another iterator.
394 408
	///
395 409
	/// # Type Parameters
396 410
	///
397 -
	/// - `I`: Some source of `bool`s
411 +
	/// - `I`: Some source of `bool`s.
398 412
	///
399 413
	/// # Parameters
400 414
	///
401 415
	/// - `&mut self`
402 -
	/// - `stream`: A source of `bools` with which to overwrite the drained
403 -
	///   span.
416 +
	/// - `iter`: A source of `bool`s with which to overwrite the drained span.
404 417
	///
405 418
	/// # Returns
406 419
	///
407 -
	/// - `true` if the drained span was completely overwritten by `stream`.
408 -
	/// - `false` if the `stream` exhausted early.
420 +
	/// Whether the drained span was completely filled, or if the replacement
421 +
	/// source `iter`ator was exhausted first.
409 422
	///
410 423
	/// # Effects
411 424
	///
412 -
	/// If the drained region is completely filled by the replacement `stream`,
413 -
	/// then the source vector is restored to its original length, and this
414 -
	/// `Drain` has no further work.
415 -
	///
416 -
	/// If the `stream` exhausts before completely filling the drained region,
417 -
	/// then the source vector is extended only to include the portion of the
418 -
	/// drain that was replaced. The tail section is not restored to the vector
419 -
	/// until the destructor runs.
420 -
	fn fill<I>(&mut self, stream: &mut I) -> bool
425 +
	/// The source vector is extended to include all bits filled in from the
426 +
	/// replacement `iter`ator, but is *not* extended to include the tail, even
427 +
	/// if the drained region is completely filled. This work is done in the
428 +
	/// destructor.
429 +
	#[inline]
430 +
	fn fill<I>(&mut self, iter: &mut I) -> FillStatus
421 431
	where I: Iterator<Item = bool> {
422 -
		let tail_len = self.tail_len();
423 -
		let bv = unsafe { self.source.as_mut() };
424 -
425 -
		//  The entire span between `bv.len()` and `tail.start` is considered
426 -
		//  dead, and should be filled by `stream`.
427 -
		for idx in bv.len() .. self.tail.start {
428 -
			if let Some(bit) = stream.next() {
429 -
				unsafe {
430 -
					bv.set_unchecked(idx, bit);
431 -
				}
432 +
		let bitvec = unsafe { self.source.as_mut() };
433 +
		//  Get the length of the source vector. This will be grown as `iter`
434 +
		//  writes into the drain span.
435 +
		let mut len = bitvec.len();
436 +
		//  Get the drain span as a bit-slice.
437 +
		let span = unsafe { bitvec.get_unchecked_mut(len .. self.tail.start) };
438 +
439 +
		//  Set the exit flag to assume completion.
440 +
		let mut out = FillStatus::FullSpan;
441 +
		//  Write the `iter` bits into the drain `span`.
442 +
		for slot in span {
443 +
			//  While the `iter` is not exhausted, write it into the span and
444 +
			//  increase the vector length counter.
445 +
			if let Some(bit) = iter.next() {
446 +
				slot.set(bit);
447 +
				len += 1;
432 448
			}
433 -
			//  When the stream exhausts, extend the front region to the loop
434 -
			//  counter and exit. The destructor will finish relocation.
449 +
			//  If the `iter` exhausts before the drain `span` is filled, set
450 +
			//  the exit flag accordingly.
435 451
			else {
436 -
				unsafe {
437 -
					bv.set_len(idx + tail_len);
438 -
				}
439 -
				return false;
452 +
				out = FillStatus::EmptyInput;
453 +
				break;
440 454
			}
441 455
		}
442 -
		//  If the drain region is completely filled, then the vector’s length
443 -
		//  reaches the end of the tail.
456 +
		//  Update the vector length counter to include the bits written by
457 +
		//  `iter`.
444 458
		unsafe {
445 -
			bv.set_len(self.tail.end);
459 +
			bitvec.set_len(len);
446 460
		}
447 -
		//  Prevent the destructor from running by erasing the tail.
448 -
		self.tail = 0 .. 0;
449 -
		true
461 +
		out
450 462
	}
451 463
452 -
	/// Resizes the middle drain segment to a new width.
464 +
	/// Inserts `additional` capacity between the vector and the tail.
453 465
	///
454 466
	/// # Parameters
455 467
	///
456 468
	/// - `&mut self`
457 -
	/// - `width`: The width, in bits, between the back edge of the head segment
458 -
	///   and the front edge of the tail segment.
469 +
	/// - `additional`: The amount of new bits to reserve between the head and
470 +
	///   tail sections of the vector.
459 471
	///
460 472
	/// # Effects
461 473
	///
462 474
	/// This is permitted to reällocate the buffer in order to grow capacity.
463 -
	/// After completion, the tail segment will be relocated to begin `width`
464 -
	/// bits after the head segment ends. The drain iteration cursor will *not*
465 -
	/// be modified.
466 -
	fn resize_drain(&mut self, width: usize) {
467 -
		let tail_len = self.tail_len();
468 -
		let bv = unsafe { self.source.as_mut() };
469 -
		let base_len = bv.len();
470 -
		let new_tail = base_len + width;
471 -
		let new_end = new_tail + tail_len;
472 -
		//  Ensure capacity for the drain and tail segments.
473 -
		bv.reserve(new_end - base_len);
474 -
		unsafe {
475 -
			bv.copy_within_unchecked(self.tail.clone(), new_tail);
476 -
		}
477 -
		self.tail = new_tail .. new_end;
475 +
	/// After completion, the tail segment will be relocated to begin
476 +
	/// `additional` bits after the head segment ends. The drain iteration
477 +
	/// cursor will not be modified.
478 +
	#[inline]
479 +
	unsafe fn move_tail(&mut self, additional: usize) {
480 +
		let bitvec = self.source.as_mut();
481 +
		let tail_len = self.tail.end - self.tail.start;
482 +
483 +
		//  Reserve allocation capacity for `additional` and the tail.
484 +
		//  `.reserve()` begins from the `bitvec.len()`, so the tail length must
485 +
		//  still be included.
486 +
		let full_len = additional + tail_len;
487 +
		bitvec.reserve(full_len);
488 +
		let new_tail_start = additional + self.tail.start;
489 +
		let orig_tail = mem::replace(
490 +
			&mut self.tail,
491 +
			new_tail_start .. new_tail_start + tail_len,
492 +
		);
493 +
		//  Temporarily resize the vector to include the full buffer. This is
494 +
		//  necessary until `copy_within_unchecked` stops using `.len()`
495 +
		//  internally.
496 +
		let len = bitvec.len();
497 +
		bitvec.set_len(full_len);
498 +
		bitvec.copy_within_unchecked(orig_tail, new_tail_start);
499 +
		bitvec.set_len(len);
478 500
	}
479 501
}
480 502
503 +
#[cfg(not(tarpaulin_include))]
504 +
impl<O, T> AsRef<BitSlice<O, T>> for Drain<'_, O, T>
505 +
where
506 +
	O: BitOrder,
507 +
	T: BitStore,
508 +
{
509 +
	#[inline(always)]
510 +
	fn as_ref(&self) -> &BitSlice<O, T> {
511 +
		self.as_bitslice()
512 +
	}
513 +
}
514 +
515 +
#[cfg(not(tarpaulin_include))]
516 +
impl<'a, O, T> Debug for Drain<'a, O, T>
517 +
where
518 +
	O: BitOrder,
519 +
	T: 'a + BitStore,
520 +
{
521 +
	#[inline]
522 +
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
523 +
		fmt.debug_tuple("Drain")
524 +
			.field(&self.drain.as_bitslice())
525 +
			.finish()
526 +
	}
527 +
}
528 +
529 +
#[cfg(not(tarpaulin_include))]
481 530
impl<O, T> Iterator for Drain<'_, O, T>
482 531
where
483 532
	O: BitOrder,
@@ -485,54 +534,56 @@
485 534
{
486 535
	type Item = bool;
487 536
488 -
	#[inline]
537 +
	#[inline(always)]
489 538
	fn next(&mut self) -> Option<Self::Item> {
490 539
		self.drain.next().copied()
491 540
	}
492 541
493 -
	#[inline]
542 +
	#[inline(always)]
494 543
	fn size_hint(&self) -> (usize, Option<usize>) {
495 544
		self.drain.size_hint()
496 545
	}
497 546
498 -
	#[inline]
547 +
	#[inline(always)]
499 548
	fn count(self) -> usize {
500 549
		self.len()
501 550
	}
502 551
503 -
	#[inline]
552 +
	#[inline(always)]
504 553
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
505 554
		self.drain.nth(n).copied()
506 555
	}
507 556
508 -
	#[inline]
557 +
	#[inline(always)]
509 558
	fn last(mut self) -> Option<Self::Item> {
510 559
		self.next_back()
511 560
	}
512 561
}
513 562
563 +
#[cfg(not(tarpaulin_include))]
514 564
impl<O, T> DoubleEndedIterator for Drain<'_, O, T>
515 565
where
516 566
	O: BitOrder,
517 567
	T: BitStore,
518 568
{
519 -
	#[inline]
569 +
	#[inline(always)]
520 570
	fn next_back(&mut self) -> Option<Self::Item> {
521 571
		self.drain.next_back().copied()
522 572
	}
523 573
524 -
	#[inline]
574 +
	#[inline(always)]
525 575
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
526 576
		self.drain.nth_back(n).copied()
527 577
	}
528 578
}
529 579
580 +
#[cfg(not(tarpaulin_include))]
530 581
impl<O, T> ExactSizeIterator for Drain<'_, O, T>
531 582
where
532 583
	O: BitOrder,
533 584
	T: BitStore,
534 585
{
535 -
	#[inline]
586 +
	#[inline(always)]
536 587
	fn len(&self) -> usize {
537 588
		self.drain.len()
538 589
	}
@@ -545,27 +596,62 @@
545 596
{
546 597
}
547 598
599 +
unsafe impl<O, T> Send for Drain<'_, O, T>
600 +
where
601 +
	O: BitOrder,
602 +
	T: BitStore,
603 +
{
604 +
}
605 +
606 +
unsafe impl<O, T> Sync for Drain<'_, O, T>
607 +
where
608 +
	O: BitOrder,
609 +
	T: BitStore,
610 +
{
611 +
}
612 +
548 613
impl<O, T> Drop for Drain<'_, O, T>
549 614
where
550 615
	O: BitOrder,
551 616
	T: BitStore,
552 617
{
618 +
	#[inline]
553 619
	fn drop(&mut self) {
554 -
		match self.tail_len() {
555 -
			//  If there is no tail segment, there is no finalization work.
556 -
			0 => {},
557 -
			n => unsafe {
558 -
				let bv = self.source.as_mut();
559 -
				let start = bv.len();
560 -
				let new_len = start + n;
561 -
				//  Copy the tail span down to the start of the drained region.
562 -
				bv.copy_within_unchecked(self.tail.clone(), start);
563 -
				bv.set_len(new_len);
564 -
			},
620 +
		//  Grab the tail range descriptor
621 +
		let tail = self.tail.clone();
622 +
		//  And compute its length.
623 +
		let tail_len = tail.end - tail.start;
624 +
		//  If the tail region is empty, then there is no cleanup work to do.
625 +
		if tail_len == 0 {
626 +
			return;
627 +
		}
628 +
		//  Otherwise, access the source vector,
629 +
		let bitvec = unsafe { self.source.as_mut() };
630 +
		//  And grab its current end.
631 +
		let old_len = bitvec.len();
632 +
		let new_len = old_len + tail_len;
633 +
		unsafe {
634 +
			//  Expand the vector to include where the tail bits will be.
635 +
			bitvec.set_len(new_len);
636 +
			//  Then move the tail bits into the new location.
637 +
			bitvec.copy_within_unchecked(tail, old_len);
638 +
			//  This ordering is important! `copy_within_unchecked` uses the
639 +
			//  `len` boundary.
565 640
		}
566 641
	}
567 642
}
568 643
644 +
/// `std` uses a `bool` flag for done/not done, which is less clear about what
645 +
/// it signals.
646 +
#[repr(u8)]
647 +
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
648 +
enum FillStatus {
649 +
	/// The drain span is completely filled.
650 +
	FullSpan   = 0,
651 +
	/// The replacement source is completely emptied.
652 +
	EmptyInput = 1,
653 +
}
654 +
569 655
/** A splicing iterator for `BitVec`.
570 656
571 657
This struct is created by the [`splice()`] method on [`BitVec`]. See its
@@ -582,30 +668,30 @@
582 668
pub struct Splice<'a, O, T, I>
583 669
where
584 670
	O: BitOrder,
585 -
	T: BitStore,
671 +
	T: 'a + BitStore,
586 672
	I: Iterator<Item = bool>,
587 673
{
588 -
	/// Drain controller for the region of the vector being spliced.
674 +
	/// The region of the vector being spliced.
589 675
	drain: Drain<'a, O, T>,
590 -
	/// Source of bits written into the drain.
676 +
	/// The bitstream to be written into the drain.
591 677
	splice: I,
592 678
}
593 679
594 680
impl<'a, O, T, I> Splice<'a, O, T, I>
595 681
where
596 682
	O: BitOrder,
597 -
	T: BitStore,
683 +
	T: 'a + BitStore,
598 684
	I: Iterator<Item = bool>,
599 685
{
686 +
	/// Constructs a splice out of a drain and a replacement.
600 687
	pub(super) fn new<II>(drain: Drain<'a, O, T>, splice: II) -> Self
601 688
	where II: IntoIterator<IntoIter = I, Item = bool> {
602 -
		Self {
603 -
			drain,
604 -
			splice: splice.into_iter(),
605 -
		}
689 +
		let splice = splice.into_iter();
690 +
		Self { drain, splice }
606 691
	}
607 692
}
608 693
694 +
#[cfg(not(tarpaulin_include))]
609 695
impl<O, T, I> Iterator for Splice<'_, O, T, I>
610 696
where
611 697
	O: BitOrder,
@@ -614,69 +700,68 @@
614 700
{
615 701
	type Item = bool;
616 702
703 +
	#[inline]
617 704
	fn next(&mut self) -> Option<Self::Item> {
618 705
		self.drain.next().tap_some(|_| {
619 706
			/* Attempt to write a bit into the now-vacated slot at the front of
620 -
			the `Drain`. If the `splice` stream produced a bit, then it is
621 -
			written into the end of the `Drain`’s vector handle. This works
622 -
			because `Drain` always truncates its handle to the front edge of the
623 -
			drain region, so `bv.len()` is always the first bit of the `Drain`
624 -
			if the `Drain` is willing to yield.
707 +
			the `Drain`. If the `splice` stream produces a bit, then it is
708 +
			written into the end of the `Drain`’s buffer, extending it by one.
709 +
			This works because `Drain` always truncates its vector to the front
710 +
			edge of the drain region, so `bv.len()` is always the first bit of
711 +
			the `Drain` region if the `Drain` is willing to yield a bit.
625 712
			*/
626 -
			self.splice.next().tap_some(|new| {
713 +
			if let Some(bit) = self.splice.next() {
627 714
				unsafe {
628 715
					let bv = self.drain.source.as_mut();
629 716
					let len = bv.len();
630 -
					//  It is always sound to write directly into the front of a
631 -
					//  `Drain`.
632 -
					/* TODO(myrrlyn): Extend `Iter` to have a `.next_slot`
633 -
					function which permits an `xchg` behavior, to avoid
634 -
					computing the pointer individually for read and write.
717 +
					/* TODO(myrrlyn): Investigate adding functionality to `Iter`
718 +
					that permits an exchange behavior, rather than separated
719 +
					computations of the pointer for read and write access.
635 720
					*/
636 -
					bv.set_unchecked(len, *new);
721 +
					bv.set_unchecked(len, bit);
637 722
					bv.set_len(len + 1);
638 723
				}
639 -
			})
724 +
			}
640 725
		})
641 726
	}
642 727
643 -
	#[inline]
728 +
	#[inline(always)]
644 729
	fn size_hint(&self) -> (usize, Option<usize>) {
645 730
		self.drain.size_hint()
646 731
	}
647 732
648 -
	#[inline]
733 +
	#[inline(always)]
649 734
	fn count(self) -> usize {
650 735
		self.drain.len()
651 736
	}
652 737
}
653 738
654 -
//  Take from the back of the drain, without attempting to fill from the splice.
655 -
//  This makes dead regions that are cleaned up on drop.
739 +
#[cfg(not(tarpaulin_include))]
656 740
impl<O, T, I> DoubleEndedIterator for Splice<'_, O, T, I>
657 741
where
658 742
	O: BitOrder,
659 743
	T: BitStore,
660 744
	I: Iterator<Item = bool>,
661 745
{
662 -
	#[inline]
746 +
	#[inline(always)]
663 747
	fn next_back(&mut self) -> Option<Self::Item> {
664 748
		self.drain.next_back()
665 749
	}
666 750
667 -
	#[inline]
751 +
	#[inline(always)]
668 752
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
669 753
		self.drain.nth_back(n)
670 754
	}
671 755
}
672 756
757 +
#[cfg(not(tarpaulin_include))]
673 758
impl<O, T, I> ExactSizeIterator for Splice<'_, O, T, I>
674 759
where
675 760
	O: BitOrder,
676 761
	T: BitStore,
677 762
	I: Iterator<Item = bool>,
678 763
{
679 -
	#[inline]
764 +
	#[inline(always)]
680 765
	fn len(&self) -> usize {
681 766
		self.drain.len()
682 767
	}
@@ -696,43 +781,51 @@
696 781
	T: BitStore,
697 782
	I: Iterator<Item = bool>,
698 783
{
784 +
	#[inline]
699 785
	fn drop(&mut self) {
700 -
		//  If the drain has no tail segment, copy the splice into the vector.
701 -
		if self.drain.tail_len() == 0 {
702 -
			unsafe { self.drain.source.as_mut() }.extend(self.splice.by_ref());
786 +
		let tail = self.drain.tail.clone();
787 +
		let tail_len = tail.end - tail.start;
788 +
		let bitvec = unsafe { self.drain.source.as_mut() };
789 +
790 +
		//  If the `drain` has no tail span, then extend the vector with the
791 +
		//  splice and exit.
792 +
		if tail_len == 0 {
793 +
			bitvec.extend(self.splice.by_ref());
703 794
			return;
704 795
		}
705 -
		/* Attempt to fill the dead region between the front and back segments
706 -
		the vector with the splice. If the splice exhausts (`return false`),
707 -
		then the `Drain` destructor will handle tail-section cleanup.
708 -
		*/
709 -
		if !self.drain.fill(&mut self.splice) {
796 +
797 +
		//  Fill the drained range first. If the `splice` exhausts, then the
798 +
		//  `Drain` destructor will handle relocating the vector tail segment.
799 +
		if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) {
710 800
			return;
711 801
		}
712 802
713 -
		let (lower, upper) = self.splice.size_hint();
714 -
715 -
		//  If the splice gives an exact upper bound on its remaining bits, move
716 -
		//  the drain’s tail and fill it. The signal can be safely discarded.
717 -
		if let Some(rem) = upper {
718 -
			//  Relocate the tail section to
719 -
			self.drain.resize_drain(rem);
720 -
			self.drain.fill(&mut self.splice);
803 +
		//  If the `splice` has not yet exhausted, then the `Drain` needs to
804 +
		//  adjust to receive its contents.
805 +
		let len = match self.splice.size_hint() {
806 +
			(n, None) | (_, Some(n)) => n,
807 +
		};
808 +
		unsafe {
809 +
			self.drain.move_tail(len);
810 +
		}
811 +
		//  Now that the tail has been relocated, fill the `splice` into it. If
812 +
		//  this exhausts the `splice`, exit.
813 +
		if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) {
721 814
			return;
722 815
		}
723 816
724 -
		/* If the slice did not give an upper bound, then it must be collected
725 -
		into a temporary which will either crash the program, or find an exact
726 -
		limit. This temporary can then be used to fill the drain.
727 -
		*/
728 -
		let mut tmp: BitVec = BitVec::with_capacity(lower);
729 -
		tmp.extend(self.splice.by_ref());
730 -
		match tmp.len() {
731 -
			0 => {},
732 -
			n => {
733 -
				self.drain.resize_drain(n);
734 -
				self.drain.fill(&mut tmp.into_iter());
735 -
			},
817 +
		//  If the `splice` *still* has bits to provide, then its `.size_hint()`
818 +
		//  is untrustworthy. Collect the `splice` into a vector, then insert
819 +
		//  the vector into the spliced region.
820 +
		let mut collected = self.splice.by_ref().collect::<BitVec>().into_iter();
821 +
		let len = collected.len();
822 +
		if len > 0 {
823 +
			unsafe {
824 +
				self.drain.move_tail(len);
825 +
			}
826 +
			let filled = self.drain.fill(&mut collected);
827 +
			debug_assert_eq!(filled, FillStatus::EmptyInput);
828 +
			debug_assert_eq!(collected.len(), 0);
736 829
		}
737 830
	}
738 831
}
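
Taken together, `Drain::new`, `fill`, `move_tail`, and the two `Drop` impls make up the splicing protocol described in the comments above. A usage sketch mirroring the `iterators` test added earlier in this diff, with the internal steps annotated:

use bitvec::prelude::*;
use core::iter;

fn main() {
	let mut bv = bitvec![0, 1, 1, 1, 1, 0];
	//  `Drain::new` truncates `bv` to the head (`[0]`) and records the tail
	//  range (the final `0`).
	let splice = bv.splice(1 .. 5, iter::once(false));
	//  On drop, `Splice` fills the drained span from the replacement; the one
	//  replacement bit is written, `fill` reports `EmptyInput`, and no tail
	//  relocation through `move_tail` is required.
	//  `Drain::drop` then copies the tail down behind the written bit and
	//  restores the final length.
	drop(splice);
	assert_eq!(bv, bits![0, 0, 0]);
}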

@@ -569,7 +569,8 @@
569 569
	/// ```
570 570
	///
571 571
	/// [`as_mut_ptr`]: #method.as_mut_ptr
572 -
	#[inline]
572 +
	#[inline(always)]
573 +
	#[cfg(not(tarpaulin_include))]
573 574
	pub fn as_ptr(&self) -> *const Self {
574 575
		self as *const Self
575 576
	}
@@ -615,7 +616,8 @@
615 616
	/// }
616 617
	/// assert_eq!(data, 0b0101_0101_0101_0101);
617 618
	/// ```
618 -
	#[inline]
619 +
	#[inline(always)]
620 +
	#[cfg(not(tarpaulin_include))]
619 621
	pub fn as_mut_ptr(&mut self) -> *mut Self {
620 622
		self as *mut Self
621 623
	}
@@ -1968,7 +1970,8 @@
1968 1970
	/// Copying two bits from a slice into another:
1969 1971
	///
1970 1972
	/// [`clone_from_bitslice`]: #method.clone_from_bitslice
1971 -
	#[inline]
1973 +
	#[inline(always)]
1974 +
	#[cfg(not(tarpaulin_include))]
1972 1975
	pub fn copy_from_bitslice(&mut self, src: &Self) {
1973 1976
		self.clone_from_bitslice(src);
1974 1977
	}
@@ -2147,13 +2150,21 @@
2147 2150
	pub unsafe fn align_to<U>(&self) -> (&Self, &BitSlice<O, U>, &Self)
2148 2151
	where U: BitStore {
2149 2152
		let bitptr = self.bitptr();
2153 +
		let bp_len = bitptr.len();
2150 2154
		let (l, c, r) = bitptr.as_aliased_slice().align_to::<U::Alias>();
2151 2155
		let l_start = bitptr.head().value() as usize;
2152 -
		let l = BitSlice::<O, T::Alias>::from_slice_unchecked(l)
2153 -
			.get_unchecked(l_start ..);
2154 -
		let c = BitSlice::<O, U::Alias>::from_slice_unchecked(c);
2155 -
		let r = BitSlice::<O, T::Alias>::from_slice_unchecked(r)
2156 -
			.get_unchecked(.. bitptr.len() - l.len() - c.len());
2156 +
		let mut l = BitSlice::<O, T::Alias>::from_slice_unchecked(l);
2157 +
		if l.len() > l_start {
2158 +
			l = l.get_unchecked(l_start ..);
2159 +
		}
2160 +
		let mut c = BitSlice::<O, U::Alias>::from_slice_unchecked(c);
2161 +
		let c_len = cmp::min(c.len(), bp_len - l.len());
2162 +
		c = c.get_unchecked(.. c_len);
2163 +
		let mut r = BitSlice::<O, T::Alias>::from_slice_unchecked(r);
2164 +
		let r_len = bp_len - l.len() - c.len();
2165 +
		if r.len() > r_len {
2166 +
			r = r.get_unchecked(.. r_len);
2167 +
		}
2157 2168
		(
2158 2169
			l.bitptr()
2159 2170
				.pipe(dvl::remove_bitptr_alias::<T>)
@@ -2216,25 +2227,11 @@
2216 2227
		&mut self,
2217 2228
	) -> (&mut Self, &mut BitSlice<O, U>, &mut Self)
2218 2229
	where U: BitStore {
2219 -
		let bitptr = self.bitptr();
2220 -
		let l_start = bitptr.head().value() as usize;
2221 -
		let bp_len = bitptr.len();
2222 -
		let (l, c, r) = bitptr.as_aliased_slice().align_to::<U::Alias>();
2223 -
		let l = BitSlice::<O, T::Alias>::from_slice_unchecked(l)
2224 -
			.get_unchecked(l_start ..);
2225 -
		let c = BitSlice::<O, U::Alias>::from_slice_unchecked(c);
2226 -
		let r = BitSlice::<O, T::Alias>::from_slice_unchecked(r)
2227 -
			.get_unchecked(.. bp_len - l.len() - c.len());
2230 +
		let (l, c, r) = self.align_to::<U>();
2228 2231
		(
2229 -
			l.bitptr()
2230 -
				.pipe(dvl::remove_bitptr_alias::<T>)
2231 -
				.to_bitslice_mut(),
2232 -
			c.bitptr()
2233 -
				.pipe(dvl::remove_bitptr_alias::<U>)
2234 -
				.to_bitslice_mut(),
2235 -
			r.bitptr()
2236 -
				.pipe(dvl::remove_bitptr_alias::<T>)
2237 -
				.to_bitslice_mut(),
2232 +
			l.bitptr().to_bitslice_mut(),
2233 +
			c.bitptr().to_bitslice_mut(),
2234 +
			r.bitptr().to_bitslice_mut(),
2238 2235
		)
2239 2236
	}
2240 2237
}
@@ -2810,6 +2807,7 @@
2810 2807
}
2811 2808
2812 2809
/// `RangeFull` is the identity function.
2810 +
#[cfg(not(tarpaulin_include))]
2813 2811
impl<'a, O, T> BitSliceIndex<'a, O, T> for RangeFull
2814 2812
where
2815 2813
	O: 'a + BitOrder,

@@ -440,3 +440,21 @@
440 440
#[doc(hidden)]
441 441
#[cfg(target_endian = "big")]
442 442
pub use self::u8_from_be_bits as u8_from_ne_bits;
443 +
444 +
#[cfg(test)]
445 +
mod tests {
446 +
	use super::*;
447 +
448 +
	#[test]
449 +
	fn byte_assembly() {
450 +
		assert_eq!(
451 +
			u8_from_le_bits(false, false, true, true, false, true, false, true),
452 +
			0b1010_1100
453 +
		);
454 +
455 +
		assert_eq!(
456 +
			u8_from_be_bits(false, false, true, true, false, true, false, true),
457 +
			0b0011_0101
458 +
		);
459 +
	}
460 +
}
Files                        Coverage
src                          89.92%
tests                        95.00%
Project Totals (38 files)    90.10%