bevy_ecs/storage/blob_vec.rs
use std::{
    alloc::{handle_alloc_error, Layout},
    cell::UnsafeCell,
    num::NonZeroUsize,
    ptr::NonNull,
};

use bevy_ptr::{OwningPtr, Ptr, PtrMut};
use bevy_utils::OnDrop;

/// A flat, type-erased data storage type.
///
/// Used to densely store homogeneous ECS data. A blob is usually just an arbitrary block of contiguous memory without any identity, and
/// could be used to represent any arbitrary data (e.g. strings, arrays, etc.). This type is an extendable and re-allocatable blob, which makes
/// it a blobby Vec, a `BlobVec`.
pub(super) struct BlobVec {
    item_layout: Layout,
    capacity: usize,
    /// Number of elements, not bytes
    len: usize,
    // the `data` ptr's layout is always `array_layout(item_layout, capacity)`
    data: NonNull<u8>,
    // None if the underlying type doesn't need to be dropped
    drop: Option<unsafe fn(OwningPtr<'_>)>,
}
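// Mental model (informal, not part of the API): a `BlobVec` created with
// `Layout::new::<T>()` and a drop function for `T` holds the same bytes that a
// `Vec<T>` would, but the element type is erased, so every read and write goes
// through untyped pointers and the caller must uphold the layout invariants itself.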

// We want to ignore the `drop` field in our `Debug` impl
impl std::fmt::Debug for BlobVec {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("BlobVec")
            .field("item_layout", &self.item_layout)
            .field("capacity", &self.capacity)
            .field("len", &self.len)
            .field("data", &self.data)
            .finish()
    }
}

impl BlobVec {
    /// Creates a new [`BlobVec`] with the specified `capacity`.
    ///
    /// `drop` is an optional function pointer that is meant to be invoked when any element in the [`BlobVec`]
    /// should be dropped. For all Rust-based types, this should match 1:1 with the implementation of [`Drop`]
    /// if present, and should be `None` if `T: !Drop`. For non-Rust based types, this should match any cleanup
    /// processes typically associated with the stored element.
    ///
    /// # Safety
    ///
    /// `drop` should be safe to call with an [`OwningPtr`] pointing to any item that's been pushed into this [`BlobVec`].
    ///
    /// If `drop` is `None`, the items will be leaked. This should generally only be `None` when
    /// [`needs_drop`] returns `false` for the stored type.
    ///
    /// [`needs_drop`]: core::mem::needs_drop
    pub unsafe fn new(
        item_layout: Layout,
        drop: Option<unsafe fn(OwningPtr<'_>)>,
        capacity: usize,
    ) -> BlobVec {
        let align = NonZeroUsize::new(item_layout.align()).expect("alignment must be > 0");
        let data = bevy_ptr::dangling_with_align(align);
        if item_layout.size() == 0 {
            BlobVec {
                data,
                // ZST `BlobVec` max size is `usize::MAX`, and `reserve_exact` for ZST assumes
                // the capacity is always `usize::MAX` and panics if it overflows.
                capacity: usize::MAX,
                len: 0,
                item_layout,
                drop,
            }
        } else {
            let mut blob_vec = BlobVec {
                data,
                capacity: 0,
                len: 0,
                item_layout,
                drop,
            };
            blob_vec.reserve_exact(capacity);
            blob_vec
        }
    }
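    // A minimal construction sketch (informal; `T` is a placeholder type and
    // `drop_ptr::<T>` is the helper defined in the tests at the bottom of this file):
    //
    //     // SAFETY: `drop_ptr::<T>` drops a value whose layout is `Layout::new::<T>()`.
    //     let mut vec = unsafe { BlobVec::new(Layout::new::<T>(), Some(drop_ptr::<T>), 8) };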

    /// Returns the number of elements in the vector.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the vector contains no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the total number of elements the vector can hold without reallocating.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Returns the [`Layout`] of the element type stored in the vector.
    #[inline]
    pub fn layout(&self) -> Layout {
        self.item_layout
    }

    /// Reserves the minimum capacity for at least `additional` more elements to be inserted in the given `BlobVec`.
    /// After calling `reserve_exact`, capacity will be greater than or equal to `self.len() + additional`. Does nothing if
    /// the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not be relied upon
    /// to be precisely minimal.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    pub fn reserve_exact(&mut self, additional: usize) {
        let available_space = self.capacity - self.len;
        if available_space < additional {
            // SAFETY: `available_space < additional`, so `additional - available_space > 0`
            let increment = unsafe { NonZeroUsize::new_unchecked(additional - available_space) };
            self.grow_exact(increment);
        }
    }

    /// Reserves capacity for at least `additional` more elements to be inserted in the given `BlobVec`.
    /// Unlike [`reserve_exact`](Self::reserve_exact), this may over-allocate to amortize the cost of repeated insertions.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        /// Similar to `reserve_exact`, but grows the capacity by at least `self.capacity()` if there is not
        /// enough space to hold `additional` more elements.
        #[cold]
        fn do_reserve(slf: &mut BlobVec, additional: usize) {
            let increment = slf.capacity.max(additional - (slf.capacity - slf.len));
            let increment = NonZeroUsize::new(increment).unwrap();
            slf.grow_exact(increment);
        }

        if self.capacity - self.len < additional {
            do_reserve(self, additional);
        }
    }
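    // Worked example of the growth policy above: with `capacity == 4`, `len == 4` and
    // `additional == 1`, `do_reserve` computes `increment = max(4, 1 - (4 - 4)) = 4`,
    // so the capacity doubles to 8 instead of growing by one element per push.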

    /// Grows the capacity by `increment` elements.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    /// For a ZST it panics unconditionally, because a ZST `BlobVec`'s capacity
    /// is initialized to `usize::MAX` and always stays that way.
    fn grow_exact(&mut self, increment: NonZeroUsize) {
        let new_capacity = self
            .capacity
            .checked_add(increment.get())
            .expect("capacity overflow");
        let new_layout =
            array_layout(&self.item_layout, new_capacity).expect("array layout should be valid");
        let new_data = if self.capacity == 0 {
            // SAFETY:
            // - the layout has non-zero size: `item_layout.size() > 0` here, since a ZST `BlobVec`
            //   panics in the `checked_add` above before reaching this allocation
            unsafe { std::alloc::alloc(new_layout) }
        } else {
            // SAFETY:
            // - ptr was allocated via this allocator
            // - the layout of the ptr was `array_layout(self.item_layout, self.capacity)`
            // - `item_layout.size() > 0` and `new_capacity > 0`, so the layout size is non-zero
            // - "new_size, when rounded up to the nearest multiple of layout.align(), must not overflow (i.e., the rounded value must be less than usize::MAX)",
            //   since the item size is always a multiple of its alignment, the rounding cannot happen
            //   here and the overflow is handled in `array_layout`
            unsafe {
                std::alloc::realloc(
                    self.get_ptr_mut().as_ptr(),
                    array_layout(&self.item_layout, self.capacity)
                        .expect("array layout should be valid"),
                    new_layout.size(),
                )
            }
        };

        self.data = NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout));
        self.capacity = new_capacity;
    }

    /// Initializes the value at `index` to `value`. This function does not do any bounds checking.
    ///
    /// # Safety
    /// - index must be in bounds
    /// - the memory in the [`BlobVec`] starting at index `index`, of a size matching this [`BlobVec`]'s
    ///   `item_layout`, must have been previously allocated.
    #[inline]
    pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) {
        debug_assert!(index < self.len());
        let ptr = self.get_unchecked_mut(index);
        std::ptr::copy_nonoverlapping::<u8>(value.as_ptr(), ptr.as_ptr(), self.item_layout.size());
    }

    /// Replaces the value at `index` with `value`. This function does not do any bounds checking.
    ///
    /// # Safety
    /// - index must be in-bounds
    /// - the memory in the [`BlobVec`] starting at index `index`, of a size matching this
    ///   [`BlobVec`]'s `item_layout`, must have been previously initialized with an item matching
    ///   this [`BlobVec`]'s `item_layout`
    /// - the memory at `*value` must also be previously initialized with an item matching this
    ///   [`BlobVec`]'s `item_layout`
    pub unsafe fn replace_unchecked(&mut self, index: usize, value: OwningPtr<'_>) {
        debug_assert!(index < self.len());

        // Pointer to the value in the vector that will get replaced.
        // SAFETY: The caller ensures that `index` fits in this vector.
        let destination = NonNull::from(unsafe { self.get_unchecked_mut(index) });
        let source = value.as_ptr();

        if let Some(drop) = self.drop {
            // Temporarily set the length to zero, so that if `drop` panics the caller
            // will not be left with a `BlobVec` containing a dropped element within
            // its initialized range.
            let old_len = self.len;
            self.len = 0;

            // Transfer ownership of the old value out of the vector, so it can be dropped.
            // SAFETY:
            // - `destination` was obtained from a `PtrMut` in this vector, which ensures it is non-null,
            //   well-aligned for the underlying type, and has proper provenance.
            // - The storage location will get overwritten with `value` later, which ensures
            //   that the element will not get observed or double dropped later.
            // - If a panic occurs, `self.len` will remain `0`, which ensures a double-drop
            //   does not occur. Instead, all elements will be forgotten.
            let old_value = unsafe { OwningPtr::new(destination) };

            // This closure will run in case `drop()` panics,
            // which ensures that `value` does not get forgotten.
            let on_unwind = OnDrop::new(|| drop(value));

            drop(old_value);

            // If the above code does not panic, make sure that `value` doesn't get dropped.
            core::mem::forget(on_unwind);

            // Make the vector's contents observable again, since panics are no longer possible.
            self.len = old_len;
        }

        // Copy the new value into the vector, overwriting the previous value.
        // SAFETY:
        // - `source` and `destination` were obtained from `OwningPtr`s, which ensures they are
        //   valid for both reads and writes.
        // - The value behind `source` will only be dropped if the above branch panics,
        //   so it must still be initialized and it is safe to transfer ownership into the vector.
        // - `source` and `destination` were obtained from different memory locations,
        //   both of which we have exclusive access to, so they are guaranteed not to overlap.
        unsafe {
            std::ptr::copy_nonoverlapping::<u8>(
                source,
                destination.as_ptr(),
                self.item_layout.size(),
            );
        }
    }
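    // For intuition (an informal sketch with placeholder names, assuming a concrete element
    // type `T`): the dance above is the type-erased, panic-safe equivalent of
    //
    //     // SAFETY: `slot: *mut T` points to an initialized element of this vector.
    //     unsafe { core::ptr::drop_in_place(slot) };
    //     unsafe { core::ptr::write(slot, new_value) };
    //
    // with the `len = 0` / `OnDrop` bookkeeping guarding against a panicking `Drop` impl.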

    /// Appends an element to the back of the vector.
    ///
    /// # Safety
    /// The `value` must match the [`layout`](`BlobVec::layout`) of the elements in the [`BlobVec`].
    #[inline]
    pub unsafe fn push(&mut self, value: OwningPtr<'_>) {
        self.reserve(1);
        let index = self.len;
        self.len += 1;
        self.initialize_unchecked(index, value);
    }

    /// Forces the length of the vector to `len`.
    ///
    /// # Safety
    /// `len` must be <= `capacity`. If the length is decreased, the "out of bounds" items must be dropped.
    /// Newly added items must be immediately populated with valid values and length must be
    /// increased. For better unwind safety, call [`BlobVec::set_len`] _after_ populating a new
    /// value.
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= self.capacity());
        self.len = len;
    }

    /// Performs a "swap remove" at the given `index`, which removes the item at `index` and moves
    /// the last item in the [`BlobVec`] to `index` (if `index` is not the last item). It is the
    /// caller's responsibility to drop the returned pointer, if that is desirable.
    ///
    /// # Safety
    /// It is the caller's responsibility to ensure that `index` is less than `self.len()`.
    #[inline]
    #[must_use = "The returned pointer should be used to drop the removed element"]
    pub unsafe fn swap_remove_and_forget_unchecked(&mut self, index: usize) -> OwningPtr<'_> {
        debug_assert!(index < self.len());
        // Since `index` must be strictly less than `self.len` and `index` is at least zero,
        // `self.len` must be at least one. Thus, this cannot underflow.
        let new_len = self.len - 1;
        let size = self.item_layout.size();
        if index != new_len {
            std::ptr::swap_nonoverlapping::<u8>(
                self.get_unchecked_mut(index).as_ptr(),
                self.get_unchecked_mut(new_len).as_ptr(),
                size,
            );
        }
        self.len = new_len;
        // Cannot use get_unchecked here as this is technically out of bounds after changing len.
        // SAFETY:
        // - `new_len` is less than the old len, so it must fit in this vector's allocation.
        // - `size` is a multiple of the erased type's alignment,
        //   so adding a multiple of `size` will preserve alignment.
        // - The removed element lives as long as this vector's mutable reference.
        let p = unsafe { self.get_ptr_mut().byte_add(new_len * size) };
        // SAFETY: The removed element is unreachable by this vector so it's safe to promote the
        // `PtrMut` to an `OwningPtr`.
        unsafe { p.promote() }
    }
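    // Note: this is the type-erased analogue of `Vec::swap_remove`, except that the removed
    // element is handed back as an `OwningPtr` instead of by value, so the caller chooses
    // whether to drop it (`swap_remove_and_drop_unchecked`) or read it out (see the
    // `swap_remove` helper in the tests below).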

    /// Removes the value at `index` and copies the value stored into `ptr`.
    /// Does not do any bounds checking on `index`.
    /// The removed element is replaced by the last element of the `BlobVec`.
    ///
    /// # Safety
    /// It is the caller's responsibility to ensure that `index` is < `self.len()`
    /// and that `self[index]` has been properly initialized.
    #[inline]
    pub unsafe fn swap_remove_unchecked(&mut self, index: usize, ptr: PtrMut<'_>) {
        debug_assert!(index < self.len());
        let last = self.get_unchecked_mut(self.len - 1).as_ptr();
        let target = self.get_unchecked_mut(index).as_ptr();
        // Copy the item at the index into the provided ptr
        std::ptr::copy_nonoverlapping::<u8>(target, ptr.as_ptr(), self.item_layout.size());
        // Recompress the storage by moving the previous last element into the
        // now-free row overwriting the previous data. The removed row may be the last
        // one so a non-overlapping copy must not be used here.
        std::ptr::copy::<u8>(last, target, self.item_layout.size());
        // Invalidate the data stored in the last row, as it has been moved
        self.len -= 1;
    }

    /// Removes the value at `index` and drops it.
    /// Does not do any bounds checking on `index`.
    /// The removed element is replaced by the last element of the `BlobVec`.
    ///
    /// # Safety
    /// It is the caller's responsibility to ensure that `index` is `< self.len()`.
    #[inline]
    pub unsafe fn swap_remove_and_drop_unchecked(&mut self, index: usize) {
        debug_assert!(index < self.len());
        let drop = self.drop;
        let value = self.swap_remove_and_forget_unchecked(index);
        if let Some(drop) = drop {
            drop(value);
        }
    }

    /// Returns a reference to the element at `index`, without doing bounds checking.
    ///
    /// # Safety
    /// It is the caller's responsibility to ensure that `index < self.len()`.
    #[inline]
    pub unsafe fn get_unchecked(&self, index: usize) -> Ptr<'_> {
        debug_assert!(index < self.len());
        let size = self.item_layout.size();
        // SAFETY:
        // - The caller ensures that `index` fits in this vector,
        //   so this operation will not overflow the original allocation.
        // - `size` is a multiple of the erased type's alignment,
        //   so adding a multiple of `size` will preserve alignment.
        // - The element at `index` outlives this vector's reference.
        unsafe { self.get_ptr().byte_add(index * size) }
    }

    /// Returns a mutable reference to the element at `index`, without doing bounds checking.
    ///
    /// # Safety
    /// It is the caller's responsibility to ensure that `index < self.len()`.
    #[inline]
    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> PtrMut<'_> {
        debug_assert!(index < self.len());
        let size = self.item_layout.size();
        // SAFETY:
        // - The caller ensures that `index` fits in this vector,
        //   so this operation will not overflow the original allocation.
        // - `size` is a multiple of the erased type's alignment,
        //   so adding a multiple of `size` will preserve alignment.
        // - The element at `index` outlives this vector's mutable reference.
        unsafe { self.get_ptr_mut().byte_add(index * size) }
    }

    /// Gets a [`Ptr`] to the start of the vec
    #[inline]
    pub fn get_ptr(&self) -> Ptr<'_> {
        // SAFETY: the inner data will remain valid for as long as 'self.
        unsafe { Ptr::new(self.data) }
    }

    /// Gets a [`PtrMut`] to the start of the vec
    #[inline]
    pub fn get_ptr_mut(&mut self) -> PtrMut<'_> {
        // SAFETY: the inner data will remain valid for as long as 'self.
        unsafe { PtrMut::new(self.data) }
    }

    /// Get a reference to the entire [`BlobVec`] as if it were an array with elements of type `T`
    ///
    /// # Safety
    /// The type `T` must be the type of the items in this [`BlobVec`].
    pub unsafe fn get_slice<T>(&self) -> &[UnsafeCell<T>] {
        // SAFETY: the inner data will remain valid for as long as 'self.
        unsafe { std::slice::from_raw_parts(self.data.as_ptr() as *const UnsafeCell<T>, self.len) }
    }

    /// Clears the vector, removing (and dropping) all values.
    ///
    /// Note that this method has no effect on the allocated capacity of the vector.
    pub fn clear(&mut self) {
        let len = self.len;
        // We set len to 0 _before_ dropping elements for unwind safety. This ensures we don't
        // accidentally drop elements twice in the event of a drop impl panicking.
        self.len = 0;
        if let Some(drop) = self.drop {
            let size = self.item_layout.size();
            for i in 0..len {
                // SAFETY:
                // * 0 <= `i` < `len`, so `i * size` must be in bounds for the allocation.
                // * `size` is a multiple of the erased type's alignment,
                //   so adding a multiple of `size` will preserve alignment.
                // * The item lives until it's dropped.
                // * The item is left unreachable so it can be safely promoted to an `OwningPtr`.
                // NOTE: `self.get_unchecked_mut(i)` cannot be used here, since the `debug_assert`
                // would panic due to `self.len` being set to 0.
                let item = unsafe { self.get_ptr_mut().byte_add(i * size).promote() };
                // SAFETY: `item` was obtained from this `BlobVec`, so its underlying type must match `drop`.
                unsafe { drop(item) };
            }
        }
    }
}

impl Drop for BlobVec {
    fn drop(&mut self) {
        self.clear();
        let array_layout =
            array_layout(&self.item_layout, self.capacity).expect("array layout should be valid");
        if array_layout.size() > 0 {
            // SAFETY: the `data` ptr was allocated by the global allocator with exactly this `array_layout`.
            unsafe {
                std::alloc::dealloc(self.get_ptr_mut().as_ptr(), array_layout);
            }
        }
    }
}

/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
fn array_layout(layout: &Layout, n: usize) -> Option<Layout> {
    let (array_layout, offset) = repeat_layout(layout, n)?;
    debug_assert_eq!(layout.size(), offset);
    Some(array_layout)
}
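// Sanity check (informal): for a `layout` obtained from `Layout::new::<T>()`, this returns
// the layout of `[T; n]`, i.e. what `Layout::array::<T>(n)` computes, since `size_of::<T>()`
// is already a multiple of `align_of::<T>()` and needs no extra padding (overflow handling
// differs slightly from the standard library).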

// TODO: replace with `Layout::repeat` if/when it stabilizes
/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
fn repeat_layout(layout: &Layout, n: usize) -> Option<(Layout, usize)> {
    // This cannot overflow. Quoting from the invariant of Layout:
    // > `size`, when rounded up to the nearest multiple of `align`,
    // > must not overflow (i.e., the rounded value must be less than
    // > `usize::MAX`)
    let padded_size = layout.size() + padding_needed_for(layout, layout.align());
    let alloc_size = padded_size.checked_mul(n)?;

    // SAFETY: `layout.align()` is already known to be valid and `alloc_size` has been
    // padded already.
    unsafe {
        Some((
            Layout::from_size_align_unchecked(alloc_size, layout.align()),
            padded_size,
        ))
    }
}

/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
const fn padding_needed_for(layout: &Layout, align: usize) -> usize {
    let len = layout.size();

    // Rounded up value is:
    //   len_rounded_up = (len + align - 1) & !(align - 1);
    // and then we return the padding difference: `len_rounded_up - len`.
    //
    // We use modular arithmetic throughout:
    //
    // 1. align is guaranteed to be > 0, so align - 1 is always
    //    valid.
    //
    // 2. `len + align - 1` can overflow by at most `align - 1`,
    //    so the &-mask with `!(align - 1)` will ensure that in the
    //    case of overflow, `len_rounded_up` will itself be 0.
    //    Thus the returned padding, when added to `len`, yields 0,
    //    which trivially satisfies the alignment `align`.
    //
    // (Of course, attempts to allocate blocks of memory whose
    // size and padding overflow in the above manner should cause
    // the allocator to yield an error anyway.)

    let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
    len_rounded_up.wrapping_sub(len)
}
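// Worked example: for `len == 5` and `align == 4`, `len_rounded_up = (5 + 4 - 1) & !(4 - 1)
// = 8 & !3 = 8`, so `padding_needed_for` returns `8 - 5 = 3` bytes of padding.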

#[cfg(test)]
mod tests {
    use crate as bevy_ecs; // required for derive macros
    use crate::{component::Component, ptr::OwningPtr, world::World};

    use super::BlobVec;
    use std::{alloc::Layout, cell::RefCell, mem, rc::Rc};

    unsafe fn drop_ptr<T>(x: OwningPtr<'_>) {
        // SAFETY: The pointer points to a valid value of type `T` and it is safe to drop this value.
        unsafe {
            x.drop_as::<T>();
        }
    }

    /// # Safety
    ///
    /// `blob_vec` must have a layout that matches `Layout::new::<T>()`
    unsafe fn push<T>(blob_vec: &mut BlobVec, value: T) {
        OwningPtr::make(value, |ptr| {
            blob_vec.push(ptr);
        });
    }

    /// # Safety
    ///
    /// `blob_vec` must have a layout that matches `Layout::new::<T>()`
    unsafe fn swap_remove<T>(blob_vec: &mut BlobVec, index: usize) -> T {
        assert!(index < blob_vec.len());
        let value = blob_vec.swap_remove_and_forget_unchecked(index);
        value.read::<T>()
    }

    /// # Safety
    ///
    /// `blob_vec` must have a layout that matches `Layout::new::<T>()` and it must store a valid `T`
    /// value at the given `index`
    unsafe fn get_mut<T>(blob_vec: &mut BlobVec, index: usize) -> &mut T {
        assert!(index < blob_vec.len());
        blob_vec.get_unchecked_mut(index).deref_mut::<T>()
    }

    #[test]
    fn resize_test() {
        let item_layout = Layout::new::<usize>();
        // SAFETY: `drop` fn is `None`, usize doesn't need dropping
        let mut blob_vec = unsafe { BlobVec::new(item_layout, None, 64) };
        // SAFETY: `i` is a usize, i.e. the type corresponding to `item_layout`
        unsafe {
            for i in 0..1_000 {
                push(&mut blob_vec, i as usize);
            }
        }

        assert_eq!(blob_vec.len(), 1_000);
        assert_eq!(blob_vec.capacity(), 1_024);
    }

    #[derive(Debug, Eq, PartialEq, Clone)]
    struct Foo {
        a: u8,
        b: String,
        drop_counter: Rc<RefCell<usize>>,
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            *self.drop_counter.borrow_mut() += 1;
        }
    }

    #[test]
    fn blob_vec() {
        let drop_counter = Rc::new(RefCell::new(0));
        {
            let item_layout = Layout::new::<Foo>();
            let drop = drop_ptr::<Foo>;
            // SAFETY: drop is able to drop a value of its `item_layout`
            let mut blob_vec = unsafe { BlobVec::new(item_layout, Some(drop), 2) };
            assert_eq!(blob_vec.capacity(), 2);
            // SAFETY: the following code only deals with values of type `Foo`, which satisfies the safety requirement of `push`, `get_mut` and `swap_remove` that the
            // values have a layout compatible to the blob vec's `item_layout`.
            // Every index is in range.
            unsafe {
                let foo1 = Foo {
                    a: 42,
                    b: "abc".to_string(),
                    drop_counter: drop_counter.clone(),
                };
                push(&mut blob_vec, foo1.clone());
                assert_eq!(blob_vec.len(), 1);
                assert_eq!(get_mut::<Foo>(&mut blob_vec, 0), &foo1);

                let mut foo2 = Foo {
                    a: 7,
                    b: "xyz".to_string(),
                    drop_counter: drop_counter.clone(),
                };
                push::<Foo>(&mut blob_vec, foo2.clone());
                assert_eq!(blob_vec.len(), 2);
                assert_eq!(blob_vec.capacity(), 2);
                assert_eq!(get_mut::<Foo>(&mut blob_vec, 0), &foo1);
                assert_eq!(get_mut::<Foo>(&mut blob_vec, 1), &foo2);

                get_mut::<Foo>(&mut blob_vec, 1).a += 1;
                assert_eq!(get_mut::<Foo>(&mut blob_vec, 1).a, 8);

                let foo3 = Foo {
                    a: 16,
                    b: "123".to_string(),
                    drop_counter: drop_counter.clone(),
                };

                push(&mut blob_vec, foo3.clone());
                assert_eq!(blob_vec.len(), 3);
                assert_eq!(blob_vec.capacity(), 4);

                let last_index = blob_vec.len() - 1;
                let value = swap_remove::<Foo>(&mut blob_vec, last_index);
                assert_eq!(foo3, value);

                assert_eq!(blob_vec.len(), 2);
                assert_eq!(blob_vec.capacity(), 4);

                let value = swap_remove::<Foo>(&mut blob_vec, 0);
                assert_eq!(foo1, value);
                assert_eq!(blob_vec.len(), 1);
                assert_eq!(blob_vec.capacity(), 4);

                foo2.a = 8;
                assert_eq!(get_mut::<Foo>(&mut blob_vec, 0), &foo2);
            }
        }

        assert_eq!(*drop_counter.borrow(), 6);
    }

    #[test]
    fn blob_vec_drop_empty_capacity() {
        let item_layout = Layout::new::<Foo>();
        let drop = drop_ptr::<Foo>;
        // SAFETY: drop is able to drop a value of its `item_layout`
        let _ = unsafe { BlobVec::new(item_layout, Some(drop), 0) };
    }

    #[test]
    #[should_panic(expected = "capacity overflow")]
    fn blob_vec_zst_size_overflow() {
        // SAFETY: `()` does not need to be dropped, so passing no drop function is correct.
        let mut blob_vec = unsafe { BlobVec::new(Layout::new::<()>(), None, 0) };

        assert_eq!(usize::MAX, blob_vec.capacity(), "Self-check");

        // SAFETY: Because `()` is a ZST that does not need to be dropped, and because `BlobVec` capacity
        // is always `usize::MAX` for ZSTs, we can arbitrarily set the length
        // and still be sound.
        unsafe {
            blob_vec.set_len(usize::MAX);
        }

        // SAFETY: `BlobVec` was initialized for `()`, so it is safe to push `()` to it.
        unsafe {
            OwningPtr::make((), |ptr| {
                // This should panic because len is usize::MAX, remaining capacity is 0.
                blob_vec.push(ptr);
            });
        }
    }

    #[test]
    #[should_panic(expected = "capacity overflow")]
    fn blob_vec_capacity_overflow() {
        // SAFETY: `u32` does not need to be dropped, so passing no drop function is correct.
        let mut blob_vec = unsafe { BlobVec::new(Layout::new::<u32>(), None, 0) };

        assert_eq!(0, blob_vec.capacity(), "Self-check");

        OwningPtr::make(17u32, |ptr| {
            // SAFETY: we push the value of correct type.
            unsafe {
                blob_vec.push(ptr);
            }
        });

        blob_vec.reserve_exact(usize::MAX);
    }

    #[test]
    fn aligned_zst() {
        // NOTE: This test is explicitly for uncovering potential UB with miri.

        #[derive(Component)]
        #[repr(align(32))]
        struct Zst;

        let mut world = World::default();
        world.spawn(Zst);
        world.spawn(Zst);
        world.spawn(Zst);
        world.spawn_empty();

        let mut count = 0;

        let mut q = world.query::<&Zst>();
        for zst in q.iter(&world) {
            // Ensure that the references returned are properly aligned.
            assert_eq!(
                std::ptr::from_ref::<Zst>(zst) as usize % mem::align_of::<Zst>(),
                0
            );
            count += 1;
        }

        assert_eq!(count, 3);
    }
725}