zerocopy/lib.rs
1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13// cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! *<span style="font-size: 100%; color:grey;">Need more out of zerocopy?
16//! Submit a [customer request issue][customer-request-issue]!</span>*
17//!
18//! ***<span style="font-size: 140%">Fast, safe, <span
19//! style="color:red;">compile error</span>. Pick two.</span>***
20//!
21//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
22//! so you don't have to.
23//!
24//! *Thanks for using zerocopy 0.8! For an overview of what changes from 0.7,
25//! check out our [release notes][release-notes], which include a step-by-step
26//! guide for upgrading from 0.7.*
27//!
28//! *Have questions? Need help? Ask the maintainers on [GitHub][github-q-a] or
29//! on [Discord][discord]!*
30//!
31//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
32//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
33//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
34//! [discord]: https://discord.gg/MAvWH2R6zk
35//!
36//! # Overview
37//!
38//! ##### Conversion Traits
39//!
40//! Zerocopy provides four derivable traits for zero-cost conversions:
41//! - [`TryFromBytes`] indicates that a type may safely be converted from
42//! certain byte sequences (conditional on runtime checks)
43//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
44//! instance of a type
45//! - [`FromBytes`] indicates that a type may safely be converted from an
46//! arbitrary byte sequence
47//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
48//! sequence
49//!
50//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
51//!
52//! [slice-dsts]: KnownLayout#dynamically-sized-types
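//!
//! As a brief sketch of how these traits fit together (the `read_from_bytes`
//! and `as_bytes` method names are the zerocopy 0.8 spellings; see each
//! trait's documentation for the exact signatures and bounds):
//!
//! ```
//! use zerocopy::{FromBytes, Immutable, IntoBytes};
//! # use zerocopy_derive::*;
//!
//! // A fixed-size header whose fields accept any byte pattern. `Immutable` is
//! // one of the marker traits described below; `as_bytes` requires it.
//! #[derive(FromBytes, IntoBytes, Immutable)]
//! #[repr(C)]
//! struct UdpHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//!     length: [u8; 2],
//!     checksum: [u8; 2],
//! }
//!
//! let bytes = [0u8, 53, 0, 53, 0, 8, 0, 0];
//! // `FromBytes` lets us reinterpret those bytes as a `UdpHeader`...
//! let header = UdpHeader::read_from_bytes(&bytes[..]).unwrap();
//! assert_eq!(header.src_port, [0, 53]);
//! // ...and `IntoBytes` lets us view the header as bytes again.
//! assert_eq!(header.as_bytes(), &bytes[..]);
//! ```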
53//!
54//! ##### Marker Traits
55//!
56//! Zerocopy provides three derivable marker traits that do not provide any
57//! functionality themselves, but are required to call certain methods provided
58//! by the conversion traits:
59//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
60//! qualities of a type
61//! - [`Immutable`] indicates that a type is free from interior mutability,
62//! except by ownership or an exclusive (`&mut`) borrow
63//! - [`Unaligned`] indicates that a type's alignment requirement is 1
64//!
65//! You should generally derive these marker traits whenever possible.
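//!
//! As a small sketch of why they matter: zero-copy reference conversions such
//! as `ref_from_bytes` (a zerocopy 0.8 method name) are only available when
//! the relevant marker traits are derived:
//!
//! ```
//! use zerocopy::{FromBytes, Immutable, KnownLayout};
//! # use zerocopy_derive::*;
//!
//! #[derive(FromBytes, Immutable, KnownLayout)]
//! #[repr(C)]
//! struct Pair {
//!     a: u8,
//!     b: u8,
//! }
//!
//! let bytes = &[1u8, 2][..];
//! // `ref_from_bytes` is bounded on `KnownLayout + Immutable`; `pair` borrows
//! // directly from `bytes`, and no data is copied.
//! let pair = Pair::ref_from_bytes(bytes).unwrap();
//! assert_eq!(pair.a, 1);
//! assert_eq!(pair.b, 2);
//! ```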
66//!
67//! ##### Conversion Macros
68//!
69//! Zerocopy provides six macros for safe casting between types:
70//!
71//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
72//! one type to a value of another type of the same size
73//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
74//! mutable reference of one type to a mutable reference of another type of
75//! the same size
76//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
77//! mutable or immutable reference of one type to an immutable reference of
78//! another type of the same size
79//!
80//! These macros perform *compile-time* size and alignment checks, meaning that
81//! unconditional casts have zero cost at runtime. Conditional casts do not need
82//! to validate size or alignment at runtime, but do need to validate contents.
83//!
84//! These macros cannot be used in generic contexts. For generic conversions,
85//! use the methods defined by the [conversion traits](#conversion-traits).
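//!
//! As a short sketch (the `try_` variants return the error types described in
//! the [`error`] module):
//!
//! ```
//! use zerocopy::{transmute, try_transmute};
//!
//! // An unconditional cast between types of the same size, checked entirely
//! // at compile time.
//! let flattened: [u8; 4] = transmute!([[0u8, 1], [2, 3]]);
//! assert_eq!(flattened, [0, 1, 2, 3]);
//!
//! // A conditional cast: only 0 and 1 are valid `bool`s, so the contents must
//! // be validated at runtime.
//! let valid: Result<bool, _> = try_transmute!(1u8);
//! assert!(valid.unwrap());
//! let invalid: Result<bool, _> = try_transmute!(2u8);
//! assert!(invalid.is_err());
//! ```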
86//!
87//! ##### Byteorder-Aware Numerics
88//!
89//! Zerocopy provides byte-order aware integer types that support these
90//! conversions; see the [`byteorder`] module. These types are especially useful
91//! for network parsing.
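//!
//! As a sketch of what that looks like for a network header (`U16<BigEndian>`
//! and its `get` method come from the [`byteorder`] module; `ref_from_bytes`
//! is a zerocopy 0.8 method name):
//!
//! ```
//! use zerocopy::byteorder::{BigEndian, U16};
//! use zerocopy::FromBytes;
//! # use zerocopy_derive::*;
//!
//! // The first four bytes of a UDP header: source and destination ports,
//! // big-endian on the wire.
//! #[derive(FromBytes, Immutable, KnownLayout, Unaligned)]
//! #[repr(C)]
//! struct UdpPorts {
//!     src_port: U16<BigEndian>,
//!     dst_port: U16<BigEndian>,
//! }
//!
//! let bytes = &[0u8, 80, 0, 53][..];
//! let ports = UdpPorts::ref_from_bytes(bytes).unwrap();
//! // `get` converts from network byte order to the host's native order.
//! assert_eq!(ports.src_port.get(), 80);
//! assert_eq!(ports.dst_port.get(), 53);
//! ```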
92//!
93//! # Cargo Features
94//!
95//! - **`alloc`**
96//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
97//! the `alloc` crate is added as a dependency, and some allocation-related
98//! functionality is added.
99//!
100//! - **`std`**
101//! By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
102//! `std` crate is added as a dependency (ie, `no_std` is disabled), and
103//! support for some `std` types is added. `std` implies `alloc`.
104//!
105//! - **`derive`**
106//! Provides derives for the core marker traits via the `zerocopy-derive`
107//! crate. These derives are re-exported from `zerocopy`, so it is not
108//! necessary to depend on `zerocopy-derive` directly.
109//!
110//! However, you may experience better compile times if you instead directly
111//! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
112//! since doing so will allow Rust to compile these crates in parallel. To do
113//! so, do *not* enable the `derive` feature, and list both dependencies in
114//! your `Cargo.toml` with the same leading non-zero version number; e.g.:
115//!
116//! ```toml
117//! [dependencies]
118//! zerocopy = "0.X"
119//! zerocopy-derive = "0.X"
120//! ```
121//!
122//! To avoid the risk of [duplicate import errors][duplicate-import-errors] if
123//! one of your dependencies enables zerocopy's `derive` feature, import
124//! derives as `use zerocopy_derive::*` rather than by name (e.g., `use
125//! zerocopy_derive::FromBytes`).
126//!
127//! - **`simd`**
128//! When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
129//! `IntoBytes` impls are emitted for all stable SIMD types which exist on the
130//! target platform. Note that the layout of SIMD types is not yet stabilized,
131//! so these impls may be removed in the future if layout changes make them
132//! invalid. For more information, see the Unsafe Code Guidelines Reference
133//! page on the [layout of packed SIMD vectors][simd-layout].
134//!
135//! - **`simd-nightly`**
136//! Enables the `simd` feature and adds support for SIMD types which are only
137//! available on nightly. Since these types are unstable, support for any type
138//! may be removed at any point in the future.
139//!
140//! - **`float-nightly`**
141//! Adds support for the unstable `f16` and `f128` types. These types are
142//! not yet fully implemented and may not be supported on all platforms.
143//!
144//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
145//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
146//!
147//! # Security Ethos
148//!
149//! Zerocopy is expressly designed for use in security-critical contexts. We
150//! strive to ensure that zerocopy code is sound under Rust's current
151//! memory model, and *any future memory model*. We ensure this by:
152//! - **...not 'guessing' about Rust's semantics.**
153//! We annotate `unsafe` code with a precise rationale for its soundness that
154//! cites a relevant section of Rust's official documentation. When Rust's
155//! documented semantics are unclear, we work with the Rust Operational
156//! Semantics Team to clarify Rust's documentation.
157//! - **...rigorously testing our implementation.**
158//! We run tests using [Miri], ensuring that zerocopy is sound across a wide
159//! array of supported target platforms of varying endianness and pointer
160//! width, and across both current and experimental memory models of Rust.
161//! - **...formally proving the correctness of our implementation.**
162//! We apply formal verification tools like [Kani][kani] to prove zerocopy's
163//! correctness.
164//!
165//! For more information, see our full [soundness policy].
166//!
167//! [Miri]: https://github.com/rust-lang/miri
168//! [Kani]: https://github.com/model-checking/kani
169//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
170//!
171//! # Relationship to Project Safe Transmute
172//!
173//! [Project Safe Transmute] is an official initiative of the Rust Project to
174//! develop language-level support for safer transmutation. The Project consults
175//! with crates like zerocopy to identify aspects of safer transmutation that
176//! would benefit from compiler support, and has developed an [experimental,
177//! compiler-supported analysis][mcp-transmutability] which determines whether,
178//! for a given type, any value of that type may be soundly transmuted into
179//! another type. Once this functionality is sufficiently mature, zerocopy
180//! intends to replace its internal transmutability analysis (implemented by our
181//! custom derives) with the compiler-supported one. This change will likely be
182//! an implementation detail that is invisible to zerocopy's users.
183//!
184//! Project Safe Transmute will not replace the need for most of zerocopy's
185//! higher-level abstractions. The experimental compiler analysis is a tool for
186//! checking the soundness of `unsafe` code, not a tool to avoid writing
187//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
188//! will still be required in order to provide higher-level abstractions on top
189//! of the building block provided by Project Safe Transmute.
190//!
191//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
192//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
193//!
194//! # MSRV
195//!
196//! See our [MSRV policy].
197//!
198//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
199//!
200//! # Changelog
201//!
202//! Zerocopy uses [GitHub Releases].
203//!
204//! [GitHub Releases]: https://github.com/google/zerocopy/releases
205//!
206//! # Thanks
207//!
208//! Zerocopy is maintained by engineers at Google and Amazon with help from
209//! [many wonderful contributors][contributors]. Thank you to everyone who has
210//! lent a hand in making Rust a little more secure!
211//!
212//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
213
214// Sometimes we want to use lints which were added after our MSRV.
215// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
216// this attribute, any unknown lint would cause a CI failure when testing with
217// our MSRV.
218#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
219#![deny(renamed_and_removed_lints)]
220#![deny(
221 anonymous_parameters,
222 deprecated_in_future,
223 late_bound_lifetime_arguments,
224 missing_copy_implementations,
225 missing_debug_implementations,
226 missing_docs,
227 path_statements,
228 patterns_in_fns_without_body,
229 rust_2018_idioms,
230 trivial_numeric_casts,
231 unreachable_pub,
232 unsafe_op_in_unsafe_fn,
233 unused_extern_crates,
234 // We intentionally choose not to deny `unused_qualifications`. When items
235 // are added to the prelude (e.g., `core::mem::size_of`), this has the
236 // consequence of making some uses trigger this lint on the latest toolchain
237 // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
238 // does not work on older toolchains.
239 //
240 // We tested a more complicated fix in #1413, but ultimately decided that,
241 // since this lint is just a minor style lint, the complexity isn't worth it
242 // - it's fine to occasionally have unused qualifications slip through,
243 // especially since these do not affect our user-facing API in any way.
244 variant_size_differences
245)]
246#![cfg_attr(
247 __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
248 deny(fuzzy_provenance_casts, lossy_provenance_casts)
249)]
250#![deny(
251 clippy::all,
252 clippy::alloc_instead_of_core,
253 clippy::arithmetic_side_effects,
254 clippy::as_underscore,
255 clippy::assertions_on_result_states,
256 clippy::as_conversions,
257 clippy::correctness,
258 clippy::dbg_macro,
259 clippy::decimal_literal_representation,
260 clippy::double_must_use,
261 clippy::get_unwrap,
262 clippy::indexing_slicing,
263 clippy::missing_inline_in_public_items,
264 clippy::missing_safety_doc,
265 clippy::must_use_candidate,
266 clippy::must_use_unit,
267 clippy::obfuscated_if_else,
268 clippy::perf,
269 clippy::print_stdout,
270 clippy::return_self_not_must_use,
271 clippy::std_instead_of_core,
272 clippy::style,
273 clippy::suspicious,
274 clippy::todo,
275 clippy::undocumented_unsafe_blocks,
276 clippy::unimplemented,
277 clippy::unnested_or_patterns,
278 clippy::unwrap_used,
279 clippy::use_debug
280)]
281#![allow(clippy::type_complexity)]
282#![deny(
283 rustdoc::bare_urls,
284 rustdoc::broken_intra_doc_links,
285 rustdoc::invalid_codeblock_attributes,
286 rustdoc::invalid_html_tags,
287 rustdoc::invalid_rust_codeblocks,
288 rustdoc::missing_crate_level_docs,
289 rustdoc::private_intra_doc_links
290)]
291// In test code, it makes sense to weight more heavily towards concise, readable
292// code over correct or debuggable code.
293#![cfg_attr(any(test, kani), allow(
294 // In tests, you get line numbers and have access to source code, so panic
295 // messages are less important. You also often unwrap a lot, which would
296 // make expect'ing instead very verbose.
297 clippy::unwrap_used,
298 // In tests, there's no harm to "panic risks" - the worst that can happen is
299 // that your test will fail, and you'll fix it. By contrast, panic risks in
300 // production code introduce the possibility of code panicking unexpectedly "in
301 // the field".
302 clippy::arithmetic_side_effects,
303 clippy::indexing_slicing,
304))]
305#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
306#![cfg_attr(
307 all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
308 feature(stdarch_x86_avx512)
309)]
310#![cfg_attr(
311 all(feature = "simd-nightly", target_arch = "arm"),
312 feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
313)]
314#![cfg_attr(
315 all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
316 feature(stdarch_powerpc)
317)]
318#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
319#![cfg_attr(doc_cfg, feature(doc_cfg))]
320#![cfg_attr(
321 __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
322 feature(layout_for_ptr, coverage_attribute)
323)]
324
325// This is a hack to allow zerocopy-derive derives to work in this crate. They
326// assume that zerocopy is linked as an extern crate, so they access items from
327// it as `zerocopy::Xxx`. This makes that still work.
328#[cfg(any(feature = "derive", test))]
329extern crate self as zerocopy;
330
331#[doc(hidden)]
332#[macro_use]
333pub mod util;
334
335pub mod byte_slice;
336pub mod byteorder;
337mod deprecated;
338// This module is `pub` so that zerocopy's error types and error handling
339// documentation is grouped together in a cohesive module. In practice, we
340// expect most users to use the re-export of `error`'s items to avoid identifier
341// stuttering.
342pub mod error;
343mod impls;
344#[doc(hidden)]
345pub mod layout;
346mod macros;
347#[doc(hidden)]
348pub mod pointer;
349mod r#ref;
350// TODO(#252): If we make this pub, come up with a better name.
351mod wrappers;
352
353pub use crate::byte_slice::*;
354pub use crate::byteorder::*;
355pub use crate::error::*;
356pub use crate::r#ref::*;
357pub use crate::wrappers::*;
358
359use core::{
360 cell::{Cell, UnsafeCell},
361 cmp::Ordering,
362 fmt::{self, Debug, Display, Formatter},
363 hash::Hasher,
364 marker::PhantomData,
365 mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
366 num::{
367 NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
368 NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
369 },
370 ops::{Deref, DerefMut},
371 ptr::{self, NonNull},
372 slice,
373};
374
375#[cfg(feature = "std")]
376use std::io;
377
378use crate::pointer::invariant::{self, BecauseExclusive};
379
380#[cfg(any(feature = "alloc", test, kani))]
381extern crate alloc;
382#[cfg(any(feature = "alloc", test))]
383use alloc::{boxed::Box, vec::Vec};
384
385#[cfg(any(feature = "alloc", test))]
386use core::alloc::Layout;
387
388// Used by `TryFromBytes::is_bit_valid`.
389#[doc(hidden)]
390pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
391// Used by `KnownLayout`.
392#[doc(hidden)]
393pub use crate::layout::*;
394
395// For each trait polyfill, as soon as the corresponding feature is stable, the
396// polyfill import will be unused because method/function resolution will prefer
397// the inherent method/function over a trait method/function. Thus, we suppress
398// the `unused_imports` warning.
399//
400// See the documentation on `util::polyfills` for more information.
401#[allow(unused_imports)]
402use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
403
404#[rustversion::nightly]
405#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
406const _: () = {
407 #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
408 const _WARNING: () = ();
409 #[warn(deprecated)]
410 _WARNING
411};
412
413// These exist so that code which was written against the old names will get
414// less confusing error messages when they upgrade to a more recent version of
415// zerocopy. On our MSRV toolchain, the error messages read, for example:
416//
417// error[E0603]: trait `FromZeroes` is private
418// --> examples/deprecated.rs:1:15
419// |
420// 1 | use zerocopy::FromZeroes;
421// | ^^^^^^^^^^ private trait
422// |
423// note: the trait `FromZeroes` is defined here
424// --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
425// |
426// 1845 | use FromZeros as FromZeroes;
427// | ^^^^^^^^^^^^^^^^^^^^^^^
428//
429// The "note" provides enough context to make it easy to figure out how to fix
430// the error.
431#[allow(unused)]
432use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
433
434/// Implements [`KnownLayout`].
435///
436/// This derive analyzes various aspects of a type's layout that are needed for
437/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
438/// e.g.:
439///
440/// ```
441/// # use zerocopy_derive::KnownLayout;
442/// #[derive(KnownLayout)]
443/// struct MyStruct {
444/// # /*
445/// ...
446/// # */
447/// }
448///
449/// #[derive(KnownLayout)]
450/// enum MyEnum {
451/// # V00,
452/// # /*
453/// ...
454/// # */
455/// }
456///
457/// #[derive(KnownLayout)]
458/// union MyUnion {
459/// # variant: u8,
460/// # /*
461/// ...
462/// # */
463/// }
464/// ```
465///
466/// # Limitations
467///
468/// This derive cannot currently be applied to unsized structs without an
469/// explicit `repr` attribute.
470///
471/// Some invocations of this derive run afoul of a [known bug] in Rust's type
472/// privacy checker. For example, this code:
473///
474/// ```compile_fail,E0446
475/// use zerocopy::*;
476/// # use zerocopy_derive::*;
477///
478/// #[derive(KnownLayout)]
479/// #[repr(C)]
480/// pub struct PublicType {
481/// leading: Foo,
482/// trailing: Bar,
483/// }
484///
485/// #[derive(KnownLayout)]
486/// struct Foo;
487///
488/// #[derive(KnownLayout)]
489/// struct Bar;
490/// ```
491///
492/// ...results in a compilation error:
493///
494/// ```text
495/// error[E0446]: private type `Bar` in public interface
496/// --> examples/bug.rs:3:10
497/// |
498/// 3 | #[derive(KnownLayout)]
499/// | ^^^^^^^^^^^ can't leak private type
500/// ...
501/// 14 | struct Bar;
502/// | ---------- `Bar` declared as private
503/// |
504/// = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
505/// ```
506///
507/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
508/// structs whose trailing field type is less public than the enclosing struct.
509///
510/// To work around this, mark the trailing field type `pub` and annotate it with
511/// `#[doc(hidden)]`; e.g.:
512///
513/// ```no_run
514/// use zerocopy::*;
515/// # use zerocopy_derive::*;
516///
517/// #[derive(KnownLayout)]
518/// #[repr(C)]
519/// pub struct PublicType {
520/// leading: Foo,
521/// trailing: Bar,
522/// }
523///
524/// #[derive(KnownLayout)]
525/// struct Foo;
526///
527/// #[doc(hidden)]
528/// #[derive(KnownLayout)]
529/// pub struct Bar; // <- `Bar` is now also `pub`
530/// ```
531///
532/// [known bug]: https://github.com/rust-lang/rust/issues/45713
533#[cfg(any(feature = "derive", test))]
534#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
535pub use zerocopy_derive::KnownLayout;
536
537/// Indicates that zerocopy can reason about certain aspects of a type's layout.
538///
539/// This trait is required by many of zerocopy's APIs. It supports sized types,
540/// slices, and [slice DSTs](#dynamically-sized-types).
541///
542/// # Implementation
543///
544/// **Do not implement this trait yourself!** Instead, use
545/// [`#[derive(KnownLayout)]`][derive]; e.g.:
546///
547/// ```
548/// # use zerocopy_derive::KnownLayout;
549/// #[derive(KnownLayout)]
550/// struct MyStruct {
551/// # /*
552/// ...
553/// # */
554/// }
555///
556/// #[derive(KnownLayout)]
557/// enum MyEnum {
558/// # /*
559/// ...
560/// # */
561/// }
562///
563/// #[derive(KnownLayout)]
564/// union MyUnion {
565/// # variant: u8,
566/// # /*
567/// ...
568/// # */
569/// }
570/// ```
571///
572/// This derive performs a sophisticated analysis to deduce the layout
573/// characteristics of types. You **must** implement this trait via the derive.
574///
575/// # Dynamically-sized types
576///
577/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
578///
579/// A slice DST is a type whose trailing field is either a slice or another
580/// slice DST, rather than a type with fixed size. For example:
581///
582/// ```
583/// #[repr(C)]
584/// struct PacketHeader {
585/// # /*
586/// ...
587/// # */
588/// }
589///
590/// #[repr(C)]
591/// struct Packet {
592/// header: PacketHeader,
593/// body: [u8],
594/// }
595/// ```
596///
597/// It can be useful to think of slice DSTs as a generalization of slices - in
598/// other words, a normal slice is just the special case of a slice DST with
599/// zero leading fields. In particular:
600/// - Like slices, slice DSTs can have different lengths at runtime
601/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
602/// or via other indirection such as `Box`
603/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
604/// encodes the number of elements in the trailing slice field
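///
/// For example, a sketch of reading a slice DST from raw bytes, assuming the
/// derives and the `ref_from_bytes` constructor described in [`FromBytes`]:
///
/// ```
/// use zerocopy::*;
/// # use zerocopy_derive::*;
///
/// #[derive(FromBytes, Immutable, KnownLayout)]
/// #[repr(C)]
/// struct PacketHeader {
///     length: u8,
/// }
///
/// #[derive(FromBytes, Immutable, KnownLayout)]
/// #[repr(C)]
/// struct Packet {
///     header: PacketHeader,
///     body: [u8],
/// }
///
/// // The trailing `body` field gets however many bytes remain after the sized
/// // `header` prefix - here, 3 elements.
/// let bytes = &[3u8, 10, 20, 30][..];
/// let packet = Packet::ref_from_bytes(bytes).unwrap();
/// assert_eq!(packet.header.length, 3);
/// assert_eq!(packet.body, [10, 20, 30]);
/// ```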
605///
606/// ## Slice DST layout
607///
608/// Just like other composite Rust types, the layout of a slice DST is not
609/// well-defined unless it is specified using an explicit `#[repr(...)]`
610/// attribute such as `#[repr(C)]`. [Other representations are
611/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
612/// example.
613///
614/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
615/// types][repr-c-structs], but the presence of a variable-length field
616/// introduces the possibility of *dynamic padding*. In particular, it may be
617/// necessary to add trailing padding *after* the trailing slice field in order
618/// to satisfy the outer type's alignment, and the amount of padding required
619/// may be a function of the length of the trailing slice field. This is just a
620/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
621/// but it can result in surprising behavior. For example, consider the
622/// following type:
623///
624/// ```
625/// #[repr(C)]
626/// struct Foo {
627/// a: u32,
628/// b: u8,
629/// z: [u16],
630/// }
631/// ```
632///
633/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
634/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
635/// `Foo`:
636///
637/// ```text
638/// byte offset | 01234567
639///       field | aaaab---
640///                    ><
641/// ```
642///
643/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
644/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
645/// round up to offset 6. This means that there is one byte of padding between
646/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
647/// then two bytes of padding after `z` in order to satisfy the overall
648/// alignment of `Foo`. The size of this instance is 8 bytes.
649///
650/// What about if `z` has length 1?
651///
652/// ```text
653/// byte offset | 01234567
654///       field | aaaab-zz
655/// ```
656///
657/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
658/// that we no longer need padding after `z` in order to satisfy `Foo`'s
659/// alignment. We've now seen two different values of `Foo` with two different
660/// lengths of `z`, but they both have the same size - 8 bytes.
661///
662/// What about if `z` has length 2?
663///
664/// ```text
665/// byte offset | 012345678901
666///       field | aaaab-zzzz--
667/// ```
668///
669/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
670/// size to 10, and so we now need another 2 bytes of padding after `z` to
671/// satisfy `Foo`'s alignment.
672///
673/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
674/// applied to slice DSTs, but it can be surprising that the amount of trailing
675/// padding becomes a function of the trailing slice field's length, and thus
676/// can only be computed at runtime.
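///
/// As a brief sketch, the size computation for this hypothetical `Foo` layout
/// (offset 6 for `z`, element size 2, alignment 4, matching the diagrams
/// above) looks like this:
///
/// ```
/// // Round the unpadded size up to the nearest multiple of the alignment.
/// fn foo_size(len: usize) -> usize {
///     let (offset, elem_size, align) = (6usize, 2, 4);
///     let unpadded = offset + elem_size * len;
///     (unpadded + align - 1) / align * align
/// }
///
/// assert_eq!(foo_size(0), 8);  // one byte of padding before `z`, two after
/// assert_eq!(foo_size(1), 8);  // no trailing padding needed
/// assert_eq!(foo_size(2), 12); // two bytes of trailing padding again
/// ```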
677///
678/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
679/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
680///
681/// ## What is a valid size?
682///
683/// There are two places in zerocopy's API that we refer to "a valid size" of a
684/// type. In normal casts or conversions, where the source is a byte slice, we
685/// need to know whether the source byte slice is a valid size of the
686/// destination type. In prefix or suffix casts, we need to know whether *there
687/// exists* a valid size of the destination type which fits in the source byte
688/// slice and, if so, what the largest such size is.
689///
690/// As outlined above, a slice DST's size is defined by the number of elements
691/// in its trailing slice field. However, there is not necessarily a 1-to-1
692/// mapping between trailing slice field length and overall size. As we saw in
693/// the previous section with the type `Foo`, instances with both 0 and 1
694/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
695///
696/// When we say "x is a valid size of `T`", we mean one of two things:
697/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
698/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
699/// `T` with `len` trailing slice elements has size `x`
700///
701/// When we say "largest possible size of `T` that fits in a byte slice", we
702/// mean one of two things:
703/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
704/// `size_of::<T>()` bytes long
705/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
706/// that the instance of `T` with `len` trailing slice elements fits in the
707/// byte slice, and to choose the largest such `len`, if any
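///
/// For example, under the hypothetical layout of `Foo` above, the valid sizes
/// of `Foo` are 8, 12, 16, and so on. Given a 9-byte source slice, the largest
/// possible size of `Foo` that fits is 8 bytes, and the largest corresponding
/// `len` is 1 (the `len == 0` instance also has size 8, but the larger `len`
/// is chosen).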
708///
709///
710/// # Safety
711///
712/// This trait does not convey any safety guarantees to code outside this crate.
713///
714/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
715/// releases of zerocopy may make backwards-breaking changes to these items,
716/// including changes that only affect soundness, which may cause code which
717/// uses those items to silently become unsound.
718///
719#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
720#[cfg_attr(
721 not(feature = "derive"),
722 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
723)]
724#[cfg_attr(
725 zerocopy_diagnostic_on_unimplemented_1_78_0,
726 diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
727)]
728pub unsafe trait KnownLayout {
729 // The `Self: Sized` bound makes it so that `KnownLayout` can still be
730 // object safe. It's not currently object safe thanks to `const LAYOUT`, and
731 // it likely won't be in the future, but there's no reason not to be
732 // forwards-compatible with object safety.
733 #[doc(hidden)]
734 fn only_derive_is_allowed_to_implement_this_trait()
735 where
736 Self: Sized;
737
738 /// The type of metadata stored in a pointer to `Self`.
739 ///
740 /// This is `()` for sized types and `usize` for slice DSTs.
741 type PointerMetadata: PointerMetadata;
742
743 /// A maybe-uninitialized analog of `Self`
744 ///
745 /// # Safety
746 ///
747 /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
748 /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
749 #[doc(hidden)]
750 type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
751
752 /// The layout of `Self`.
753 ///
754 /// # Safety
755 ///
756 /// Callers may assume that `LAYOUT` accurately reflects the layout of
757 /// `Self`. In particular:
758 /// - `LAYOUT.align` is equal to `Self`'s alignment
759 /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
760 /// where `size == size_of::<Self>()`
761 /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
762 /// SizeInfo::SliceDst(slice_layout)` where:
763 /// - The size, `size`, of an instance of `Self` with `elems` trailing
764 /// slice elements is equal to `slice_layout.offset +
765 /// slice_layout.elem_size * elems` rounded up to the nearest multiple
766 /// of `LAYOUT.align`
767 /// - For such an instance, any bytes in the range `[slice_layout.offset +
768 /// slice_layout.elem_size * elems, size)` are padding and must not be
769 /// assumed to be initialized
770 #[doc(hidden)]
771 const LAYOUT: DstLayout;
772
773 /// SAFETY: The returned pointer has the same address and provenance as
774 /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
775 /// elements in its trailing slice.
776 #[doc(hidden)]
777 fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
778
779 /// Extracts the metadata from a pointer to `Self`.
780 ///
781 /// # Safety
782 ///
783 /// `pointer_to_metadata` always returns the correct metadata stored in
784 /// `ptr`.
785 #[doc(hidden)]
786 fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
787
788 /// Computes the length of the byte range addressed by `ptr`.
789 ///
790 /// Returns `None` if the resulting length would not fit in a `usize`.
791 ///
792 /// # Safety
793 ///
794 /// Callers may assume that `size_of_val_raw` always returns the correct
795 /// size.
796 ///
797 /// Callers may assume that, if `ptr` addresses a byte range whose length
798 /// fits in a `usize`, this will return `Some`.
799 #[doc(hidden)]
800 #[must_use]
801 #[inline(always)]
802 fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
803 let meta = Self::pointer_to_metadata(ptr.as_ptr());
804 // SAFETY: `size_for_metadata` promises to only return `None` if the
805 // resulting size would not fit in a `usize`.
806 meta.size_for_metadata(Self::LAYOUT)
807 }
808}
809
810/// The metadata associated with a [`KnownLayout`] type.
811#[doc(hidden)]
812pub trait PointerMetadata: Copy + Eq + Debug {
813 /// Constructs a `Self` from an element count.
814 ///
815 /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
816 /// `elems`. No other types are currently supported.
817 fn from_elem_count(elems: usize) -> Self;
818
819 /// Computes the size of the object with the given layout and pointer
820 /// metadata.
821 ///
822 /// # Panics
823 ///
824 /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
825 /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
826 /// panic.
827 ///
828 /// # Safety
829 ///
830 /// `size_for_metadata` promises to only return `None` if the resulting size
831 /// would not fit in a `usize`.
832 fn size_for_metadata(&self, layout: DstLayout) -> Option<usize>;
833}
834
835impl PointerMetadata for () {
836 #[inline]
837 #[allow(clippy::unused_unit)]
838 fn from_elem_count(_elems: usize) -> () {}
839
840 #[inline]
841 fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> {
842 match layout.size_info {
843 SizeInfo::Sized { size } => Some(size),
844 // NOTE: This branch is unreachable, but we return `None` rather
845 // than `unreachable!()` to avoid generating panic paths.
846 SizeInfo::SliceDst(_) => None,
847 }
848 }
849}
850
851impl PointerMetadata for usize {
852 #[inline]
853 fn from_elem_count(elems: usize) -> usize {
854 elems
855 }
856
857 #[inline]
858 fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> {
859 match layout.size_info {
860 SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
861 let slice_len = elem_size.checked_mul(*self)?;
862 let without_padding = offset.checked_add(slice_len)?;
863 without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
864 }
865 // NOTE: This branch is unreachable, but we return `None` rather
866 // than `unreachable!()` to avoid generating panic paths.
867 SizeInfo::Sized { .. } => None,
868 }
869 }
870}
871
872// SAFETY: Delegates safety to `DstLayout::for_slice`.
873unsafe impl<T> KnownLayout for [T] {
874 #[allow(clippy::missing_inline_in_public_items, dead_code)]
875 #[cfg_attr(
876 all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
877 coverage(off)
878 )]
879 fn only_derive_is_allowed_to_implement_this_trait()
880 where
881 Self: Sized,
882 {
883 }
884
885 type PointerMetadata = usize;
886
887 // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
888 // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
889 // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
890 // identical, because they both lack a fixed-sized prefix and because they
891 // inherit the alignments of their inner element type (which are identical)
892 // [2][3].
893 //
894 // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
895 // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
896 // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
897 // back-to-back [2][3].
898 //
899 // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
900 //
901 // `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
902 // `T`
903 //
904 // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
905 //
906 // Slices have the same layout as the section of the array they slice.
907 //
908 // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
909 //
910 // An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
911 // alignment of `T`. Arrays are laid out so that the zero-based `nth`
912 // element of the array is offset from the start of the array by `n *
913 // size_of::<T>()` bytes.
914 type MaybeUninit = [CoreMaybeUninit<T>];
915
916 const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
917
918 // SAFETY: `.cast` preserves address and provenance. The returned pointer
919 // refers to an object with `elems` elements by construction.
920 #[inline(always)]
921 fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
922 // TODO(#67): Remove this allow. See NonNullExt for more details.
923 #[allow(unstable_name_collisions)]
924 NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
925 }
926
927 #[inline(always)]
928 fn pointer_to_metadata(ptr: *mut [T]) -> usize {
929 #[allow(clippy::as_conversions)]
930 let slc = ptr as *const [()];
931
932 // SAFETY:
933 // - `()` has alignment 1, so `slc` is trivially aligned.
934 // - `slc` was derived from a non-null pointer.
935 // - The size is 0 regardless of the length, so it is sound to
936 // materialize a reference regardless of location.
937 // - By invariant, `self.ptr` has valid provenance.
938 let slc = unsafe { &*slc };
939
940 // This is correct because the preceding `as` cast preserves the number
941 // of slice elements. [1]
942 //
943 // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
944 //
945 // For slice types like `[T]` and `[U]`, the raw pointer types `*const
946 // [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
947 // elements in this slice. Casts between these raw pointer types
948 // preserve the number of elements. ... The same holds for `str` and
949 // any compound type whose unsized tail is a slice type, such as
950 // struct `Foo(i32, [u8])` or `(u64, Foo)`.
951 slc.len()
952 }
953}
954
955#[rustfmt::skip]
956impl_known_layout!(
957 (),
958 u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
959 bool, char,
960 NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
961 NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
962);
963#[rustfmt::skip]
964#[cfg(feature = "float-nightly")]
965impl_known_layout!(
966 #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
967 f16,
968 #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
969 f128
970);
971#[rustfmt::skip]
972impl_known_layout!(
973 T => Option<T>,
974 T: ?Sized => PhantomData<T>,
975 T => Wrapping<T>,
976 T => CoreMaybeUninit<T>,
977 T: ?Sized => *const T,
978 T: ?Sized => *mut T,
979 T: ?Sized => &'_ T,
980 T: ?Sized => &'_ mut T,
981);
982impl_known_layout!(const N: usize, T => [T; N]);
983
984safety_comment! {
985 /// SAFETY:
986 /// `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
987 /// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as
988 /// `T`.
989 ///
990 /// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
991 ///
992 /// `ManuallyDrop<T>` is guaranteed to have the same layout and bit
993 /// validity as `T`
994 ///
995 /// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
996 ///
997 /// `UnsafeCell<T>` has the same in-memory representation as its inner
998 /// type `T`.
999 ///
1000 /// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
1001 ///
1002 /// `Cell<T>` has the same in-memory representation as `T`.
1003 unsafe_impl_known_layout!(#[repr([u8])] str);
1004 unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
1005 unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
1006 unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
1007}
1008
1009safety_comment! {
1010 /// SAFETY:
1011 /// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT`
1012 /// and `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit`
1013 /// have the same:
1014 /// - Fixed prefix size
1015 /// - Alignment
1016 /// - (For DSTs) trailing slice element size
1017 /// - By consequence of the above, referents `T::MaybeUninit` and `T` have
1018 /// the same kind of pointer metadata, and thus it is valid to
1019 /// perform an `as` cast from `*mut T` to `*mut T::MaybeUninit`, and this
1020 /// operation preserves referent size (ie, `size_of_val_raw`).
1021 unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>);
1022}
1023
1024/// Analyzes whether a type is [`FromZeros`].
1025///
1026/// This derive analyzes, at compile time, whether the annotated type satisfies
1027/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1028/// supertraits if it is sound to do so. This derive can be applied to structs,
1029/// enums, and unions; e.g.:
1030///
1031/// ```
1032/// # use zerocopy_derive::{FromZeros, Immutable};
1033/// #[derive(FromZeros)]
1034/// struct MyStruct {
1035/// # /*
1036/// ...
1037/// # */
1038/// }
1039///
1040/// #[derive(FromZeros)]
1041/// #[repr(u8)]
1042/// enum MyEnum {
1043/// # Variant0,
1044/// # /*
1045/// ...
1046/// # */
1047/// }
1048///
1049/// #[derive(FromZeros, Immutable)]
1050/// union MyUnion {
1051/// # variant: u8,
1052/// # /*
1053/// ...
1054/// # */
1055/// }
1056/// ```
1057///
1058/// [safety conditions]: trait@FromZeros#safety
1059///
1060/// # Analysis
1061///
1062/// *This section describes, roughly, the analysis performed by this derive to
1063/// determine whether it is sound to implement `FromZeros` for a given type.
1064/// Unless you are modifying the implementation of this derive, or attempting to
1065/// manually implement `FromZeros` for a type yourself, you don't need to read
1066/// this section.*
1067///
1068/// If a type has the following properties, then this derive can implement
1069/// `FromZeros` for that type:
1070///
1071/// - If the type is a struct, all of its fields must be `FromZeros`.
1072/// - If the type is an enum:
1073/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1074/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1075/// - It must have a variant with a discriminant/tag of `0`. See [the
1076/// reference] for a description of how discriminant values are specified.
1078/// - The fields of that variant must be `FromZeros`.
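///
/// For example, a sketch of an enum that satisfies these rules (the variant
/// with discriminant `0` need not be listed first):
///
/// ```
/// use zerocopy::FromZeros;
/// # use zerocopy_derive::*;
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum Status {
///     Active = 1,
///     // The all-zeros byte pattern corresponds to this variant.
///     Inactive = 0,
/// }
///
/// // `new_zeroed`, one of the methods provided by `FromZeros`, produces the
/// // zero variant.
/// assert!(matches!(Status::new_zeroed(), Status::Inactive));
/// ```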
1079///
1080/// This analysis is subject to change. Unsafe code may *only* rely on the
1081/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1082/// implementation details of this derive.
1083///
1084/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1085///
1086/// ## Why isn't an explicit representation required for structs?
1087///
1088/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1089/// that structs are marked with `#[repr(C)]`.
1090///
1091/// Per the [Rust reference][reference],
1092///
1093/// > The representation of a type can change the padding between fields, but
1094/// > does not change the layout of the fields themselves.
1095///
1096/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1097///
1098/// Since the layout of structs only consists of padding bytes and field bytes,
1099/// a struct is soundly `FromZeros` if:
1100/// 1. its padding is soundly `FromZeros`, and
1101/// 2. its fields are soundly `FromZeros`.
1102///
1103/// The first condition is always satisfied: padding bytes do not have
1104/// any validity constraints. A [discussion] of this question in the Unsafe Code
1105/// Guidelines Working Group concluded that it would be virtually unimaginable
1106/// for future versions of rustc to add validity constraints to padding bytes.
1107///
1108/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1109///
1110/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1111/// its fields are `FromZeros`.
1112// TODO(#146): Document why we don't require an enum to have an explicit `repr`
1113// attribute.
1114#[cfg(any(feature = "derive", test))]
1115#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1116pub use zerocopy_derive::FromZeros;
1117
1118/// Analyzes whether a type is [`Immutable`].
1119///
1120/// This derive analyzes, at compile time, whether the annotated type satisfies
1121/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1122/// sound to do so. This derive can be applied to structs, enums, and unions;
1123/// e.g.:
1124///
1125/// ```
1126/// # use zerocopy_derive::Immutable;
1127/// #[derive(Immutable)]
1128/// struct MyStruct {
1129/// # /*
1130/// ...
1131/// # */
1132/// }
1133///
1134/// #[derive(Immutable)]
1135/// enum MyEnum {
1136/// # Variant0,
1137/// # /*
1138/// ...
1139/// # */
1140/// }
1141///
1142/// #[derive(Immutable)]
1143/// union MyUnion {
1144/// # variant: u8,
1145/// # /*
1146/// ...
1147/// # */
1148/// }
1149/// ```
1150///
1151/// # Analysis
1152///
1153/// *This section describes, roughly, the analysis performed by this derive to
1154/// determine whether it is sound to implement `Immutable` for a given type.
1155/// Unless you are modifying the implementation of this derive, you don't need
1156/// to read this section.*
1157///
1158/// If a type has the following properties, then this derive can implement
1159/// `Immutable` for that type:
1160///
1161/// - All fields must be `Immutable`.
1162///
1163/// This analysis is subject to change. Unsafe code may *only* rely on the
1164/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1165/// implementation details of this derive.
1166///
1167/// [safety conditions]: trait@Immutable#safety
1168#[cfg(any(feature = "derive", test))]
1169#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1170pub use zerocopy_derive::Immutable;
1171
1172/// Types which are free from interior mutability.
1173///
1174/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1175/// by ownership or an exclusive (`&mut`) borrow.
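///
/// As a rough sketch of the distinction (types containing `Cell`, `RefCell`,
/// or atomics permit mutation through a shared reference and so are not
/// `Immutable`):
///
/// ```
/// use zerocopy::Immutable;
/// # use zerocopy_derive::*;
///
/// #[derive(Immutable)]
/// struct Plain {
///     counter: u8,
/// }
///
/// fn requires_immutable<T: Immutable>(_t: &T) {}
///
/// requires_immutable(&Plain { counter: 0 });
/// requires_immutable(&0u8);
/// // By contrast, `core::cell::Cell<u8>` can be mutated through `&Cell<u8>`,
/// // so passing `&Cell::new(0u8)` here would not compile.
/// ```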
1176///
1177/// # Implementation
1178///
1179/// **Do not implement this trait yourself!** Instead, use
1180/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1181/// e.g.:
1182///
1183/// ```
1184/// # use zerocopy_derive::Immutable;
1185/// #[derive(Immutable)]
1186/// struct MyStruct {
1187/// # /*
1188/// ...
1189/// # */
1190/// }
1191///
1192/// #[derive(Immutable)]
1193/// enum MyEnum {
1194/// # /*
1195/// ...
1196/// # */
1197/// }
1198///
1199/// #[derive(Immutable)]
1200/// union MyUnion {
1201/// # variant: u8,
1202/// # /*
1203/// ...
1204/// # */
1205/// }
1206/// ```
1207///
1208/// This derive performs a sophisticated, compile-time safety analysis to
1209/// determine whether a type is `Immutable`.
1210///
1211/// # Safety
1212///
1213/// Unsafe code outside of this crate must not make any assumptions about `T`
1214/// based on `T: Immutable`. We reserve the right to relax the requirements for
1215/// `Immutable` in the future, and if unsafe code outside of this crate makes
1216/// assumptions based on `T: Immutable`, future relaxations may cause that code
1217/// to become unsound.
1218///
1219// # Safety (Internal)
1220//
1221// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1222// `t: &T`, `t` does not contain any [`UnsafeCell`]s at any byte location
1223// within the byte range addressed by `t`. This includes ranges of length 0
1224// (e.g., `UnsafeCell<()>` and `[UnsafeCell<u8>; 0]`). If a type which
1225// violates this assumption implements `Immutable`, it may cause this crate to
1226// exhibit [undefined behavior].
1227//
1228// [`UnsafeCell`]: core::cell::UnsafeCell
1229// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1230#[cfg_attr(
1231 feature = "derive",
1232 doc = "[derive]: zerocopy_derive::Immutable",
1233 doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1234)]
1235#[cfg_attr(
1236 not(feature = "derive"),
1237 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1238 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1239)]
1240#[cfg_attr(
1241 zerocopy_diagnostic_on_unimplemented_1_78_0,
1242 diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1243)]
1244pub unsafe trait Immutable {
1245 // The `Self: Sized` bound makes it so that `Immutable` is still object
1246 // safe.
1247 #[doc(hidden)]
1248 fn only_derive_is_allowed_to_implement_this_trait()
1249 where
1250 Self: Sized;
1251}
1252
1253/// Implements [`TryFromBytes`].
1254///
1255/// This derive synthesizes the runtime checks required to check whether a
1256/// sequence of initialized bytes corresponds to a valid instance of a type.
1257/// This derive can be applied to structs, enums, and unions; e.g.:
1258///
1259/// ```
1260/// # use zerocopy_derive::{TryFromBytes, Immutable};
1261/// #[derive(TryFromBytes)]
1262/// struct MyStruct {
1263/// # /*
1264/// ...
1265/// # */
1266/// }
1267///
1268/// #[derive(TryFromBytes)]
1269/// #[repr(u8)]
1270/// enum MyEnum {
1271/// # V00,
1272/// # /*
1273/// ...
1274/// # */
1275/// }
1276///
1277/// #[derive(TryFromBytes, Immutable)]
1278/// union MyUnion {
1279/// # variant: u8,
1280/// # /*
1281/// ...
1282/// # */
1283/// }
1284/// ```
1285///
1286/// # Portability
1287///
1288/// To ensure consistent endianness for enums with multi-byte representations,
1289/// explicitly specify and convert each discriminant using `.to_le()` or
1290/// `.to_be()`; e.g.:
1291///
1292/// ```
1293/// # use zerocopy_derive::TryFromBytes;
1294/// // `DataStoreVersion` is encoded in little-endian.
1295/// #[derive(TryFromBytes)]
1296/// #[repr(u32)]
1297/// pub enum DataStoreVersion {
1298/// /// Version 1 of the data store.
1299/// V1 = 9u32.to_le(),
1300///
1301/// /// Version 2 of the data store.
1302/// V2 = 10u32.to_le(),
1303/// }
1304/// ```
1305///
1306/// [safety conditions]: trait@TryFromBytes#safety
1307#[cfg(any(feature = "derive", test))]
1308#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1309pub use zerocopy_derive::TryFromBytes;
1310
1311/// Types for which some bit patterns are valid.
1312///
1313/// A memory region of the appropriate length which contains initialized bytes
1314/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1315/// bytes corresponds to a [*valid instance*] of that type. For example,
1316/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1317/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1318/// `1`.
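///
/// For example, a sketch of that check in action (`try_read_from_bytes` is a
/// zerocopy 0.8 method of this trait; see the method documentation for its
/// exact signature):
///
/// ```
/// use zerocopy::TryFromBytes;
///
/// // 1 is a valid `bool`, so the runtime check succeeds...
/// assert!(bool::try_read_from_bytes(&[1u8]).unwrap());
/// // ...but 3 is not.
/// assert!(bool::try_read_from_bytes(&[3u8]).is_err());
/// ```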
1319///
1320/// # Implementation
1321///
1322/// **Do not implement this trait yourself!** Instead, use
1323/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1324///
1325/// ```
1326/// # use zerocopy_derive::{TryFromBytes, Immutable};
1327/// #[derive(TryFromBytes)]
1328/// struct MyStruct {
1329/// # /*
1330/// ...
1331/// # */
1332/// }
1333///
1334/// #[derive(TryFromBytes)]
1335/// #[repr(u8)]
1336/// enum MyEnum {
1337/// # V00,
1338/// # /*
1339/// ...
1340/// # */
1341/// }
1342///
1343/// #[derive(TryFromBytes, Immutable)]
1344/// union MyUnion {
1345/// # variant: u8,
1346/// # /*
1347/// ...
1348/// # */
1349/// }
1350/// ```
1351///
1352/// This derive ensures that the runtime check of whether bytes correspond to a
1353/// valid instance is sound. You **must** implement this trait via the derive.
1354///
1355/// # What is a "valid instance"?
1356///
1357/// In Rust, each type has *bit validity*, which refers to the set of bit
1358/// patterns which may appear in an instance of that type. It is impossible for
1359/// safe Rust code to produce values which violate bit validity (ie, values
1360/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1361/// invalid value, this is considered [undefined behavior].
1362///
1363/// Rust's bit validity rules are currently being decided, which means that some
1364/// types have three classes of bit patterns: those which are definitely valid,
1365/// and whose validity is documented in the language; those which may or may not
1366/// be considered valid at some point in the future; and those which are
1367/// definitely invalid.
1368///
1369/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1370/// be valid if its validity is a documented guarantee provided by the
1371/// language.
1372///
1373/// For most use cases, Rust's current guarantees align with programmers'
1374/// intuitions about what ought to be valid. As a result, zerocopy's
1375/// conservatism should not affect most users.
1376///
1377/// If you are negatively affected by lack of support for a particular type,
1378/// we encourage you to let us know by [filing an issue][github-repo].
1379///
1380/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1381///
1382/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1383/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1384/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1385/// IntoBytes`, there exist values of `t: T` such that
1386/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1387/// generally assume that values produced by `IntoBytes` will necessarily be
1388/// accepted as valid by `TryFromBytes`.
1389///
1390/// # Safety
1391///
1392/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1393/// or representation of `T`. It merely provides the ability to perform a
1394/// validity check at runtime via methods like [`try_ref_from_bytes`].
1395///
1396/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1397/// Future releases of zerocopy may make backwards-breaking changes to these
1398/// items, including changes that only affect soundness, which may cause code
1399/// which uses those items to silently become unsound.
1400///
1401/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1402/// [github-repo]: https://github.com/google/zerocopy
1403/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1404/// [*valid instance*]: #what-is-a-valid-instance
1405#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1406#[cfg_attr(
1407 not(feature = "derive"),
1408 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1409)]
1410#[cfg_attr(
1411 zerocopy_diagnostic_on_unimplemented_1_78_0,
1412 diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1413)]
1414pub unsafe trait TryFromBytes {
1415 // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
1416 // safe.
1417 #[doc(hidden)]
1418 fn only_derive_is_allowed_to_implement_this_trait()
1419 where
1420 Self: Sized;
1421
1422 /// Does a given memory range contain a valid instance of `Self`?
1423 ///
1424 /// # Safety
1425 ///
1426 /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
1427 /// `*candidate` contains a valid `Self`.
1428 ///
1429 /// # Panics
1430 ///
1431 /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
1432 /// `unsafe` code remains sound even in the face of `is_bit_valid`
1433 /// panicking. (We support user-defined validation routines; so long as
1434 /// these routines are not required to be `unsafe`, there is no way to
1435 /// ensure that these do not generate panics.)
1436 ///
1437 /// Besides user-defined validation routines panicking, `is_bit_valid` will
1438 /// either panic or fail to compile if called on a pointer with [`Shared`]
1439 /// aliasing when `Self: !Immutable`.
1440 ///
1441 /// [`UnsafeCell`]: core::cell::UnsafeCell
1442 /// [`Shared`]: invariant::Shared
1443 #[doc(hidden)]
1444 fn is_bit_valid<A: invariant::Reference>(candidate: Maybe<'_, Self, A>) -> bool;
1445
1446 /// Attempts to interpret the given `source` as a `&Self`.
1447 ///
1448 /// If the bytes of `source` are a valid instance of `Self`, this method
1449 /// returns a reference to those bytes interpreted as a `Self`. If the
1450 /// length of `source` is not a [valid size of `Self`][valid-size], or if
1451 /// `source` is not appropriately aligned, or if `source` is not a valid
1452 /// instance of `Self`, this returns `Err`. If [`Self:
1453 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1454 /// error][ConvertError::from].
1455 ///
1456 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1457 ///
1458 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1459 /// [self-unaligned]: Unaligned
1460 /// [slice-dst]: KnownLayout#dynamically-sized-types
1461 ///
1462 /// # Compile-Time Assertions
1463 ///
1464 /// This method cannot yet be used on unsized types whose dynamically-sized
1465 /// component is zero-sized. Attempting to use this method on such types
1466 /// results in a compile-time assertion error; e.g.:
1467 ///
1468 /// ```compile_fail,E0080
1469 /// use zerocopy::*;
1470 /// # use zerocopy_derive::*;
1471 ///
1472 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1473 /// #[repr(C)]
1474 /// struct ZSTy {
1475 /// leading_sized: u16,
1476 /// trailing_dst: [()],
1477 /// }
1478 ///
1479 /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // âš Compile Error!
1480 /// ```
1481 ///
1482 /// # Examples
1483 ///
1484 /// ```
1485 /// use zerocopy::TryFromBytes;
1486 /// # use zerocopy_derive::*;
1487 ///
1488 /// // The only valid value of this type is the byte `0xC0`
1489 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1490 /// #[repr(u8)]
1491 /// enum C0 { xC0 = 0xC0 }
1492 ///
1493 /// // The only valid value of this type is the byte sequence `0xC0C0`.
1494 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1495 /// #[repr(C)]
1496 /// struct C0C0(C0, C0);
1497 ///
1498 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1499 /// #[repr(C)]
1500 /// struct Packet {
1501 /// magic_number: C0C0,
1502 /// mug_size: u8,
1503 /// temperature: u8,
1504 /// marshmallows: [[u8; 2]],
1505 /// }
1506 ///
1507 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1508 ///
1509 /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1510 ///
1511 /// assert_eq!(packet.mug_size, 240);
1512 /// assert_eq!(packet.temperature, 77);
1513 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1514 ///
1515    /// // These bytes are not a valid instance of `Packet`.
1516 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1517 /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1518 /// ```
1519 #[must_use = "has no side effects"]
1520 #[inline]
1521 fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1522 where
1523 Self: KnownLayout + Immutable,
1524 {
1525 static_assert_dst_is_not_zst!(Self);
1526 match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1527 Ok(source) => {
1528 // This call may panic. If that happens, it doesn't cause any soundness
1529 // issues, as we have not generated any invalid state which we need to
1530 // fix before returning.
1531 //
1532 // Note that one panic or post-monomorphization error condition is
1533 // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1534 // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
1535 // condition will not happen.
1536 match source.try_into_valid() {
1537 Ok(valid) => Ok(valid.as_ref()),
1538 Err(e) => {
1539 Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1540 }
1541 }
1542 }
1543 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1544 }
1545 }
1546
1547 /// Attempts to interpret the prefix of the given `source` as a `&Self`.
1548 ///
1549 /// This method computes the [largest possible size of `Self`][valid-size]
1550 /// that can fit in the leading bytes of `source`. If that prefix is a valid
1551 /// instance of `Self`, this method returns a reference to those bytes
1552 /// interpreted as `Self`, and a reference to the remaining bytes. If there
1553 /// are insufficient bytes, or if `source` is not appropriately aligned, or
1554 /// if those bytes are not a valid instance of `Self`, this returns `Err`.
1555 /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1556 /// alignment error][ConvertError::from].
1557 ///
1558 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1559 ///
1560 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1561 /// [self-unaligned]: Unaligned
1562 /// [slice-dst]: KnownLayout#dynamically-sized-types
1563 ///
1564 /// # Compile-Time Assertions
1565 ///
1566 /// This method cannot yet be used on unsized types whose dynamically-sized
1567 /// component is zero-sized. Attempting to use this method on such types
1568 /// results in a compile-time assertion error; e.g.:
1569 ///
1570 /// ```compile_fail,E0080
1571 /// use zerocopy::*;
1572 /// # use zerocopy_derive::*;
1573 ///
1574 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1575 /// #[repr(C)]
1576 /// struct ZSTy {
1577 /// leading_sized: u16,
1578 /// trailing_dst: [()],
1579 /// }
1580 ///
1581 /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // âš Compile Error!
1582 /// ```
1583 ///
1584 /// # Examples
1585 ///
1586 /// ```
1587 /// use zerocopy::TryFromBytes;
1588 /// # use zerocopy_derive::*;
1589 ///
1590 /// // The only valid value of this type is the byte `0xC0`
1591 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1592 /// #[repr(u8)]
1593 /// enum C0 { xC0 = 0xC0 }
1594 ///
1595 /// // The only valid value of this type is the bytes `0xC0C0`.
1596 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1597 /// #[repr(C)]
1598 /// struct C0C0(C0, C0);
1599 ///
1600 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1601 /// #[repr(C)]
1602 /// struct Packet {
1603 /// magic_number: C0C0,
1604 /// mug_size: u8,
1605 /// temperature: u8,
1606 /// marshmallows: [[u8; 2]],
1607 /// }
1608 ///
1609 /// // These are more bytes than are needed to encode a `Packet`.
1610 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1611 ///
1612 /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
1613 ///
1614 /// assert_eq!(packet.mug_size, 240);
1615 /// assert_eq!(packet.temperature, 77);
1616 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1617 /// assert_eq!(suffix, &[6u8][..]);
1618 ///
1619    /// // These bytes are not a valid instance of `Packet`.
1620 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1621 /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
1622 /// ```
1623 #[must_use = "has no side effects"]
1624 #[inline]
1625 fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
1626 where
1627 Self: KnownLayout + Immutable,
1628 {
1629 static_assert_dst_is_not_zst!(Self);
1630 try_ref_from_prefix_suffix(source, CastType::Prefix, None)
1631 }
1632
1633 /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1634 ///
1635 /// This method computes the [largest possible size of `Self`][valid-size]
1636 /// that can fit in the trailing bytes of `source`. If that suffix is a
1637 /// valid instance of `Self`, this method returns a reference to those bytes
1638 /// interpreted as `Self`, and a reference to the preceding bytes. If there
1639 /// are insufficient bytes, or if the suffix of `source` would not be
1640 /// appropriately aligned, or if the suffix is not a valid instance of
1641 /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1642 /// can [infallibly discard the alignment error][ConvertError::from].
1643 ///
1644 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1645 ///
1646 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1647 /// [self-unaligned]: Unaligned
1648 /// [slice-dst]: KnownLayout#dynamically-sized-types
1649 ///
1650 /// # Compile-Time Assertions
1651 ///
1652 /// This method cannot yet be used on unsized types whose dynamically-sized
1653 /// component is zero-sized. Attempting to use this method on such types
1654 /// results in a compile-time assertion error; e.g.:
1655 ///
1656 /// ```compile_fail,E0080
1657 /// use zerocopy::*;
1658 /// # use zerocopy_derive::*;
1659 ///
1660 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1661 /// #[repr(C)]
1662 /// struct ZSTy {
1663 /// leading_sized: u16,
1664 /// trailing_dst: [()],
1665 /// }
1666 ///
1667 /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // âš Compile Error!
1668 /// ```
1669 ///
1670 /// # Examples
1671 ///
1672 /// ```
1673 /// use zerocopy::TryFromBytes;
1674 /// # use zerocopy_derive::*;
1675 ///
1676 /// // The only valid value of this type is the byte `0xC0`
1677 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1678 /// #[repr(u8)]
1679 /// enum C0 { xC0 = 0xC0 }
1680 ///
1681 /// // The only valid value of this type is the bytes `0xC0C0`.
1682 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1683 /// #[repr(C)]
1684 /// struct C0C0(C0, C0);
1685 ///
1686 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1687 /// #[repr(C)]
1688 /// struct Packet {
1689 /// magic_number: C0C0,
1690 /// mug_size: u8,
1691 /// temperature: u8,
1692 /// marshmallows: [[u8; 2]],
1693 /// }
1694 ///
1695 /// // These are more bytes than are needed to encode a `Packet`.
1696 /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1697 ///
1698 /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1699 ///
1700 /// assert_eq!(packet.mug_size, 240);
1701 /// assert_eq!(packet.temperature, 77);
1702 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1703 /// assert_eq!(prefix, &[0u8][..]);
1704 ///
1705    /// // These bytes are not a valid instance of `Packet`.
1706 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1707 /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1708 /// ```
1709 #[must_use = "has no side effects"]
1710 #[inline]
1711 fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
1712 where
1713 Self: KnownLayout + Immutable,
1714 {
1715 static_assert_dst_is_not_zst!(Self);
1716 try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1717 }
1718
1719 /// Attempts to interpret the given `source` as a `&mut Self` without
1720 /// copying.
1721 ///
1722 /// If the bytes of `source` are a valid instance of `Self`, this method
1723 /// returns a reference to those bytes interpreted as a `Self`. If the
1724 /// length of `source` is not a [valid size of `Self`][valid-size], or if
1725 /// `source` is not appropriately aligned, or if `source` is not a valid
1726 /// instance of `Self`, this returns `Err`. If [`Self:
1727 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1728 /// error][ConvertError::from].
1729 ///
1730 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1731 ///
1732 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1733 /// [self-unaligned]: Unaligned
1734 /// [slice-dst]: KnownLayout#dynamically-sized-types
1735 ///
1736 /// # Compile-Time Assertions
1737 ///
1738 /// This method cannot yet be used on unsized types whose dynamically-sized
1739 /// component is zero-sized. Attempting to use this method on such types
1740 /// results in a compile-time assertion error; e.g.:
1741 ///
1742 /// ```compile_fail,E0080
1743 /// use zerocopy::*;
1744 /// # use zerocopy_derive::*;
1745 ///
1746 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1747 /// #[repr(C, packed)]
1748 /// struct ZSTy {
1749 /// leading_sized: [u8; 2],
1750 /// trailing_dst: [()],
1751 /// }
1752 ///
1753 /// let mut source = [85, 85];
1754 /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš Compile Error!
1755 /// ```
1756 ///
1757 /// # Examples
1758 ///
1759 /// ```
1760 /// use zerocopy::TryFromBytes;
1761 /// # use zerocopy_derive::*;
1762 ///
1763 /// // The only valid value of this type is the byte `0xC0`
1764 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1765 /// #[repr(u8)]
1766 /// enum C0 { xC0 = 0xC0 }
1767 ///
1768 /// // The only valid value of this type is the bytes `0xC0C0`.
1769 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1770 /// #[repr(C)]
1771 /// struct C0C0(C0, C0);
1772 ///
1773 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1774 /// #[repr(C, packed)]
1775 /// struct Packet {
1776 /// magic_number: C0C0,
1777 /// mug_size: u8,
1778 /// temperature: u8,
1779 /// marshmallows: [[u8; 2]],
1780 /// }
1781 ///
1782 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1783 ///
1784 /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
1785 ///
1786 /// assert_eq!(packet.mug_size, 240);
1787 /// assert_eq!(packet.temperature, 77);
1788 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1789 ///
1790 /// packet.temperature = 111;
1791 ///
1792 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
1793 ///
1794    /// // These bytes are not a valid instance of `Packet`.
1795    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1796 /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
1797 /// ```
1798 #[must_use = "has no side effects"]
1799 #[inline]
1800    fn try_mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
1801 where
1802 Self: KnownLayout + IntoBytes,
1803 {
1804 static_assert_dst_is_not_zst!(Self);
1805        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
1806 Ok(source) => {
1807 // This call may panic. If that happens, it doesn't cause any soundness
1808 // issues, as we have not generated any invalid state which we need to
1809 // fix before returning.
1810 //
1811 // Note that one panic or post-monomorphization error condition is
1812 // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1813                // pointer when `Self: !Immutable`. Since `source` has `Exclusive`
1814                // aliasing, this panic condition will not happen.
1815 match source.try_into_valid() {
1816 Ok(source) => Ok(source.as_mut()),
1817 Err(e) => {
1818 Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
1819 }
1820 }
1821 }
1822 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
1823 }
1824 }
1825
1826 /// Attempts to interpret the prefix of the given `source` as a `&mut
1827 /// Self`.
1828 ///
1829 /// This method computes the [largest possible size of `Self`][valid-size]
1830 /// that can fit in the leading bytes of `source`. If that prefix is a valid
1831 /// instance of `Self`, this method returns a reference to those bytes
1832 /// interpreted as `Self`, and a reference to the remaining bytes. If there
1833 /// are insufficient bytes, or if `source` is not appropriately aligned, or
1834 /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
1835 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1836 /// alignment error][ConvertError::from].
1837 ///
1838 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1839 ///
1840 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1841 /// [self-unaligned]: Unaligned
1842 /// [slice-dst]: KnownLayout#dynamically-sized-types
1843 ///
1844 /// # Compile-Time Assertions
1845 ///
1846 /// This method cannot yet be used on unsized types whose dynamically-sized
1847 /// component is zero-sized. Attempting to use this method on such types
1848 /// results in a compile-time assertion error; e.g.:
1849 ///
1850 /// ```compile_fail,E0080
1851 /// use zerocopy::*;
1852 /// # use zerocopy_derive::*;
1853 ///
1854 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1855 /// #[repr(C, packed)]
1856 /// struct ZSTy {
1857 /// leading_sized: [u8; 2],
1858 /// trailing_dst: [()],
1859 /// }
1860 ///
1861 /// let mut source = [85, 85];
1862 /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // âš Compile Error!
1863 /// ```
1864 ///
1865 /// # Examples
1866 ///
1867 /// ```
1868 /// use zerocopy::TryFromBytes;
1869 /// # use zerocopy_derive::*;
1870 ///
1871 /// // The only valid value of this type is the byte `0xC0`
1872 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1873 /// #[repr(u8)]
1874 /// enum C0 { xC0 = 0xC0 }
1875 ///
1876 /// // The only valid value of this type is the bytes `0xC0C0`.
1877 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1878 /// #[repr(C)]
1879 /// struct C0C0(C0, C0);
1880 ///
1881 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1882 /// #[repr(C, packed)]
1883 /// struct Packet {
1884 /// magic_number: C0C0,
1885 /// mug_size: u8,
1886 /// temperature: u8,
1887 /// marshmallows: [[u8; 2]],
1888 /// }
1889 ///
1890 /// // These are more bytes than are needed to encode a `Packet`.
1891 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1892 ///
1893 /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
1894 ///
1895 /// assert_eq!(packet.mug_size, 240);
1896 /// assert_eq!(packet.temperature, 77);
1897 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1898 /// assert_eq!(suffix, &[6u8][..]);
1899 ///
1900 /// packet.temperature = 111;
1901 /// suffix[0] = 222;
1902 ///
1903 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
1904 ///
1905    /// // These bytes are not a valid instance of `Packet`.
1906 /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1907 /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
1908 /// ```
1909 #[must_use = "has no side effects"]
1910 #[inline]
1911 fn try_mut_from_prefix(
1912 source: &mut [u8],
1913 ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
1914 where
1915 Self: KnownLayout + IntoBytes,
1916 {
1917 static_assert_dst_is_not_zst!(Self);
1918 try_mut_from_prefix_suffix(source, CastType::Prefix, None)
1919 }
1920
1921 /// Attempts to interpret the suffix of the given `source` as a `&mut
1922 /// Self`.
1923 ///
1924 /// This method computes the [largest possible size of `Self`][valid-size]
1925 /// that can fit in the trailing bytes of `source`. If that suffix is a
1926 /// valid instance of `Self`, this method returns a reference to those bytes
1927 /// interpreted as `Self`, and a reference to the preceding bytes. If there
1928 /// are insufficient bytes, or if the suffix of `source` would not be
1929 /// appropriately aligned, or if the suffix is not a valid instance of
1930 /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1931 /// can [infallibly discard the alignment error][ConvertError::from].
1932 ///
1933 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1934 ///
1935 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1936 /// [self-unaligned]: Unaligned
1937 /// [slice-dst]: KnownLayout#dynamically-sized-types
1938 ///
1939 /// # Compile-Time Assertions
1940 ///
1941 /// This method cannot yet be used on unsized types whose dynamically-sized
1942 /// component is zero-sized. Attempting to use this method on such types
1943 /// results in a compile-time assertion error; e.g.:
1944 ///
1945 /// ```compile_fail,E0080
1946 /// use zerocopy::*;
1947 /// # use zerocopy_derive::*;
1948 ///
1949 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1950 /// #[repr(C, packed)]
1951 /// struct ZSTy {
1952 /// leading_sized: u16,
1953 /// trailing_dst: [()],
1954 /// }
1955 ///
1956 /// let mut source = [85, 85];
1957 /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // âš Compile Error!
1958 /// ```
1959 ///
1960 /// # Examples
1961 ///
1962 /// ```
1963 /// use zerocopy::TryFromBytes;
1964 /// # use zerocopy_derive::*;
1965 ///
1966 /// // The only valid value of this type is the byte `0xC0`
1967 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1968 /// #[repr(u8)]
1969 /// enum C0 { xC0 = 0xC0 }
1970 ///
1971 /// // The only valid value of this type is the bytes `0xC0C0`.
1972 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1973 /// #[repr(C)]
1974 /// struct C0C0(C0, C0);
1975 ///
1976 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1977 /// #[repr(C, packed)]
1978 /// struct Packet {
1979 /// magic_number: C0C0,
1980 /// mug_size: u8,
1981 /// temperature: u8,
1982 /// marshmallows: [[u8; 2]],
1983 /// }
1984 ///
1985 /// // These are more bytes than are needed to encode a `Packet`.
1986 /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1987 ///
1988 /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
1989 ///
1990 /// assert_eq!(packet.mug_size, 240);
1991 /// assert_eq!(packet.temperature, 77);
1992 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1993 /// assert_eq!(prefix, &[0u8][..]);
1994 ///
1995 /// prefix[0] = 111;
1996 /// packet.temperature = 222;
1997 ///
1998 /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
1999 ///
2000    /// // These bytes are not a valid instance of `Packet`.
2001 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2002 /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2003 /// ```
2004 #[must_use = "has no side effects"]
2005 #[inline]
2006 fn try_mut_from_suffix(
2007 source: &mut [u8],
2008 ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2009 where
2010 Self: KnownLayout + IntoBytes,
2011 {
2012 static_assert_dst_is_not_zst!(Self);
2013 try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
2014 }
2015
2016 /// Attempts to interpret the given `source` as a `&Self` with a DST length
2017 /// equal to `count`.
2018 ///
2019 /// This method attempts to return a reference to `source` interpreted as a
2020 /// `Self` with `count` trailing elements. If the length of `source` is not
2021 /// equal to the size of `Self` with `count` elements, if `source` is not
2022 /// appropriately aligned, or if `source` does not contain a valid instance
2023 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2024 /// you can [infallibly discard the alignment error][ConvertError::from].
2025 ///
2026 /// [self-unaligned]: Unaligned
2027 /// [slice-dst]: KnownLayout#dynamically-sized-types
2028 ///
2029 /// # Examples
2030 ///
2031 /// ```
2032 /// # #![allow(non_camel_case_types)] // For C0::xC0
2033 /// use zerocopy::TryFromBytes;
2034 /// # use zerocopy_derive::*;
2035 ///
2036 /// // The only valid value of this type is the byte `0xC0`
2037 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2038 /// #[repr(u8)]
2039 /// enum C0 { xC0 = 0xC0 }
2040 ///
2041 /// // The only valid value of this type is the bytes `0xC0C0`.
2042 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2043 /// #[repr(C)]
2044 /// struct C0C0(C0, C0);
2045 ///
2046 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2047 /// #[repr(C)]
2048 /// struct Packet {
2049 /// magic_number: C0C0,
2050 /// mug_size: u8,
2051 /// temperature: u8,
2052 /// marshmallows: [[u8; 2]],
2053 /// }
2054 ///
2055 /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2056 ///
2057 /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2058 ///
2059 /// assert_eq!(packet.mug_size, 240);
2060 /// assert_eq!(packet.temperature, 77);
2061 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2062 ///
2063    /// // These bytes are not a valid instance of `Packet`.
2064 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2065 /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2066 /// ```
2067 ///
2068 /// Since an explicit `count` is provided, this method supports types with
2069 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2070 /// which do not take an explicit count do not support such types.
2071 ///
2072 /// ```
2073 /// use core::num::NonZeroU16;
2074 /// use zerocopy::*;
2075 /// # use zerocopy_derive::*;
2076 ///
2077 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2078 /// #[repr(C)]
2079 /// struct ZSTy {
2080 /// leading_sized: NonZeroU16,
2081 /// trailing_dst: [()],
2082 /// }
2083 ///
2084 /// let src = 0xCAFEu16.as_bytes();
2085 /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2086 /// assert_eq!(zsty.trailing_dst.len(), 42);
2087 /// ```
2088 ///
2089 /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2090 #[must_use = "has no side effects"]
2091 #[inline]
2092 fn try_ref_from_bytes_with_elems(
2093 source: &[u8],
2094 count: usize,
2095 ) -> Result<&Self, TryCastError<&[u8], Self>>
2096 where
2097 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2098 {
2099 match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
2100 {
2101 Ok(source) => {
2102 // This call may panic. If that happens, it doesn't cause any soundness
2103 // issues, as we have not generated any invalid state which we need to
2104 // fix before returning.
2105 //
2106 // Note that one panic or post-monomorphization error condition is
2107 // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2108 // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
2109 // condition will not happen.
2110 match source.try_into_valid() {
2111 Ok(source) => Ok(source.as_ref()),
2112 Err(e) => {
2113 Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
2114 }
2115 }
2116 }
2117 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2118 }
2119 }
2120
2121 /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2122 /// a DST length equal to `count`.
2123 ///
2124 /// This method attempts to return a reference to the prefix of `source`
2125 /// interpreted as a `Self` with `count` trailing elements, and a reference
2126 /// to the remaining bytes. If the length of `source` is less than the size
2127 /// of `Self` with `count` elements, if `source` is not appropriately
2128 /// aligned, or if the prefix of `source` does not contain a valid instance
2129 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2130 /// you can [infallibly discard the alignment error][ConvertError::from].
2131 ///
2132 /// [self-unaligned]: Unaligned
2133 /// [slice-dst]: KnownLayout#dynamically-sized-types
2134 ///
2135 /// # Examples
2136 ///
2137 /// ```
2138 /// # #![allow(non_camel_case_types)] // For C0::xC0
2139 /// use zerocopy::TryFromBytes;
2140 /// # use zerocopy_derive::*;
2141 ///
2142 /// // The only valid value of this type is the byte `0xC0`
2143 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2144 /// #[repr(u8)]
2145 /// enum C0 { xC0 = 0xC0 }
2146 ///
2147 /// // The only valid value of this type is the bytes `0xC0C0`.
2148 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2149 /// #[repr(C)]
2150 /// struct C0C0(C0, C0);
2151 ///
2152 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2153 /// #[repr(C)]
2154 /// struct Packet {
2155 /// magic_number: C0C0,
2156 /// mug_size: u8,
2157 /// temperature: u8,
2158 /// marshmallows: [[u8; 2]],
2159 /// }
2160 ///
2161 /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2162 ///
2163 /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2164 ///
2165 /// assert_eq!(packet.mug_size, 240);
2166 /// assert_eq!(packet.temperature, 77);
2167 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2168 /// assert_eq!(suffix, &[8u8][..]);
2169 ///
2170    /// // These bytes are not a valid instance of `Packet`.
2171 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2172 /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2173 /// ```
2174 ///
2175 /// Since an explicit `count` is provided, this method supports types with
2176 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2177 /// which do not take an explicit count do not support such types.
2178 ///
2179 /// ```
2180 /// use core::num::NonZeroU16;
2181 /// use zerocopy::*;
2182 /// # use zerocopy_derive::*;
2183 ///
2184 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2185 /// #[repr(C)]
2186 /// struct ZSTy {
2187 /// leading_sized: NonZeroU16,
2188 /// trailing_dst: [()],
2189 /// }
2190 ///
2191 /// let src = 0xCAFEu16.as_bytes();
2192 /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2193 /// assert_eq!(zsty.trailing_dst.len(), 42);
2194 /// ```
2195 ///
2196 /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2197 #[must_use = "has no side effects"]
2198 #[inline]
2199 fn try_ref_from_prefix_with_elems(
2200 source: &[u8],
2201 count: usize,
2202 ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2203 where
2204 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2205 {
2206 try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2207 }
2208
2209 /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2210 /// a DST length equal to `count`.
2211 ///
2212 /// This method attempts to return a reference to the suffix of `source`
2213 /// interpreted as a `Self` with `count` trailing elements, and a reference
2214 /// to the preceding bytes. If the length of `source` is less than the size
2215 /// of `Self` with `count` elements, if the suffix of `source` is not
2216 /// appropriately aligned, or if the suffix of `source` does not contain a
2217 /// valid instance of `Self`, this returns `Err`. If [`Self:
2218 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2219 /// error][ConvertError::from].
2220 ///
2221 /// [self-unaligned]: Unaligned
2222 /// [slice-dst]: KnownLayout#dynamically-sized-types
2223 ///
2224 /// # Examples
2225 ///
2226 /// ```
2227 /// # #![allow(non_camel_case_types)] // For C0::xC0
2228 /// use zerocopy::TryFromBytes;
2229 /// # use zerocopy_derive::*;
2230 ///
2231 /// // The only valid value of this type is the byte `0xC0`
2232 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2233 /// #[repr(u8)]
2234 /// enum C0 { xC0 = 0xC0 }
2235 ///
2236 /// // The only valid value of this type is the bytes `0xC0C0`.
2237 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2238 /// #[repr(C)]
2239 /// struct C0C0(C0, C0);
2240 ///
2241 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2242 /// #[repr(C)]
2243 /// struct Packet {
2244 /// magic_number: C0C0,
2245 /// mug_size: u8,
2246 /// temperature: u8,
2247 /// marshmallows: [[u8; 2]],
2248 /// }
2249 ///
2250 /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2251 ///
2252 /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2253 ///
2254 /// assert_eq!(packet.mug_size, 240);
2255 /// assert_eq!(packet.temperature, 77);
2256 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2257 /// assert_eq!(prefix, &[123u8][..]);
2258 ///
2259    /// // These bytes are not a valid instance of `Packet`.
2260 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2261 /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2262 /// ```
2263 ///
2264 /// Since an explicit `count` is provided, this method supports types with
2265    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_suffix`]
2266 /// which do not take an explicit count do not support such types.
2267 ///
2268 /// ```
2269 /// use core::num::NonZeroU16;
2270 /// use zerocopy::*;
2271 /// # use zerocopy_derive::*;
2272 ///
2273 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2274 /// #[repr(C)]
2275 /// struct ZSTy {
2276 /// leading_sized: NonZeroU16,
2277 /// trailing_dst: [()],
2278 /// }
2279 ///
2280 /// let src = 0xCAFEu16.as_bytes();
2281 /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2282 /// assert_eq!(zsty.trailing_dst.len(), 42);
2283 /// ```
2284 ///
2285    /// [`try_ref_from_suffix`]: TryFromBytes::try_ref_from_suffix
2286 #[must_use = "has no side effects"]
2287 #[inline]
2288 fn try_ref_from_suffix_with_elems(
2289 source: &[u8],
2290 count: usize,
2291 ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2292 where
2293 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2294 {
2295 try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2296 }
2297
2298 /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2299 /// length equal to `count`.
2300 ///
2301 /// This method attempts to return a reference to `source` interpreted as a
2302 /// `Self` with `count` trailing elements. If the length of `source` is not
2303 /// equal to the size of `Self` with `count` elements, if `source` is not
2304 /// appropriately aligned, or if `source` does not contain a valid instance
2305 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2306 /// you can [infallibly discard the alignment error][ConvertError::from].
2307 ///
2308 /// [self-unaligned]: Unaligned
2309 /// [slice-dst]: KnownLayout#dynamically-sized-types
2310 ///
2311 /// # Examples
2312 ///
2313 /// ```
2314 /// # #![allow(non_camel_case_types)] // For C0::xC0
2315 /// use zerocopy::TryFromBytes;
2316 /// # use zerocopy_derive::*;
2317 ///
2318 /// // The only valid value of this type is the byte `0xC0`
2319 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2320 /// #[repr(u8)]
2321 /// enum C0 { xC0 = 0xC0 }
2322 ///
2323 /// // The only valid value of this type is the bytes `0xC0C0`.
2324 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2325 /// #[repr(C)]
2326 /// struct C0C0(C0, C0);
2327 ///
2328 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2329 /// #[repr(C, packed)]
2330 /// struct Packet {
2331 /// magic_number: C0C0,
2332 /// mug_size: u8,
2333 /// temperature: u8,
2334 /// marshmallows: [[u8; 2]],
2335 /// }
2336 ///
2337 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2338 ///
2339 /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2340 ///
2341 /// assert_eq!(packet.mug_size, 240);
2342 /// assert_eq!(packet.temperature, 77);
2343 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2344 ///
2345 /// packet.temperature = 111;
2346 ///
2347 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2348 ///
2349    /// // These bytes are not a valid instance of `Packet`.
2350 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2351 /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2352 /// ```
2353 ///
2354 /// Since an explicit `count` is provided, this method supports types with
2355 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2356 /// which do not take an explicit count do not support such types.
2357 ///
2358 /// ```
2359 /// use core::num::NonZeroU16;
2360 /// use zerocopy::*;
2361 /// # use zerocopy_derive::*;
2362 ///
2363 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2364 /// #[repr(C, packed)]
2365 /// struct ZSTy {
2366 /// leading_sized: NonZeroU16,
2367 /// trailing_dst: [()],
2368 /// }
2369 ///
2370 /// let mut src = 0xCAFEu16;
2371 /// let src = src.as_mut_bytes();
2372 /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2373 /// assert_eq!(zsty.trailing_dst.len(), 42);
2374 /// ```
2375 ///
2376 /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2377 #[must_use = "has no side effects"]
2378 #[inline]
2379 fn try_mut_from_bytes_with_elems(
2380 source: &mut [u8],
2381 count: usize,
2382 ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2383 where
2384 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2385 {
2386 match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
2387 {
2388 Ok(source) => {
2389 // This call may panic. If that happens, it doesn't cause any soundness
2390 // issues, as we have not generated any invalid state which we need to
2391 // fix before returning.
2392 //
2393 // Note that one panic or post-monomorphization error condition is
2394 // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2395                // pointer when `Self: !Immutable`. Since `source` has `Exclusive`
2396                // aliasing, this panic condition will not happen.
2397 match source.try_into_valid() {
2398 Ok(source) => Ok(source.as_mut()),
2399 Err(e) => {
2400 Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
2401 }
2402 }
2403 }
2404 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2405 }
2406 }
2407
2408 /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2409 /// with a DST length equal to `count`.
2410 ///
2411 /// This method attempts to return a reference to the prefix of `source`
2412 /// interpreted as a `Self` with `count` trailing elements, and a reference
2413 /// to the remaining bytes. If the length of `source` is less than the size
2414 /// of `Self` with `count` elements, if `source` is not appropriately
2415 /// aligned, or if the prefix of `source` does not contain a valid instance
2416 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2417 /// you can [infallibly discard the alignment error][ConvertError::from].
2418 ///
2419 /// [self-unaligned]: Unaligned
2420 /// [slice-dst]: KnownLayout#dynamically-sized-types
2421 ///
2422 /// # Examples
2423 ///
2424 /// ```
2425 /// # #![allow(non_camel_case_types)] // For C0::xC0
2426 /// use zerocopy::TryFromBytes;
2427 /// # use zerocopy_derive::*;
2428 ///
2429 /// // The only valid value of this type is the byte `0xC0`
2430 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2431 /// #[repr(u8)]
2432 /// enum C0 { xC0 = 0xC0 }
2433 ///
2434 /// // The only valid value of this type is the bytes `0xC0C0`.
2435 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2436 /// #[repr(C)]
2437 /// struct C0C0(C0, C0);
2438 ///
2439 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2440 /// #[repr(C, packed)]
2441 /// struct Packet {
2442 /// magic_number: C0C0,
2443 /// mug_size: u8,
2444 /// temperature: u8,
2445 /// marshmallows: [[u8; 2]],
2446 /// }
2447 ///
2448 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2449 ///
2450 /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2451 ///
2452 /// assert_eq!(packet.mug_size, 240);
2453 /// assert_eq!(packet.temperature, 77);
2454 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2455 /// assert_eq!(suffix, &[8u8][..]);
2456 ///
2457 /// packet.temperature = 111;
2458 /// suffix[0] = 222;
2459 ///
2460 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2461 ///
2462    /// // These bytes are not a valid instance of `Packet`.
2463 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2464 /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2465 /// ```
2466 ///
2467 /// Since an explicit `count` is provided, this method supports types with
2468 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2469 /// which do not take an explicit count do not support such types.
2470 ///
2471 /// ```
2472 /// use core::num::NonZeroU16;
2473 /// use zerocopy::*;
2474 /// # use zerocopy_derive::*;
2475 ///
2476 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2477 /// #[repr(C, packed)]
2478 /// struct ZSTy {
2479 /// leading_sized: NonZeroU16,
2480 /// trailing_dst: [()],
2481 /// }
2482 ///
2483 /// let mut src = 0xCAFEu16;
2484 /// let src = src.as_mut_bytes();
2485 /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2486 /// assert_eq!(zsty.trailing_dst.len(), 42);
2487 /// ```
2488 ///
2489 /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2490 #[must_use = "has no side effects"]
2491 #[inline]
2492 fn try_mut_from_prefix_with_elems(
2493 source: &mut [u8],
2494 count: usize,
2495 ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2496 where
2497 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2498 {
2499 try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2500 }
2501
2502 /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2503 /// with a DST length equal to `count`.
2504 ///
2505 /// This method attempts to return a reference to the suffix of `source`
2506 /// interpreted as a `Self` with `count` trailing elements, and a reference
2507 /// to the preceding bytes. If the length of `source` is less than the size
2508 /// of `Self` with `count` elements, if the suffix of `source` is not
2509 /// appropriately aligned, or if the suffix of `source` does not contain a
2510 /// valid instance of `Self`, this returns `Err`. If [`Self:
2511 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2512 /// error][ConvertError::from].
2513 ///
2514 /// [self-unaligned]: Unaligned
2515 /// [slice-dst]: KnownLayout#dynamically-sized-types
2516 ///
2517 /// # Examples
2518 ///
2519 /// ```
2520 /// # #![allow(non_camel_case_types)] // For C0::xC0
2521 /// use zerocopy::TryFromBytes;
2522 /// # use zerocopy_derive::*;
2523 ///
2524 /// // The only valid value of this type is the byte `0xC0`
2525 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2526 /// #[repr(u8)]
2527 /// enum C0 { xC0 = 0xC0 }
2528 ///
2529 /// // The only valid value of this type is the bytes `0xC0C0`.
2530 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2531 /// #[repr(C)]
2532 /// struct C0C0(C0, C0);
2533 ///
2534 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2535 /// #[repr(C, packed)]
2536 /// struct Packet {
2537 /// magic_number: C0C0,
2538 /// mug_size: u8,
2539 /// temperature: u8,
2540 /// marshmallows: [[u8; 2]],
2541 /// }
2542 ///
2543 /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2544 ///
2545 /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2546 ///
2547 /// assert_eq!(packet.mug_size, 240);
2548 /// assert_eq!(packet.temperature, 77);
2549 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2550 /// assert_eq!(prefix, &[123u8][..]);
2551 ///
2552 /// prefix[0] = 111;
2553 /// packet.temperature = 222;
2554 ///
2555 /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2556 ///
2557    /// // These bytes are not a valid instance of `Packet`.
2558 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2559 /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2560 /// ```
2561 ///
2562 /// Since an explicit `count` is provided, this method supports types with
2563    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
2564 /// which do not take an explicit count do not support such types.
2565 ///
2566 /// ```
2567 /// use core::num::NonZeroU16;
2568 /// use zerocopy::*;
2569 /// # use zerocopy_derive::*;
2570 ///
2571 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2572 /// #[repr(C, packed)]
2573 /// struct ZSTy {
2574 /// leading_sized: NonZeroU16,
2575 /// trailing_dst: [()],
2576 /// }
2577 ///
2578 /// let mut src = 0xCAFEu16;
2579 /// let src = src.as_mut_bytes();
2580 /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2581 /// assert_eq!(zsty.trailing_dst.len(), 42);
2582 /// ```
2583 ///
2584    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
2585 #[must_use = "has no side effects"]
2586 #[inline]
2587 fn try_mut_from_suffix_with_elems(
2588 source: &mut [u8],
2589 count: usize,
2590 ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2591 where
2592 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2593 {
2594 try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2595 }
2596
2597 /// Attempts to read the given `source` as a `Self`.
2598 ///
2599 /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2600 /// instance of `Self`, this returns `Err`.
2601 ///
2602 /// # Examples
2603 ///
2604 /// ```
2605 /// use zerocopy::TryFromBytes;
2606 /// # use zerocopy_derive::*;
2607 ///
2608 /// // The only valid value of this type is the byte `0xC0`
2609 /// #[derive(TryFromBytes)]
2610 /// #[repr(u8)]
2611 /// enum C0 { xC0 = 0xC0 }
2612 ///
2613 /// // The only valid value of this type is the bytes `0xC0C0`.
2614 /// #[derive(TryFromBytes)]
2615 /// #[repr(C)]
2616 /// struct C0C0(C0, C0);
2617 ///
2618 /// #[derive(TryFromBytes)]
2619 /// #[repr(C)]
2620 /// struct Packet {
2621 /// magic_number: C0C0,
2622 /// mug_size: u8,
2623 /// temperature: u8,
2624 /// }
2625 ///
2626 /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2627 ///
2628 /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2629 ///
2630 /// assert_eq!(packet.mug_size, 240);
2631 /// assert_eq!(packet.temperature, 77);
2632 ///
2633    /// // These bytes are not a valid instance of `Packet`.
2634    /// let bytes = &[0x10, 0xC0, 240, 77][..];
2635 /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2636 /// ```
2637 #[must_use = "has no side effects"]
2638 #[inline]
2639 fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
2640 where
2641 Self: Sized,
2642 {
2643 let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
2644 Ok(candidate) => candidate,
2645 Err(e) => {
2646 return Err(TryReadError::Size(e.with_dst()));
2647 }
2648 };
2649        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2650 // its bytes are initialized.
2651 unsafe { try_read_from(source, candidate) }
2652 }
2653
2654 /// Attempts to read a `Self` from the prefix of the given `source`.
2655 ///
2656 /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2657 /// of `source`, returning that `Self` and any remaining bytes. If
2658 /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2659 /// of `Self`, it returns `Err`.
2660 ///
2661 /// # Examples
2662 ///
2663 /// ```
2664 /// use zerocopy::TryFromBytes;
2665 /// # use zerocopy_derive::*;
2666 ///
2667 /// // The only valid value of this type is the byte `0xC0`
2668 /// #[derive(TryFromBytes)]
2669 /// #[repr(u8)]
2670 /// enum C0 { xC0 = 0xC0 }
2671 ///
2672 /// // The only valid value of this type is the bytes `0xC0C0`.
2673 /// #[derive(TryFromBytes)]
2674 /// #[repr(C)]
2675 /// struct C0C0(C0, C0);
2676 ///
2677 /// #[derive(TryFromBytes)]
2678 /// #[repr(C)]
2679 /// struct Packet {
2680 /// magic_number: C0C0,
2681 /// mug_size: u8,
2682 /// temperature: u8,
2683 /// }
2684 ///
2685 /// // These are more bytes than are needed to encode a `Packet`.
2686 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2687 ///
2688 /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2689 ///
2690 /// assert_eq!(packet.mug_size, 240);
2691 /// assert_eq!(packet.temperature, 77);
2692 /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2693 ///
2694    /// // These bytes are not a valid instance of `Packet`.
2695 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2696 /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2697 /// ```
2698 #[must_use = "has no side effects"]
2699 #[inline]
2700 fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
2701 where
2702 Self: Sized,
2703 {
2704 let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
2705 Ok(candidate) => candidate,
2706 Err(e) => {
2707 return Err(TryReadError::Size(e.with_dst()));
2708 }
2709 };
2710        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2711 // its bytes are initialized.
2712 unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
2713 }
2714
2715 /// Attempts to read a `Self` from the suffix of the given `source`.
2716 ///
2717 /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2718 /// of `source`, returning that `Self` and any preceding bytes. If
2719 /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2720 /// of `Self`, it returns `Err`.
2721 ///
2722 /// # Examples
2723 ///
2724 /// ```
2725 /// # #![allow(non_camel_case_types)] // For C0::xC0
2726 /// use zerocopy::TryFromBytes;
2727 /// # use zerocopy_derive::*;
2728 ///
2729 /// // The only valid value of this type is the byte `0xC0`
2730 /// #[derive(TryFromBytes)]
2731 /// #[repr(u8)]
2732 /// enum C0 { xC0 = 0xC0 }
2733 ///
2734 /// // The only valid value of this type is the bytes `0xC0C0`.
2735 /// #[derive(TryFromBytes)]
2736 /// #[repr(C)]
2737 /// struct C0C0(C0, C0);
2738 ///
2739 /// #[derive(TryFromBytes)]
2740 /// #[repr(C)]
2741 /// struct Packet {
2742 /// magic_number: C0C0,
2743 /// mug_size: u8,
2744 /// temperature: u8,
2745 /// }
2746 ///
2747 /// // These are more bytes than are needed to encode a `Packet`.
2748 /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
2749 ///
2750 /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
2751 ///
2752 /// assert_eq!(packet.mug_size, 240);
2753 /// assert_eq!(packet.temperature, 77);
2754 /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
2755 ///
2756    /// // These bytes are not a valid instance of `Packet`.
2757 /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
2758 /// assert!(Packet::try_read_from_suffix(bytes).is_err());
2759 /// ```
2760 #[must_use = "has no side effects"]
2761 #[inline]
2762 fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
2763 where
2764 Self: Sized,
2765 {
2766 let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
2767 Ok(candidate) => candidate,
2768 Err(e) => {
2769 return Err(TryReadError::Size(e.with_dst()));
2770 }
2771 };
2772        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2773 // its bytes are initialized.
2774 unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
2775 }
2776}
2777
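/// Shared implementation of `TryFromBytes::try_ref_from_prefix`,
/// `TryFromBytes::try_ref_from_suffix`, and their `_with_elems` variants: casts
/// the leading or trailing bytes of `source` to a `&T`, validates the
/// candidate, and returns it alongside the remaining bytes.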
2778#[inline(always)]
2779fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
2780 source: &[u8],
2781 cast_type: CastType,
2782 meta: Option<T::PointerMetadata>,
2783) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
2784 match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
2785 Ok((source, prefix_suffix)) => {
2786 // This call may panic. If that happens, it doesn't cause any soundness
2787 // issues, as we have not generated any invalid state which we need to
2788 // fix before returning.
2789 //
2790 // Note that one panic or post-monomorphization error condition is
2791 // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2792            // pointer when `T: !Immutable`. Since `T: Immutable`, this panic
2793 // condition will not happen.
2794 match source.try_into_valid() {
2795 Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
2796 Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
2797 }
2798 }
2799 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2800 }
2801}
2802
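/// Shared implementation of `TryFromBytes::try_mut_from_prefix`,
/// `TryFromBytes::try_mut_from_suffix`, and their `_with_elems` variants: casts
/// the leading or trailing bytes of `candidate` to a `&mut T`, validates the
/// candidate, and returns it alongside the remaining bytes.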
2803#[inline(always)]
2804fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
2805 candidate: &mut [u8],
2806 cast_type: CastType,
2807 meta: Option<T::PointerMetadata>,
2808) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
2809 match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
2810 Ok((candidate, prefix_suffix)) => {
2811 // This call may panic. If that happens, it doesn't cause any soundness
2812 // issues, as we have not generated any invalid state which we need to
2813 // fix before returning.
2814 //
2815 // Note that one panic or post-monomorphization error condition is
2816 // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2817            // pointer when `T: !Immutable`. Since `candidate` has `Exclusive`
2818            // aliasing, this panic condition will not happen.
2819 match candidate.try_into_valid() {
2820 Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
2821 Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()),
2822 }
2823 }
2824 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2825 }
2826}
2827
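/// Swaps the elements of a pair. Used to convert the `(T, remainder)` order
/// produced by suffix casts into the `(prefix, T)` order returned by the
/// `*_from_suffix` methods.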
2828#[inline(always)]
2829fn swap<T, U>((t, u): (T, U)) -> (U, T) {
2830 (u, t)
2831}
2832
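/// Attempts to validate `candidate` (whose bytes were copied from `source`) as
/// a bit-valid `T`, returning the value on success and `source` in the error
/// case.
///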
2833/// # Safety
2834///
2835/// All bytes of `candidate` must be initialized.
2836#[inline(always)]
2837unsafe fn try_read_from<S, T: TryFromBytes>(
2838 source: S,
2839 mut candidate: CoreMaybeUninit<T>,
2840) -> Result<T, TryReadError<S, T>> {
2841 // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
2842 // to add a `T: Immutable` bound.
2843 let c_ptr = Ptr::from_mut(&mut candidate);
2844    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it was derived from
2845 // `candidate`, which the caller promises is entirely initialized. Since
2846 // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
2847 // no values written to an `Initialized` `c_ptr` can violate its validity.
2848 // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
2849 // via `c_ptr` so long as it is live, so we don't need to worry about the
2850 // fact that `c_ptr` may have more restricted validity than `candidate`.
2851 let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
2852 let c_ptr = c_ptr.transmute();
2853
2854 // Since we don't have `T: KnownLayout`, we hack around that by using
2855 // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
2856 //
2857 // This call may panic. If that happens, it doesn't cause any soundness
2858 // issues, as we have not generated any invalid state which we need to fix
2859 // before returning.
2860 //
2861 // Note that one panic or post-monomorphization error condition is calling
2862 // `try_into_valid` (and thus `is_bit_valid`) with a shared pointer when
2863    // `T: !Immutable`. Since `c_ptr` has `Exclusive` aliasing, this panic
2864    // condition will not happen.
2865 if !Wrapping::<T>::is_bit_valid(c_ptr.forget_aligned()) {
2866 return Err(ValidityError::new(source).into());
2867 }
2868
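    // Requiring these mutual `TransmuteFrom` bounds asserts, at compile time,
    // that `Wrapping<T>` and `T` have the same size and bit validity; this is
    // the type assertion relied upon by the SAFETY comment below.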
2869 fn _assert_same_size_and_validity<T>()
2870 where
2871 Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
2872 T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
2873 {
2874 }
2875
2876 _assert_same_size_and_validity::<T>();
2877
2878 // SAFETY: We just validated that `candidate` contains a valid
2879 // `Wrapping<T>`, which has the same size and bit validity as `T`, as
2880 // guaranteed by the preceding type assertion.
2881 Ok(unsafe { candidate.assume_init() })
2882}
2883
2884/// Types for which a sequence of bytes all set to zero represents a valid
2885/// instance of the type.
2886///
2887/// Any memory region of the appropriate length which is guaranteed to contain
2888/// only zero bytes can be viewed as any `FromZeros` type with no runtime
2889/// overhead. This is useful whenever memory is known to be in a zeroed state,
2890/// such as memory returned from some allocation routines.
2891///
2892/// # Warning: Padding bytes
2893///
2894/// Note that, when a value is moved or copied, only the non-padding bytes of
2895/// that value are guaranteed to be preserved. It is unsound to assume that
2896/// values written to padding bytes are preserved after a move or copy. For more
2897/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
2898///
2899/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
2900///
2901/// # Implementation
2902///
2903/// **Do not implement this trait yourself!** Instead, use
2904/// [`#[derive(FromZeros)]`][derive]; e.g.:
2905///
2906/// ```
2907/// # use zerocopy_derive::{FromZeros, Immutable};
2908/// #[derive(FromZeros)]
2909/// struct MyStruct {
2910/// # /*
2911/// ...
2912/// # */
2913/// }
2914///
2915/// #[derive(FromZeros)]
2916/// #[repr(u8)]
2917/// enum MyEnum {
2918/// # Variant0,
2919/// # /*
2920/// ...
2921/// # */
2922/// }
2923///
2924/// #[derive(FromZeros, Immutable)]
2925/// union MyUnion {
2926/// # variant: u8,
2927/// # /*
2928/// ...
2929/// # */
2930/// }
2931/// ```
2932///
2933/// This derive performs a sophisticated, compile-time safety analysis to
2934/// determine whether a type is `FromZeros`.
2935///
2936/// # Safety
2937///
2938/// *This section describes what is required in order for `T: FromZeros`, and
2939/// what unsafe code may assume of such types. If you don't plan on implementing
2940/// `FromZeros` manually, and you don't plan on writing unsafe code that
2941/// operates on `FromZeros` types, then you don't need to read this section.*
2942///
2943/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
2944/// `T` whose bytes are all initialized to zero. If a type that violates this
2945/// contract is marked as `FromZeros`, it may cause undefined behavior.
2946///
2947/// `#[derive(FromZeros)]` only permits [types which satisfy these
2948/// requirements][derive-analysis].
2949///
2950#[cfg_attr(
2951 feature = "derive",
2952 doc = "[derive]: zerocopy_derive::FromZeros",
2953 doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
2954)]
2955#[cfg_attr(
2956 not(feature = "derive"),
2957 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
2958 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
2959)]
2960#[cfg_attr(
2961 zerocopy_diagnostic_on_unimplemented_1_78_0,
2962 diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
2963)]
2964pub unsafe trait FromZeros: TryFromBytes {
2965 // The `Self: Sized` bound makes it so that `FromZeros` is still object
2966 // safe.
2967 #[doc(hidden)]
2968 fn only_derive_is_allowed_to_implement_this_trait()
2969 where
2970 Self: Sized;
2971
2972 /// Overwrites `self` with zeros.
2973 ///
2974 /// Sets every byte in `self` to 0. While this is similar to doing `*self =
2975 /// Self::new_zeroed()`, it differs in that `zero` does not semantically
2976 /// drop the current value and replace it with a new one — it simply
2977 /// modifies the bytes of the existing value.
2978 ///
2979 /// # Examples
2980 ///
2981 /// ```
2982 /// # use zerocopy::FromZeros;
2983 /// # use zerocopy_derive::*;
2984 /// #
2985 /// #[derive(FromZeros)]
2986 /// #[repr(C)]
2987 /// struct PacketHeader {
2988 /// src_port: [u8; 2],
2989 /// dst_port: [u8; 2],
2990 /// length: [u8; 2],
2991 /// checksum: [u8; 2],
2992 /// }
2993 ///
2994 /// let mut header = PacketHeader {
2995 /// src_port: 100u16.to_be_bytes(),
2996 /// dst_port: 200u16.to_be_bytes(),
2997 /// length: 300u16.to_be_bytes(),
2998 /// checksum: 400u16.to_be_bytes(),
2999 /// };
3000 ///
3001 /// header.zero();
3002 ///
3003 /// assert_eq!(header.src_port, [0, 0]);
3004 /// assert_eq!(header.dst_port, [0, 0]);
3005 /// assert_eq!(header.length, [0, 0]);
3006 /// assert_eq!(header.checksum, [0, 0]);
3007 /// ```
3008 #[inline(always)]
3009 fn zero(&mut self) {
3010 let slf: *mut Self = self;
3011 let len = mem::size_of_val(self);
3012 // SAFETY:
3013 // - `self` is guaranteed by the type system to be valid for writes of
3014 // size `size_of_val(self)`.
3015 // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
3016 // as required by `u8`.
3017 // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
3018        //   of `Self`.
3019 //
3020 // TODO(#429): Add references to docs and quotes.
3021 unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
3022 }
3023
3024 /// Creates an instance of `Self` from zeroed bytes.
3025 ///
3026 /// # Examples
3027 ///
3028 /// ```
3029 /// # use zerocopy::FromZeros;
3030 /// # use zerocopy_derive::*;
3031 /// #
3032 /// #[derive(FromZeros)]
3033 /// #[repr(C)]
3034 /// struct PacketHeader {
3035 /// src_port: [u8; 2],
3036 /// dst_port: [u8; 2],
3037 /// length: [u8; 2],
3038 /// checksum: [u8; 2],
3039 /// }
3040 ///
3041 /// let header: PacketHeader = FromZeros::new_zeroed();
3042 ///
3043 /// assert_eq!(header.src_port, [0, 0]);
3044 /// assert_eq!(header.dst_port, [0, 0]);
3045 /// assert_eq!(header.length, [0, 0]);
3046 /// assert_eq!(header.checksum, [0, 0]);
3047 /// ```
3048 #[must_use = "has no side effects"]
3049 #[inline(always)]
3050 fn new_zeroed() -> Self
3051 where
3052 Self: Sized,
3053 {
3054 // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
3055 unsafe { mem::zeroed() }
3056 }
3057
3058 /// Creates a `Box<Self>` from zeroed bytes.
3059 ///
3060 /// This function is useful for allocating large values on the heap and
3061 /// zero-initializing them, without ever creating a temporary instance of
3062 /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
3063 /// will allocate `[u8; 1048576]` directly on the heap; it does not require
3064 /// storing `[u8; 1048576]` in a temporary variable on the stack.
3065 ///
3066 /// On systems that use a heap implementation that supports allocating from
3067 /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
3068 /// have performance benefits.
3069 ///
3070 /// # Errors
3071 ///
3072 /// Returns an error on allocation failure. Allocation failure is guaranteed
3073 /// never to cause a panic or an abort.
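    ///
    /// # Examples
    ///
    /// A minimal usage sketch (this example assumes the `alloc` feature is
    /// enabled; the 1 MiB size is arbitrary):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// // Allocate a zeroed 1 MiB buffer directly on the heap, without ever
    /// // materializing it on the stack.
    /// let buf: Box<[u8; 1048576]> = <[u8; 1048576]>::new_box_zeroed().unwrap();
    /// assert!(buf.iter().all(|&b| b == 0));
    /// ```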
3074 #[must_use = "has no side effects (other than allocation)"]
3075 #[cfg(any(feature = "alloc", test))]
3076 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3077 #[inline]
3078 fn new_box_zeroed() -> Result<Box<Self>, AllocError>
3079 where
3080 Self: Sized,
3081 {
3082 // If `T` is a ZST, then return a proper boxed instance of it. There is
3083 // no allocation, but `Box` does require a correct dangling pointer.
3084 let layout = Layout::new::<Self>();
3085 if layout.size() == 0 {
3086 // Construct the `Box` from a dangling pointer to avoid calling
3087 // `Self::new_zeroed`. This ensures that stack space is never
3088 // allocated for `Self` even on lower opt-levels where this branch
3089 // might not get optimized out.
3090
3091 // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
3092 // requirements are that the pointer is non-null and sufficiently
3093 // aligned. Per [2], `NonNull::dangling` produces a pointer which
3094 // is sufficiently aligned. Since the produced pointer is a
3095 // `NonNull`, it is non-null.
3096 //
3097 // [1] Per https://doc.rust-lang.org/nightly/std/boxed/index.html#memory-layout:
3098 //
3099 // For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
3100 //
3101 // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
3102 //
3103 // Creates a new `NonNull` that is dangling, but well-aligned.
3104 return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
3105 }
3106
3107 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
3108 #[allow(clippy::undocumented_unsafe_blocks)]
3109 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
3110 if ptr.is_null() {
3111 return Err(AllocError);
3112 }
3113 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
3114 #[allow(clippy::undocumented_unsafe_blocks)]
3115 Ok(unsafe { Box::from_raw(ptr) })
3116 }
3117
3118    /// Creates a `Box<Self>` from zeroed bytes, with `count` trailing slice
3119    /// elements.
3120    ///
3121    /// This function is useful for allocating large values on the heap and
3122    /// zero-initializing them, without ever creating a temporary instance of
3123    /// `Self` on the stack. For example,
3124    /// `<[u8]>::new_box_zeroed_with_elems(1048576)` will allocate the slice
3125    /// directly on the heap; it does not require storing the slice on the stack.
3126    ///
3127    /// On systems that use a heap implementation that supports allocating from
3128    /// pre-zeroed memory, using `new_box_zeroed_with_elems` may have performance
3129    /// benefits.
3130    ///
3131    /// If `Self` is a slice of zero-sized elements, this function will return a
3132    /// `Box<Self>` that has the correct `len`. Such a box cannot contain any
3133    /// actual information, but its `len()` will report the correct value.
3134 ///
3135 /// # Errors
3136 ///
3137 /// Returns an error on allocation failure. Allocation failure is
3138 /// guaranteed never to cause a panic or an abort.
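    ///
    /// # Examples
    ///
    /// A minimal usage sketch (this example assumes the `alloc` feature is
    /// enabled and uses a plain slice as the destination type):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// // Allocate a zeroed boxed slice of 16 `u32`s directly on the heap.
    /// let buf: Box<[u32]> = <[u32]>::new_box_zeroed_with_elems(16).unwrap();
    /// assert_eq!(buf.len(), 16);
    /// assert!(buf.iter().all(|&word| word == 0));
    /// ```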
3139 #[must_use = "has no side effects (other than allocation)"]
3140 #[cfg(feature = "alloc")]
3141 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3142 #[inline]
3143 fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
3144 where
3145 Self: KnownLayout<PointerMetadata = usize>,
3146 {
3147 // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
3148 // `new_box`. The referent of the pointer returned by `alloc_zeroed`
3149 // (and, consequently, the `Box` derived from it) is a valid instance of
3150 // `Self`, because `Self` is `FromZeros`.
3151 unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
3152 }
3153
3154 #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
3155 #[doc(hidden)]
3156 #[cfg(feature = "alloc")]
3157 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3158 #[must_use = "has no side effects (other than allocation)"]
3159 #[inline(always)]
3160 fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
3161 where
3162 Self: Sized,
3163 {
3164 <[Self]>::new_box_zeroed_with_elems(len)
3165 }
3166
3167 /// Creates a `Vec<Self>` from zeroed bytes.
3168 ///
3169    /// This function is useful for allocating large `Vec`s and
3170 /// zero-initializing them, without ever creating a temporary instance of
3171 /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3172 /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3173 /// heap; it does not require storing intermediate values on the stack.
3174 ///
3175 /// On systems that use a heap implementation that supports allocating from
3176 /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3177 ///
3178 /// If `Self` is a zero-sized type, then this function will return a
3179 /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3180 /// actual information, but its `len()` property will report the correct
3181 /// value.
3182 ///
3183 /// # Errors
3184 ///
3185 /// Returns an error on allocation failure. Allocation failure is
3186 /// guaranteed never to cause a panic or an abort.
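    ///
    /// # Examples
    ///
    /// A minimal usage sketch (this example assumes the `alloc` feature is
    /// enabled):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// let v: Vec<u64> = u64::new_vec_zeroed(8).unwrap();
    /// assert_eq!(v, vec![0u64; 8]);
    /// ```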
3187 #[must_use = "has no side effects (other than allocation)"]
3188 #[cfg(feature = "alloc")]
3189 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3190 #[inline(always)]
3191 fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3192 where
3193 Self: Sized,
3194 {
3195 <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3196 }
3197
3198 /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3199 /// the vector. The new items are initialized with zeros.
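    ///
    /// # Examples
    ///
    /// A minimal usage sketch (this example assumes the `alloc` feature is
    /// enabled and a toolchain new enough to provide `Vec::try_reserve`):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// let mut v = vec![1u8, 2, 3];
    /// u8::extend_vec_zeroed(&mut v, 2).unwrap();
    /// assert_eq!(v, [1, 2, 3, 0, 0]);
    /// ```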
3200 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
3201 #[cfg(feature = "alloc")]
3202 #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3203 #[inline(always)]
3204 fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3205 where
3206 Self: Sized,
3207 {
3208 // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3209 // panic condition is not satisfied.
3210 <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3211 }
3212
3213 /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
3214 /// items are initialized with zeros.
3215 ///
3216 /// # Panics
3217 ///
3218 /// Panics if `position > v.len()`.
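    ///
    /// # Examples
    ///
    /// A minimal usage sketch (this example assumes the `alloc` feature is
    /// enabled and a toolchain new enough to provide `Vec::try_reserve`):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// let mut v = vec![1u8, 2, 3];
    /// // Insert two zeroed elements at index 1.
    /// u8::insert_vec_zeroed(&mut v, 1, 2).unwrap();
    /// assert_eq!(v, [1, 0, 0, 2, 3]);
    /// ```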
3219 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
3220 #[cfg(feature = "alloc")]
3221 #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3222 #[inline]
3223 fn insert_vec_zeroed(
3224 v: &mut Vec<Self>,
3225 position: usize,
3226 additional: usize,
3227 ) -> Result<(), AllocError>
3228 where
3229 Self: Sized,
3230 {
3231 assert!(position <= v.len());
3232 // We only conditionally compile on versions on which `try_reserve` is
3233 // stable; the Clippy lint is a false positive.
3234 #[allow(clippy::incompatible_msrv)]
3235 v.try_reserve(additional).map_err(|_| AllocError)?;
3236 // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
3237 // * `ptr.add(position)`
3238 // * `position + additional`
3239 // * `v.len() + additional`
3240 //
3241 // `v.len() - position` cannot overflow because we asserted that
3242 // `position <= v.len()`.
3243 unsafe {
3244 // This is a potentially overlapping copy.
3245 let ptr = v.as_mut_ptr();
3246 #[allow(clippy::arithmetic_side_effects)]
3247 ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
3248 ptr.add(position).write_bytes(0, additional);
3249 #[allow(clippy::arithmetic_side_effects)]
3250 v.set_len(v.len() + additional);
3251 }
3252
3253 Ok(())
3254 }
3255}
3256
3257/// Analyzes whether a type is [`FromBytes`].
3258///
3259/// This derive analyzes, at compile time, whether the annotated type satisfies
3260/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3261/// supertraits if it is sound to do so. This derive can be applied to structs,
3262/// enums, and unions;
3263/// e.g.:
3264///
3265/// ```
3266/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3267/// #[derive(FromBytes)]
3268/// struct MyStruct {
3269/// # /*
3270/// ...
3271/// # */
3272/// }
3273///
3274/// #[derive(FromBytes)]
3275/// #[repr(u8)]
3276/// enum MyEnum {
3277/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3278/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3279/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3280/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3281/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3282/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3283/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3284/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3285/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3286/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3287/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3288/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3289/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3290/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3291/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3292/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3293/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3294/// # VFF,
3295/// # /*
3296/// ...
3297/// # */
3298/// }
3299///
3300/// #[derive(FromBytes, Immutable)]
3301/// union MyUnion {
3302/// # variant: u8,
3303/// # /*
3304/// ...
3305/// # */
3306/// }
3307/// ```
3308///
3309/// [safety conditions]: trait@FromBytes#safety
3310///
3311/// # Analysis
3312///
3313/// *This section describes, roughly, the analysis performed by this derive to
3314/// determine whether it is sound to implement `FromBytes` for a given type.
3315/// Unless you are modifying the implementation of this derive, or attempting to
3316/// manually implement `FromBytes` for a type yourself, you don't need to read
3317/// this section.*
3318///
3319/// If a type has the following properties, then this derive can implement
3320/// `FromBytes` for that type:
3321///
3322/// - If the type is a struct, all of its fields must be `FromBytes`.
3323/// - If the type is an enum:
3324/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
3325/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
3326/// - The maximum number of discriminants must be used (so that every possible
3327/// bit pattern is a valid one). Be very careful when using the `C`,
3328/// `usize`, or `isize` representations, as their size is
3329/// platform-dependent.
3330/// - Its fields must be `FromBytes`.
3331///
3332/// This analysis is subject to change. Unsafe code may *only* rely on the
3333/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3334/// implementation details of this derive.
3335///
3336/// ## Why isn't an explicit representation required for structs?
3337///
3338/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3339/// that structs are marked with `#[repr(C)]`.
3340///
3341/// Per the [Rust reference][reference],
3342///
3343/// > The representation of a type can change the padding between fields, but
3344/// > does not change the layout of the fields themselves.
3345///
3346/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3347///
3348/// Since the layout of structs only consists of padding bytes and field bytes,
3349/// a struct is soundly `FromBytes` if:
3350/// 1. its padding is soundly `FromBytes`, and
3351/// 2. its fields are soundly `FromBytes`.
3352///
3353/// The first condition is always satisfied: padding bytes do not have any
3354/// validity constraints. A [discussion] of this question in the Unsafe Code
3355/// Guidelines Working Group concluded that it would be virtually unimaginable
3356/// for future versions of rustc to add validity constraints to padding bytes.
3357///
3358/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3359///
3360/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3361/// its fields are `FromBytes`.
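///
/// For example, the following struct has no `repr` attribute (and therefore no
/// guaranteed layout), yet it is still soundly `FromBytes` because both of its
/// fields are `FromBytes` and its padding, wherever the compiler places it,
/// carries no validity constraints. This is a small illustration of the
/// argument above, not an exhaustive statement of the derive's analysis:
///
/// ```
/// # use zerocopy_derive::FromBytes;
/// #[derive(FromBytes)]
/// struct NoRepr {
///     a: u8,
///     b: u32,
/// }
/// ```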
3362// TODO(#146): Document why we don't require an enum to have an explicit `repr`
3363// attribute.
3364#[cfg(any(feature = "derive", test))]
3365#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3366pub use zerocopy_derive::FromBytes;
3367
3368/// Types for which any bit pattern is valid.
3369///
3370/// Any memory region of the appropriate length which contains initialized bytes
3371/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3372/// useful for efficiently parsing bytes as structured data.
3373///
3374/// # Warning: Padding bytes
3375///
3376/// Note that, when a value is moved or copied, only the non-padding bytes of
3377/// that value are guaranteed to be preserved. It is unsound to assume that
3378/// values written to padding bytes are preserved after a move or copy. For
3379/// example, the following is unsound:
3380///
3381/// ```rust,no_run
3382/// use core::mem::{size_of, transmute};
3383/// use zerocopy::FromZeros;
3384/// # use zerocopy_derive::*;
3385///
3386/// // Assume `Foo` is a type with padding bytes.
3387/// #[derive(FromZeros, Default)]
3388/// struct Foo {
3389/// # /*
3390/// ...
3391/// # */
3392/// }
3393///
3394/// let mut foo: Foo = Foo::default();
3395/// FromZeros::zero(&mut foo);
3396/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3397/// // those writes are not guaranteed to be preserved in padding bytes when
3398/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3399/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3400/// ```
3401///
3402/// # Implementation
3403///
3404/// **Do not implement this trait yourself!** Instead, use
3405/// [`#[derive(FromBytes)]`][derive]; e.g.:
3406///
3407/// ```
3408/// # use zerocopy_derive::{FromBytes, Immutable};
3409/// #[derive(FromBytes)]
3410/// struct MyStruct {
3411/// # /*
3412/// ...
3413/// # */
3414/// }
3415///
3416/// #[derive(FromBytes)]
3417/// #[repr(u8)]
3418/// enum MyEnum {
3419/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3420/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3421/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3422/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3423/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3424/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3425/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3426/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3427/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3428/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3429/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3430/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3431/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3432/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3433/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3434/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3435/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3436/// # VFF,
3437/// # /*
3438/// ...
3439/// # */
3440/// }
3441///
3442/// #[derive(FromBytes, Immutable)]
3443/// union MyUnion {
3444/// # variant: u8,
3445/// # /*
3446/// ...
3447/// # */
3448/// }
3449/// ```
3450///
3451/// This derive performs a sophisticated, compile-time safety analysis to
3452/// determine whether a type is `FromBytes`.
3453///
3454/// # Safety
3455///
3456/// *This section describes what is required in order for `T: FromBytes`, and
3457/// what unsafe code may assume of such types. If you don't plan on implementing
3458/// `FromBytes` manually, and you don't plan on writing unsafe code that
3459/// operates on `FromBytes` types, then you don't need to read this section.*
3460///
3461/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3462/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3463/// words, any byte value which is not uninitialized). If a type is marked as
3464/// `FromBytes` which violates this contract, it may cause undefined behavior.
3465///
3466/// `#[derive(FromBytes)]` only permits [types which satisfy these
3467/// requirements][derive-analysis].
3468///
3469#[cfg_attr(
3470 feature = "derive",
3471 doc = "[derive]: zerocopy_derive::FromBytes",
3472 doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3473)]
3474#[cfg_attr(
3475 not(feature = "derive"),
3476 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3477 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3478)]
3479#[cfg_attr(
3480 zerocopy_diagnostic_on_unimplemented_1_78_0,
3481 diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3482)]
3483pub unsafe trait FromBytes: FromZeros {
3484 // The `Self: Sized` bound makes it so that `FromBytes` is still object
3485 // safe.
3486 #[doc(hidden)]
3487 fn only_derive_is_allowed_to_implement_this_trait()
3488 where
3489 Self: Sized;
3490
3491 /// Interprets the given `source` as a `&Self`.
3492 ///
3493 /// This method attempts to return a reference to `source` interpreted as a
3494 /// `Self`. If the length of `source` is not a [valid size of
3495 /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3496 /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3497 /// [infallibly discard the alignment error][size-error-from].
3498 ///
3499 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3500 ///
3501 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3502 /// [self-unaligned]: Unaligned
3503 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3504 /// [slice-dst]: KnownLayout#dynamically-sized-types
3505 ///
3506 /// # Compile-Time Assertions
3507 ///
3508 /// This method cannot yet be used on unsized types whose dynamically-sized
3509 /// component is zero-sized. Attempting to use this method on such types
3510 /// results in a compile-time assertion error; e.g.:
3511 ///
3512 /// ```compile_fail,E0080
3513 /// use zerocopy::*;
3514 /// # use zerocopy_derive::*;
3515 ///
3516 /// #[derive(FromBytes, Immutable, KnownLayout)]
3517 /// #[repr(C)]
3518 /// struct ZSTy {
3519 /// leading_sized: u16,
3520 /// trailing_dst: [()],
3521 /// }
3522 ///
3523    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
3524 /// ```
3525 ///
3526 /// # Examples
3527 ///
3528 /// ```
3529 /// use zerocopy::FromBytes;
3530 /// # use zerocopy_derive::*;
3531 ///
3532 /// #[derive(FromBytes, KnownLayout, Immutable)]
3533 /// #[repr(C)]
3534 /// struct PacketHeader {
3535 /// src_port: [u8; 2],
3536 /// dst_port: [u8; 2],
3537 /// length: [u8; 2],
3538 /// checksum: [u8; 2],
3539 /// }
3540 ///
3541 /// #[derive(FromBytes, KnownLayout, Immutable)]
3542 /// #[repr(C)]
3543 /// struct Packet {
3544 /// header: PacketHeader,
3545 /// body: [u8],
3546 /// }
3547 ///
3548 /// // These bytes encode a `Packet`.
3549 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3550 ///
3551 /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3552 ///
3553 /// assert_eq!(packet.header.src_port, [0, 1]);
3554 /// assert_eq!(packet.header.dst_port, [2, 3]);
3555 /// assert_eq!(packet.header.length, [4, 5]);
3556 /// assert_eq!(packet.header.checksum, [6, 7]);
3557 /// assert_eq!(packet.body, [8, 9, 10, 11]);
3558 /// ```
3559 #[must_use = "has no side effects"]
3560 #[inline]
3561 fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
3562 where
3563 Self: KnownLayout + Immutable,
3564 {
3565 static_assert_dst_is_not_zst!(Self);
3566 match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
3567 Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
3568 Err(err) => Err(err.map_src(|src| src.as_ref())),
3569 }
3570 }
3571
3572 /// Interprets the prefix of the given `source` as a `&Self` without
3573 /// copying.
3574 ///
3575 /// This method computes the [largest possible size of `Self`][valid-size]
3576 /// that can fit in the leading bytes of `source`, then attempts to return
3577 /// both a reference to those bytes interpreted as a `Self`, and a reference
3578 /// to the remaining bytes. If there are insufficient bytes, or if `source`
3579 /// is not appropriately aligned, this returns `Err`. If [`Self:
3580 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3581 /// error][size-error-from].
3582 ///
3583 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3584 ///
3585 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3586 /// [self-unaligned]: Unaligned
3587 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3588 /// [slice-dst]: KnownLayout#dynamically-sized-types
3589 ///
3590 /// # Compile-Time Assertions
3591 ///
3592 /// This method cannot yet be used on unsized types whose dynamically-sized
3593 /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
3594 /// support such types. Attempting to use this method on such types results
3595 /// in a compile-time assertion error; e.g.:
3596 ///
3597 /// ```compile_fail,E0080
3598 /// use zerocopy::*;
3599 /// # use zerocopy_derive::*;
3600 ///
3601 /// #[derive(FromBytes, Immutable, KnownLayout)]
3602 /// #[repr(C)]
3603 /// struct ZSTy {
3604 /// leading_sized: u16,
3605 /// trailing_dst: [()],
3606 /// }
3607 ///
3608    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
3609 /// ```
3610 ///
3611 /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
3612 ///
3613 /// # Examples
3614 ///
3615 /// ```
3616 /// use zerocopy::FromBytes;
3617 /// # use zerocopy_derive::*;
3618 ///
3619 /// #[derive(FromBytes, KnownLayout, Immutable)]
3620 /// #[repr(C)]
3621 /// struct PacketHeader {
3622 /// src_port: [u8; 2],
3623 /// dst_port: [u8; 2],
3624 /// length: [u8; 2],
3625 /// checksum: [u8; 2],
3626 /// }
3627 ///
3628 /// #[derive(FromBytes, KnownLayout, Immutable)]
3629 /// #[repr(C)]
3630 /// struct Packet {
3631 /// header: PacketHeader,
3632 /// body: [[u8; 2]],
3633 /// }
3634 ///
3635 /// // These are more bytes than are needed to encode a `Packet`.
3636 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
3637 ///
3638 /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
3639 ///
3640 /// assert_eq!(packet.header.src_port, [0, 1]);
3641 /// assert_eq!(packet.header.dst_port, [2, 3]);
3642 /// assert_eq!(packet.header.length, [4, 5]);
3643 /// assert_eq!(packet.header.checksum, [6, 7]);
3644 /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
3645 /// assert_eq!(suffix, &[14u8][..]);
3646 /// ```
3647 #[must_use = "has no side effects"]
3648 #[inline]
3649 fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
3650 where
3651 Self: KnownLayout + Immutable,
3652 {
3653 static_assert_dst_is_not_zst!(Self);
3654 ref_from_prefix_suffix(source, None, CastType::Prefix)
3655 }
3656
3657    /// Interprets the suffix of the given `source` as a `&Self`.
3658 ///
3659 /// This method computes the [largest possible size of `Self`][valid-size]
3660 /// that can fit in the trailing bytes of `source`, then attempts to return
3661 /// both a reference to those bytes interpreted as a `Self`, and a reference
3662 /// to the preceding bytes. If there are insufficient bytes, or if that
3663 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3664 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3665 /// alignment error][size-error-from].
3666 ///
3667 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3668 ///
3669 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3670 /// [self-unaligned]: Unaligned
3671 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3672 /// [slice-dst]: KnownLayout#dynamically-sized-types
3673 ///
3674 /// # Compile-Time Assertions
3675 ///
3676 /// This method cannot yet be used on unsized types whose dynamically-sized
3677 /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3678 /// support such types. Attempting to use this method on such types results
3679 /// in a compile-time assertion error; e.g.:
3680 ///
3681 /// ```compile_fail,E0080
3682 /// use zerocopy::*;
3683 /// # use zerocopy_derive::*;
3684 ///
3685 /// #[derive(FromBytes, Immutable, KnownLayout)]
3686 /// #[repr(C)]
3687 /// struct ZSTy {
3688 /// leading_sized: u16,
3689 /// trailing_dst: [()],
3690 /// }
3691 ///
3692    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
3693 /// ```
3694 ///
3695 /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3696 ///
3697 /// # Examples
3698 ///
3699 /// ```
3700 /// use zerocopy::FromBytes;
3701 /// # use zerocopy_derive::*;
3702 ///
3703 /// #[derive(FromBytes, Immutable, KnownLayout)]
3704 /// #[repr(C)]
3705 /// struct PacketTrailer {
3706 /// frame_check_sequence: [u8; 4],
3707 /// }
3708 ///
3709 /// // These are more bytes than are needed to encode a `PacketTrailer`.
3710 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3711 ///
3712 /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3713 ///
3714 /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3715 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3716 /// ```
3717 #[must_use = "has no side effects"]
3718 #[inline]
3719 fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3720 where
3721 Self: Immutable + KnownLayout,
3722 {
3723 static_assert_dst_is_not_zst!(Self);
3724 ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3725 }
3726
3727 /// Interprets the given `source` as a `&mut Self`.
3728 ///
3729 /// This method attempts to return a reference to `source` interpreted as a
3730 /// `Self`. If the length of `source` is not a [valid size of
3731 /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3732 /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3733 /// [infallibly discard the alignment error][size-error-from].
3734 ///
3735 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3736 ///
3737 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3738 /// [self-unaligned]: Unaligned
3739 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3740 /// [slice-dst]: KnownLayout#dynamically-sized-types
3741 ///
3742 /// # Compile-Time Assertions
3743 ///
3744 /// This method cannot yet be used on unsized types whose dynamically-sized
3745 /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
3746 /// support such types. Attempting to use this method on such types results
3747 /// in a compile-time assertion error; e.g.:
3748 ///
3749 /// ```compile_fail,E0080
3750 /// use zerocopy::*;
3751 /// # use zerocopy_derive::*;
3752 ///
3753 /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3754 /// #[repr(C, packed)]
3755 /// struct ZSTy {
3756 /// leading_sized: [u8; 2],
3757 /// trailing_dst: [()],
3758 /// }
3759 ///
3760 /// let mut source = [85, 85];
3761    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
3762 /// ```
3763 ///
3764 /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
3765 ///
3766 /// # Examples
3767 ///
3768 /// ```
3769 /// use zerocopy::FromBytes;
3770 /// # use zerocopy_derive::*;
3771 ///
3772 /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3773 /// #[repr(C)]
3774 /// struct PacketHeader {
3775 /// src_port: [u8; 2],
3776 /// dst_port: [u8; 2],
3777 /// length: [u8; 2],
3778 /// checksum: [u8; 2],
3779 /// }
3780 ///
3781 /// // These bytes encode a `PacketHeader`.
3782 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
3783 ///
3784 /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
3785 ///
3786 /// assert_eq!(header.src_port, [0, 1]);
3787 /// assert_eq!(header.dst_port, [2, 3]);
3788 /// assert_eq!(header.length, [4, 5]);
3789 /// assert_eq!(header.checksum, [6, 7]);
3790 ///
3791 /// header.checksum = [0, 0];
3792 ///
3793 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
3794 /// ```
3795 #[must_use = "has no side effects"]
3796 #[inline]
3797 fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
3798 where
3799 Self: IntoBytes + KnownLayout,
3800 {
3801 static_assert_dst_is_not_zst!(Self);
3802 match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
3803 Ok(ptr) => Ok(ptr.recall_validity().as_mut()),
3804 Err(err) => Err(err.map_src(|src| src.as_mut())),
3805 }
3806 }
3807
3808 /// Interprets the prefix of the given `source` as a `&mut Self` without
3809 /// copying.
3810 ///
3811 /// This method computes the [largest possible size of `Self`][valid-size]
3812 /// that can fit in the leading bytes of `source`, then attempts to return
3813 /// both a reference to those bytes interpreted as a `Self`, and a reference
3814 /// to the remaining bytes. If there are insufficient bytes, or if `source`
3815 /// is not appropriately aligned, this returns `Err`. If [`Self:
3816 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3817 /// error][size-error-from].
3818 ///
3819 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3820 ///
3821 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3822 /// [self-unaligned]: Unaligned
3823 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3824 /// [slice-dst]: KnownLayout#dynamically-sized-types
3825 ///
3826 /// # Compile-Time Assertions
3827 ///
3828 /// This method cannot yet be used on unsized types whose dynamically-sized
3829 /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
3830 /// support such types. Attempting to use this method on such types results
3831 /// in a compile-time assertion error; e.g.:
3832 ///
3833 /// ```compile_fail,E0080
3834 /// use zerocopy::*;
3835 /// # use zerocopy_derive::*;
3836 ///
3837 /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3838 /// #[repr(C, packed)]
3839 /// struct ZSTy {
3840 /// leading_sized: [u8; 2],
3841 /// trailing_dst: [()],
3842 /// }
3843 ///
3844 /// let mut source = [85, 85];
3845    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
3846 /// ```
3847 ///
3848 /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
3849 ///
3850 /// # Examples
3851 ///
3852 /// ```
3853 /// use zerocopy::FromBytes;
3854 /// # use zerocopy_derive::*;
3855 ///
3856 /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3857 /// #[repr(C)]
3858 /// struct PacketHeader {
3859 /// src_port: [u8; 2],
3860 /// dst_port: [u8; 2],
3861 /// length: [u8; 2],
3862 /// checksum: [u8; 2],
3863 /// }
3864 ///
3865 /// // These are more bytes than are needed to encode a `PacketHeader`.
3866 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3867 ///
3868 /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
3869 ///
3870 /// assert_eq!(header.src_port, [0, 1]);
3871 /// assert_eq!(header.dst_port, [2, 3]);
3872 /// assert_eq!(header.length, [4, 5]);
3873 /// assert_eq!(header.checksum, [6, 7]);
3874 /// assert_eq!(body, &[8, 9][..]);
3875 ///
3876 /// header.checksum = [0, 0];
3877 /// body.fill(1);
3878 ///
3879 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
3880 /// ```
3881 #[must_use = "has no side effects"]
3882 #[inline]
3883 fn mut_from_prefix(
3884 source: &mut [u8],
3885 ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
3886 where
3887 Self: IntoBytes + KnownLayout,
3888 {
3889 static_assert_dst_is_not_zst!(Self);
3890 mut_from_prefix_suffix(source, None, CastType::Prefix)
3891 }
3892
3893 /// Interprets the suffix of the given `source` as a `&mut Self` without
3894 /// copying.
3895 ///
3896 /// This method computes the [largest possible size of `Self`][valid-size]
3897 /// that can fit in the trailing bytes of `source`, then attempts to return
3898 /// both a reference to those bytes interpreted as a `Self`, and a reference
3899 /// to the preceding bytes. If there are insufficient bytes, or if that
3900 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3901 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3902 /// alignment error][size-error-from].
3903 ///
3904 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3905 ///
3906 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3907 /// [self-unaligned]: Unaligned
3908 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3909 /// [slice-dst]: KnownLayout#dynamically-sized-types
3910 ///
3911 /// # Compile-Time Assertions
3912 ///
3913 /// This method cannot yet be used on unsized types whose dynamically-sized
3914 /// component is zero-sized. Attempting to use this method on such types
3915 /// results in a compile-time assertion error; e.g.:
3916 ///
3917 /// ```compile_fail,E0080
3918 /// use zerocopy::*;
3919 /// # use zerocopy_derive::*;
3920 ///
3921 /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3922 /// #[repr(C, packed)]
3923 /// struct ZSTy {
3924 /// leading_sized: [u8; 2],
3925 /// trailing_dst: [()],
3926 /// }
3927 ///
3928 /// let mut source = [85, 85];
3929    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
3930 /// ```
3931 ///
3932 /// # Examples
3933 ///
3934 /// ```
3935 /// use zerocopy::FromBytes;
3936 /// # use zerocopy_derive::*;
3937 ///
3938 /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3939 /// #[repr(C)]
3940 /// struct PacketTrailer {
3941 /// frame_check_sequence: [u8; 4],
3942 /// }
3943 ///
3944 /// // These are more bytes than are needed to encode a `PacketTrailer`.
3945 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3946 ///
3947 /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
3948 ///
3949 /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
3950 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3951 ///
3952 /// prefix.fill(0);
3953 /// trailer.frame_check_sequence.fill(1);
3954 ///
3955 /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
3956 /// ```
3957 #[must_use = "has no side effects"]
3958 #[inline]
3959 fn mut_from_suffix(
3960 source: &mut [u8],
3961 ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
3962 where
3963 Self: IntoBytes + KnownLayout,
3964 {
3965 static_assert_dst_is_not_zst!(Self);
3966 mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3967 }
3968
3969 /// Interprets the given `source` as a `&Self` with a DST length equal to
3970 /// `count`.
3971 ///
3972 /// This method attempts to return a reference to `source` interpreted as a
3973 /// `Self` with `count` trailing elements. If the length of `source` is not
3974 /// equal to the size of `Self` with `count` elements, or if `source` is not
3975 /// appropriately aligned, this returns `Err`. If [`Self:
3976 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3977 /// error][size-error-from].
3978 ///
3979 /// [self-unaligned]: Unaligned
3980 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3981 ///
3982 /// # Examples
3983 ///
3984 /// ```
3985 /// use zerocopy::FromBytes;
3986 /// # use zerocopy_derive::*;
3987 ///
3988 /// # #[derive(Debug, PartialEq, Eq)]
3989 /// #[derive(FromBytes, Immutable)]
3990 /// #[repr(C)]
3991 /// struct Pixel {
3992 /// r: u8,
3993 /// g: u8,
3994 /// b: u8,
3995 /// a: u8,
3996 /// }
3997 ///
3998 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
3999 ///
4000 /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
4001 ///
4002 /// assert_eq!(pixels, &[
4003 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4004 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4005 /// ]);
4006 ///
4007 /// ```
4008 ///
4009 /// Since an explicit `count` is provided, this method supports types with
4010 /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
4011 /// which do not take an explicit count do not support such types.
4012 ///
4013 /// ```
4014 /// use zerocopy::*;
4015 /// # use zerocopy_derive::*;
4016 ///
4017 /// #[derive(FromBytes, Immutable, KnownLayout)]
4018 /// #[repr(C)]
4019 /// struct ZSTy {
4020 /// leading_sized: [u8; 2],
4021 /// trailing_dst: [()],
4022 /// }
4023 ///
4024 /// let src = &[85, 85][..];
4025 /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
4026 /// assert_eq!(zsty.trailing_dst.len(), 42);
4027 /// ```
4028 ///
4029 /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
4030 #[must_use = "has no side effects"]
4031 #[inline]
4032 fn ref_from_bytes_with_elems(
4033 source: &[u8],
4034 count: usize,
4035 ) -> Result<&Self, CastError<&[u8], Self>>
4036 where
4037 Self: KnownLayout<PointerMetadata = usize> + Immutable,
4038 {
4039 let source = Ptr::from_ref(source);
4040 let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4041 match maybe_slf {
4042 Ok(slf) => Ok(slf.recall_validity().as_ref()),
4043 Err(err) => Err(err.map_src(|s| s.as_ref())),
4044 }
4045 }
4046
4047 /// Interprets the prefix of the given `source` as a DST `&Self` with length
4048 /// equal to `count`.
4049 ///
4050 /// This method attempts to return a reference to the prefix of `source`
4051 /// interpreted as a `Self` with `count` trailing elements, and a reference
4052 /// to the remaining bytes. If there are insufficient bytes, or if `source`
4053 /// is not appropriately aligned, this returns `Err`. If [`Self:
4054 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4055 /// error][size-error-from].
4056 ///
4057 /// [self-unaligned]: Unaligned
4058 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4059 ///
4060 /// # Examples
4061 ///
4062 /// ```
4063 /// use zerocopy::FromBytes;
4064 /// # use zerocopy_derive::*;
4065 ///
4066 /// # #[derive(Debug, PartialEq, Eq)]
4067 /// #[derive(FromBytes, Immutable)]
4068 /// #[repr(C)]
4069 /// struct Pixel {
4070 /// r: u8,
4071 /// g: u8,
4072 /// b: u8,
4073 /// a: u8,
4074 /// }
4075 ///
4076 /// // These are more bytes than are needed to encode two `Pixel`s.
4077 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4078 ///
4079 /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
4080 ///
4081 /// assert_eq!(pixels, &[
4082 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4083 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4084 /// ]);
4085 ///
4086 /// assert_eq!(suffix, &[8, 9]);
4087 /// ```
4088 ///
4089 /// Since an explicit `count` is provided, this method supports types with
4090 /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
4091 /// which do not take an explicit count do not support such types.
4092 ///
4093 /// ```
4094 /// use zerocopy::*;
4095 /// # use zerocopy_derive::*;
4096 ///
4097 /// #[derive(FromBytes, Immutable, KnownLayout)]
4098 /// #[repr(C)]
4099 /// struct ZSTy {
4100 /// leading_sized: [u8; 2],
4101 /// trailing_dst: [()],
4102 /// }
4103 ///
4104 /// let src = &[85, 85][..];
4105 /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
4106 /// assert_eq!(zsty.trailing_dst.len(), 42);
4107 /// ```
4108 ///
4109 /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
4110 #[must_use = "has no side effects"]
4111 #[inline]
4112 fn ref_from_prefix_with_elems(
4113 source: &[u8],
4114 count: usize,
4115 ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
4116 where
4117 Self: KnownLayout<PointerMetadata = usize> + Immutable,
4118 {
4119 ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
4120 }
4121
4122 /// Interprets the suffix of the given `source` as a DST `&Self` with length
4123 /// equal to `count`.
4124 ///
4125 /// This method attempts to return a reference to the suffix of `source`
4126 /// interpreted as a `Self` with `count` trailing elements, and a reference
4127 /// to the preceding bytes. If there are insufficient bytes, or if that
4128 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4129 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4130 /// alignment error][size-error-from].
4131 ///
4132 /// [self-unaligned]: Unaligned
4133 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4134 ///
4135 /// # Examples
4136 ///
4137 /// ```
4138 /// use zerocopy::FromBytes;
4139 /// # use zerocopy_derive::*;
4140 ///
4141 /// # #[derive(Debug, PartialEq, Eq)]
4142 /// #[derive(FromBytes, Immutable)]
4143 /// #[repr(C)]
4144 /// struct Pixel {
4145 /// r: u8,
4146 /// g: u8,
4147 /// b: u8,
4148 /// a: u8,
4149 /// }
4150 ///
4151 /// // These are more bytes than are needed to encode two `Pixel`s.
4152 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4153 ///
4154 /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4155 ///
4156 /// assert_eq!(prefix, &[0, 1]);
4157 ///
4158 /// assert_eq!(pixels, &[
4159 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
4160 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
4161 /// ]);
4162 /// ```
4163 ///
4164 /// Since an explicit `count` is provided, this method supports types with
4165 /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4166 /// which do not take an explicit count do not support such types.
4167 ///
4168 /// ```
4169 /// use zerocopy::*;
4170 /// # use zerocopy_derive::*;
4171 ///
4172 /// #[derive(FromBytes, Immutable, KnownLayout)]
4173 /// #[repr(C)]
4174 /// struct ZSTy {
4175 /// leading_sized: [u8; 2],
4176 /// trailing_dst: [()],
4177 /// }
4178 ///
4179 /// let src = &[85, 85][..];
4180 /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4181 /// assert_eq!(zsty.trailing_dst.len(), 42);
4182 /// ```
4183 ///
4184 /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4185 #[must_use = "has no side effects"]
4186 #[inline]
4187 fn ref_from_suffix_with_elems(
4188 source: &[u8],
4189 count: usize,
4190 ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4191 where
4192 Self: KnownLayout<PointerMetadata = usize> + Immutable,
4193 {
4194 ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4195 }
4196
4197 /// Interprets the given `source` as a `&mut Self` with a DST length equal
4198 /// to `count`.
4199 ///
4200 /// This method attempts to return a reference to `source` interpreted as a
4201 /// `Self` with `count` trailing elements. If the length of `source` is not
4202 /// equal to the size of `Self` with `count` elements, or if `source` is not
4203 /// appropriately aligned, this returns `Err`. If [`Self:
4204 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4205 /// error][size-error-from].
4206 ///
4207 /// [self-unaligned]: Unaligned
4208 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4209 ///
4210 /// # Examples
4211 ///
4212 /// ```
4213 /// use zerocopy::FromBytes;
4214 /// # use zerocopy_derive::*;
4215 ///
4216 /// # #[derive(Debug, PartialEq, Eq)]
4217 /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4218 /// #[repr(C)]
4219 /// struct Pixel {
4220 /// r: u8,
4221 /// g: u8,
4222 /// b: u8,
4223 /// a: u8,
4224 /// }
4225 ///
4226 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4227 ///
4228 /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
4229 ///
4230 /// assert_eq!(pixels, &[
4231 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4232 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4233 /// ]);
4234 ///
4235 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4236 ///
4237 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
4238 /// ```
4239 ///
4240 /// Since an explicit `count` is provided, this method supports types with
4241    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`] which
4242 /// do not take an explicit count do not support such types.
4243 ///
4244 /// ```
4245 /// use zerocopy::*;
4246 /// # use zerocopy_derive::*;
4247 ///
4248 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4249 /// #[repr(C, packed)]
4250 /// struct ZSTy {
4251 /// leading_sized: [u8; 2],
4252 /// trailing_dst: [()],
4253 /// }
4254 ///
4255 /// let src = &mut [85, 85][..];
4256 /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
4257 /// assert_eq!(zsty.trailing_dst.len(), 42);
4258 /// ```
4259 ///
4260    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
4261 #[must_use = "has no side effects"]
4262 #[inline]
4263 fn mut_from_bytes_with_elems(
4264 source: &mut [u8],
4265 count: usize,
4266 ) -> Result<&mut Self, CastError<&mut [u8], Self>>
4267 where
4268 Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
4269 {
4270 let source = Ptr::from_mut(source);
4271 let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4272 match maybe_slf {
4273 Ok(slf) => Ok(slf
4274 .recall_validity::<_, (_, (_, (BecauseExclusive, BecauseExclusive)))>()
4275 .as_mut()),
4276 Err(err) => Err(err.map_src(|s| s.as_mut())),
4277 }
4278 }
4279
4280 /// Interprets the prefix of the given `source` as a `&mut Self` with DST
4281 /// length equal to `count`.
4282 ///
4283 /// This method attempts to return a reference to the prefix of `source`
4284 /// interpreted as a `Self` with `count` trailing elements, and a reference
4285    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4286 /// is not appropriately aligned, this returns `Err`. If [`Self:
4287 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4288 /// error][size-error-from].
4289 ///
4290 /// [self-unaligned]: Unaligned
4291 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4292 ///
4293 /// # Examples
4294 ///
4295 /// ```
4296 /// use zerocopy::FromBytes;
4297 /// # use zerocopy_derive::*;
4298 ///
4299 /// # #[derive(Debug, PartialEq, Eq)]
4300 /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4301 /// #[repr(C)]
4302 /// struct Pixel {
4303 /// r: u8,
4304 /// g: u8,
4305 /// b: u8,
4306 /// a: u8,
4307 /// }
4308 ///
4309 /// // These are more bytes than are needed to encode two `Pixel`s.
4310 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4311 ///
4312 /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
4313 ///
4314 /// assert_eq!(pixels, &[
4315 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4316 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4317 /// ]);
4318 ///
4319 /// assert_eq!(suffix, &[8, 9]);
4320 ///
4321 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4322 /// suffix.fill(1);
4323 ///
4324 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
4325 /// ```
4326 ///
4327 /// Since an explicit `count` is provided, this method supports types with
4328 /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
4329 /// which do not take an explicit count do not support such types.
4330 ///
4331 /// ```
4332 /// use zerocopy::*;
4333 /// # use zerocopy_derive::*;
4334 ///
4335 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4336 /// #[repr(C, packed)]
4337 /// struct ZSTy {
4338 /// leading_sized: [u8; 2],
4339 /// trailing_dst: [()],
4340 /// }
4341 ///
4342 /// let src = &mut [85, 85][..];
4343 /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
4344 /// assert_eq!(zsty.trailing_dst.len(), 42);
4345 /// ```
4346 ///
4347 /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
4348 #[must_use = "has no side effects"]
4349 #[inline]
4350 fn mut_from_prefix_with_elems(
4351 source: &mut [u8],
4352 count: usize,
4353 ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4354 where
4355 Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4356 {
4357 mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
4358 }
4359
4360 /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4361 /// length equal to `count`.
4362 ///
4363 /// This method attempts to return a reference to the suffix of `source`
4364 /// interpreted as a `Self` with `count` trailing elements, and a reference
4365    /// to the preceding bytes. If there are insufficient bytes, or if that
4366 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4367 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4368 /// alignment error][size-error-from].
4369 ///
4370 /// [self-unaligned]: Unaligned
4371 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4372 ///
4373 /// # Examples
4374 ///
4375 /// ```
4376 /// use zerocopy::FromBytes;
4377 /// # use zerocopy_derive::*;
4378 ///
4379 /// # #[derive(Debug, PartialEq, Eq)]
4380 /// #[derive(FromBytes, IntoBytes, Immutable)]
4381 /// #[repr(C)]
4382 /// struct Pixel {
4383 /// r: u8,
4384 /// g: u8,
4385 /// b: u8,
4386 /// a: u8,
4387 /// }
4388 ///
4389 /// // These are more bytes than are needed to encode two `Pixel`s.
4390 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4391 ///
4392 /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4393 ///
4394 /// assert_eq!(prefix, &[0, 1]);
4395 ///
4396 /// assert_eq!(pixels, &[
4397 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
4398 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
4399 /// ]);
4400 ///
4401 /// prefix.fill(9);
4402 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4403 ///
4404 /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4405 /// ```
4406 ///
4407 /// Since an explicit `count` is provided, this method supports types with
4408 /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4409 /// which do not take an explicit count do not support such types.
4410 ///
4411 /// ```
4412 /// use zerocopy::*;
4413 /// # use zerocopy_derive::*;
4414 ///
4415 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4416 /// #[repr(C, packed)]
4417 /// struct ZSTy {
4418 /// leading_sized: [u8; 2],
4419 /// trailing_dst: [()],
4420 /// }
4421 ///
4422 /// let src = &mut [85, 85][..];
4423 /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4424 /// assert_eq!(zsty.trailing_dst.len(), 42);
4425 /// ```
4426 ///
4427 /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4428 #[must_use = "has no side effects"]
4429 #[inline]
4430 fn mut_from_suffix_with_elems(
4431 source: &mut [u8],
4432 count: usize,
4433 ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4434 where
4435 Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4436 {
4437 mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4438 }
4439
4440 /// Reads a copy of `Self` from the given `source`.
4441 ///
4442 /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
4443 ///
4444 /// # Examples
4445 ///
4446 /// ```
4447 /// use zerocopy::FromBytes;
4448 /// # use zerocopy_derive::*;
4449 ///
4450 /// #[derive(FromBytes)]
4451 /// #[repr(C)]
4452 /// struct PacketHeader {
4453 /// src_port: [u8; 2],
4454 /// dst_port: [u8; 2],
4455 /// length: [u8; 2],
4456 /// checksum: [u8; 2],
4457 /// }
4458 ///
4459 /// // These bytes encode a `PacketHeader`.
4460 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4461 ///
4462 /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
4463 ///
4464 /// assert_eq!(header.src_port, [0, 1]);
4465 /// assert_eq!(header.dst_port, [2, 3]);
4466 /// assert_eq!(header.length, [4, 5]);
4467 /// assert_eq!(header.checksum, [6, 7]);
4468 /// ```
4469 #[must_use = "has no side effects"]
4470 #[inline]
4471 fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
4472 where
4473 Self: Sized,
4474 {
4475 match Ref::<_, Unalign<Self>>::sized_from(source) {
4476 Ok(r) => Ok(Ref::read(&r).into_inner()),
4477 Err(CastError::Size(e)) => Err(e.with_dst()),
4478 Err(CastError::Alignment(_)) => {
4479 // SAFETY: `Unalign<Self>` is trivially aligned, so
4480 // `Ref::sized_from` cannot fail due to unmet alignment
4481 // requirements.
4482 unsafe { core::hint::unreachable_unchecked() }
4483 }
4484 Err(CastError::Validity(i)) => match i {},
4485 }
4486 }
4487
4488 /// Reads a copy of `Self` from the prefix of the given `source`.
4489 ///
4490 /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
4491 /// of `source`, returning that `Self` and any remaining bytes. If
4492 /// `source.len() < size_of::<Self>()`, it returns `Err`.
4493 ///
4494 /// # Examples
4495 ///
4496 /// ```
4497 /// use zerocopy::FromBytes;
4498 /// # use zerocopy_derive::*;
4499 ///
4500 /// #[derive(FromBytes)]
4501 /// #[repr(C)]
4502 /// struct PacketHeader {
4503 /// src_port: [u8; 2],
4504 /// dst_port: [u8; 2],
4505 /// length: [u8; 2],
4506 /// checksum: [u8; 2],
4507 /// }
4508 ///
4509 /// // These are more bytes than are needed to encode a `PacketHeader`.
4510 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4511 ///
4512 /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
4513 ///
4514 /// assert_eq!(header.src_port, [0, 1]);
4515 /// assert_eq!(header.dst_port, [2, 3]);
4516 /// assert_eq!(header.length, [4, 5]);
4517 /// assert_eq!(header.checksum, [6, 7]);
4518 /// assert_eq!(body, [8, 9]);
4519 /// ```
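    ///
    /// If `source` is too short, `read_from_prefix` returns `Err` (a minimal
    /// sketch using `u32`):
    ///
    /// ```
    /// use zerocopy::FromBytes;
    ///
    /// // Only 2 bytes are provided, but `u32` needs 4.
    /// assert!(u32::read_from_prefix(&[0, 1][..]).is_err());
    /// ```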
4520 #[must_use = "has no side effects"]
4521 #[inline]
4522 fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
4523 where
4524 Self: Sized,
4525 {
4526 match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
4527 Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
4528 Err(CastError::Size(e)) => Err(e.with_dst()),
4529 Err(CastError::Alignment(_)) => {
4530 // SAFETY: `Unalign<Self>` is trivially aligned, so
4531 // `Ref::sized_from_prefix` cannot fail due to unmet alignment
4532 // requirements.
4533 unsafe { core::hint::unreachable_unchecked() }
4534 }
4535 Err(CastError::Validity(i)) => match i {},
4536 }
4537 }
4538
4539 /// Reads a copy of `Self` from the suffix of the given `source`.
4540 ///
4541 /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
4542 /// of `source`, returning that `Self` and any preceding bytes. If
4543 /// `source.len() < size_of::<Self>()`, it returns `Err`.
4544 ///
4545 /// # Examples
4546 ///
4547 /// ```
4548 /// use zerocopy::FromBytes;
4549 /// # use zerocopy_derive::*;
4550 ///
4551 /// #[derive(FromBytes)]
4552 /// #[repr(C)]
4553 /// struct PacketTrailer {
4554 /// frame_check_sequence: [u8; 4],
4555 /// }
4556 ///
4557 /// // These are more bytes than are needed to encode a `PacketTrailer`.
4558 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4559 ///
4560 /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
4561 ///
4562 /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
4563 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4564 /// ```
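    ///
    /// If `source` is too short, `read_from_suffix` returns `Err` (a minimal
    /// sketch using `u32`):
    ///
    /// ```
    /// use zerocopy::FromBytes;
    ///
    /// // Only 2 bytes are provided, but `u32` needs 4.
    /// assert!(u32::read_from_suffix(&[0, 1][..]).is_err());
    /// ```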
4565 #[must_use = "has no side effects"]
4566 #[inline]
4567 fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
4568 where
4569 Self: Sized,
4570 {
4571 match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
4572 Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
4573 Err(CastError::Size(e)) => Err(e.with_dst()),
4574 Err(CastError::Alignment(_)) => {
4575 // SAFETY: `Unalign<Self>` is trivially aligned, so
4576 // `Ref::sized_from_suffix` cannot fail due to unmet alignment
4577 // requirements.
4578 unsafe { core::hint::unreachable_unchecked() }
4579 }
4580 Err(CastError::Validity(i)) => match i {},
4581 }
4582 }
4583
4584     /// Reads a copy of `Self` from an `io::Read`.
4585     ///
4586     /// This is useful for interfacing with operating system byte sources (files,
4587     /// sockets, etc.).
4588 ///
4589 /// # Examples
4590 ///
4591 /// ```no_run
4592 /// use zerocopy::{byteorder::big_endian::*, FromBytes};
4593 /// use std::fs::File;
4594 /// # use zerocopy_derive::*;
4595 ///
4596 /// #[derive(FromBytes)]
4597 /// #[repr(C)]
4598 /// struct BitmapFileHeader {
4599 /// signature: [u8; 2],
4600 /// size: U32,
4601 /// reserved: U64,
4602 /// offset: U64,
4603 /// }
4604 ///
4605 /// let mut file = File::open("image.bin").unwrap();
4606 /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
4607 /// ```
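    ///
    /// Because `&[u8]` implements `io::Read`, in-memory buffers work as well;
    /// here is a minimal runnable sketch:
    ///
    /// ```
    /// use zerocopy::FromBytes;
    ///
    /// let buf = [1u8, 0, 0, 0];
    /// let n = u32::read_from_io(&buf[..]).unwrap();
    /// assert_eq!(n, u32::from_ne_bytes([1, 0, 0, 0]));
    /// ```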
4608 #[cfg(feature = "std")]
4609 #[inline(always)]
4610 fn read_from_io<R>(mut src: R) -> io::Result<Self>
4611 where
4612 Self: Sized,
4613 R: io::Read,
4614 {
4615 // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
4616 // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
4617 // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
4618 // will not necessarily preserve zeros written to those padding byte
4619 // locations, and so `buf` could contain uninitialized bytes.
4620 let mut buf = CoreMaybeUninit::<Self>::uninit();
4621 buf.zero();
4622
4623 let ptr = Ptr::from_mut(&mut buf);
4624 // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
4625 // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
4626 // cannot be used to write values which will violate `buf`'s bit
4627 // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
4628 // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
4629 // cannot be violated even though `buf` may have more permissive bit
4630 // validity than `ptr`.
4631 let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
4632 let ptr = ptr.as_bytes::<BecauseExclusive>();
4633 src.read_exact(ptr.as_mut())?;
4634 // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
4635 // `FromBytes`.
4636 Ok(unsafe { buf.assume_init() })
4637 }
4638
4639 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4640 #[doc(hidden)]
4641 #[must_use = "has no side effects"]
4642 #[inline(always)]
4643 fn ref_from(source: &[u8]) -> Option<&Self>
4644 where
4645 Self: KnownLayout + Immutable,
4646 {
4647 Self::ref_from_bytes(source).ok()
4648 }
4649
4650 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4651 #[doc(hidden)]
4652 #[must_use = "has no side effects"]
4653 #[inline(always)]
4654 fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4655 where
4656 Self: KnownLayout + IntoBytes,
4657 {
4658 Self::mut_from_bytes(source).ok()
4659 }
4660
4661 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4662 #[doc(hidden)]
4663 #[must_use = "has no side effects"]
4664 #[inline(always)]
4665 fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4666 where
4667 Self: Sized + Immutable,
4668 {
4669 <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4670 }
4671
4672 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4673 #[doc(hidden)]
4674 #[must_use = "has no side effects"]
4675 #[inline(always)]
4676 fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4677 where
4678 Self: Sized + Immutable,
4679 {
4680 <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4681 }
4682
4683 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4684 #[doc(hidden)]
4685 #[must_use = "has no side effects"]
4686 #[inline(always)]
4687 fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4688 where
4689 Self: Sized + IntoBytes,
4690 {
4691 <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4692 }
4693
4694 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4695 #[doc(hidden)]
4696 #[must_use = "has no side effects"]
4697 #[inline(always)]
4698 fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4699 where
4700 Self: Sized + IntoBytes,
4701 {
4702 <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4703 }
4704
4705 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4706 #[doc(hidden)]
4707 #[must_use = "has no side effects"]
4708 #[inline(always)]
4709 fn read_from(source: &[u8]) -> Option<Self>
4710 where
4711 Self: Sized,
4712 {
4713 Self::read_from_bytes(source).ok()
4714 }
4715}
4716
4717/// Interprets the given affix of `source` as a `&T`.
4718///
4719/// This function computes the largest possible size of `T` that can fit in the
4720/// prefix or suffix bytes of `source`, then attempts to return both a reference
4721/// to those bytes interpreted as a `T`, and a reference to the excess bytes.
4722/// If there are insufficient bytes, or if that affix of `source` is not
4723/// appropriately aligned, it returns `Err`.
4724#[inline(always)]
4725fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
4726 source: &[u8],
4727 meta: Option<T::PointerMetadata>,
4728 cast_type: CastType,
4729) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
4730 let (slf, prefix_suffix) = Ptr::from_ref(source)
4731 .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
4732 .map_err(|err| err.map_src(|s| s.as_ref()))?;
4733 Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
4734}
4735
4736/// Interprets the given affix of `source` as a `&mut T` without
4737/// copying.
4738///
4739/// This function computes the largest possible size of `T` that can fit in the
4740/// prefix or suffix bytes of `source`, then attempts to return both a reference
4741/// to those bytes interpreted as a `T`, and a reference to the excess bytes.
4742/// If there are insufficient bytes, or if that affix of `source` is not
4743/// appropriately aligned, it returns `Err`.
4744#[inline(always)]
4745fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
4746 source: &mut [u8],
4747 meta: Option<T::PointerMetadata>,
4748 cast_type: CastType,
4749) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
4750 let (slf, prefix_suffix) = Ptr::from_mut(source)
4751 .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
4752 .map_err(|err| err.map_src(|s| s.as_mut()))?;
4753 Ok((slf.recall_validity().as_mut(), prefix_suffix.as_mut()))
4754}
4755
4756/// Analyzes whether a type is [`IntoBytes`].
4757///
4758/// This derive analyzes, at compile time, whether the annotated type satisfies
4759/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
4760/// sound to do so. This derive can be applied to structs and enums (see below
4761/// for union support); e.g.:
4762///
4763/// ```
4764/// # use zerocopy_derive::{IntoBytes};
4765/// #[derive(IntoBytes)]
4766/// #[repr(C)]
4767/// struct MyStruct {
4768/// # /*
4769/// ...
4770/// # */
4771/// }
4772///
4773/// #[derive(IntoBytes)]
4774/// #[repr(u8)]
4775/// enum MyEnum {
4776/// # Variant,
4777/// # /*
4778/// ...
4779/// # */
4780/// }
4781/// ```
4782///
4783/// [safety conditions]: trait@IntoBytes#safety
4784///
4785/// # Error Messages
4786///
4787/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
4788/// for `IntoBytes` is implemented, you may get an error like this:
4789///
4790/// ```text
4791/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
4792/// --> lib.rs:23:10
4793/// |
4794/// 1 | #[derive(IntoBytes)]
4795/// | ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
4796/// |
4797/// = help: the following implementations were found:
4798/// <() as PaddingFree<T, false>>
4799/// ```
4800///
4801/// This error indicates that the type being annotated has padding bytes, which
4802/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
4803/// fields by using types in the [`byteorder`] module, wrapping field types in
4804/// [`Unalign`], adding explicit struct fields where those padding bytes would
4805/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
4806/// layout] for more information about type layout and padding.
4807///
4808/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
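///
/// For example, a hypothetical `repr(C)` struct with a `u16` field followed by
/// a single `u8` field would have one byte of trailing padding and could not
/// derive `IntoBytes`; spelling that padding out as an explicit field fixes
/// this (a minimal sketch):
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct Record {
///     value: u16,
///     flag: u8,
///     // Explicit padding, so `Record` has no implicit padding bytes.
///     _padding: u8,
/// }
/// ```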
4809///
4810/// # Unions
4811///
4812/// Currently, union bit validity is [up in the air][union-validity], and so
4813/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
4814/// However, implementing `IntoBytes` on a union type is likely sound on all
4815/// existing Rust toolchains - it's just that it may become unsound in the
4816/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
4817/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
4818///
4819/// ```shell
4820/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
4821/// ```
4822///
4823/// However, it is your responsibility to ensure that this derive is sound on
4824/// the specific versions of the Rust toolchain you are using! We make no
4825/// stability or soundness guarantees regarding this cfg, and may remove it at
4826/// any point.
4827///
4828/// We are actively working with Rust to stabilize the necessary language
4829/// guarantees to support this in a forwards-compatible way, which will enable
4830/// us to remove the cfg gate. As part of this effort, we need to know how much
4831/// demand there is for this feature. If you would like to use `IntoBytes` on
4832/// unions, [please let us know][discussion].
4833///
4834/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
4835/// [discussion]: https://github.com/google/zerocopy/discussions/1802
4836///
4837/// # Analysis
4838///
4839/// *This section describes, roughly, the analysis performed by this derive to
4840/// determine whether it is sound to implement `IntoBytes` for a given type.
4841/// Unless you are modifying the implementation of this derive, or attempting to
4842/// manually implement `IntoBytes` for a type yourself, you don't need to read
4843/// this section.*
4844///
4845/// If a type has the following properties, then this derive can implement
4846/// `IntoBytes` for that type:
4847///
4848/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
4849/// - if the type is `repr(transparent)` or `repr(packed)`, it is
4850/// [`IntoBytes`] if its fields are [`IntoBytes`]; else,
4851/// - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
4852/// if its field is [`IntoBytes`]; else,
4853/// - if the type has no generic parameters, it is [`IntoBytes`] if the type
4854/// is sized and has no padding bytes; else,
4855/// - if the type is `repr(C)`, its fields must be [`Unaligned`].
4856/// - If the type is an enum:
4857/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
4858/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
4859/// - It must have no padding bytes.
4860/// - Its fields must be [`IntoBytes`].
4861///
4862/// This analysis is subject to change. Unsafe code may *only* rely on the
4863/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
4864/// implementation details of this derive.
4865///
4866/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
4867#[cfg(any(feature = "derive", test))]
4868#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
4869pub use zerocopy_derive::IntoBytes;
4870
4871/// Types that can be converted to an immutable slice of initialized bytes.
4872///
4873/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
4874/// same size. This is useful for efficiently serializing structured data as raw
4875/// bytes.
4876///
4877/// # Implementation
4878///
4879/// **Do not implement this trait yourself!** Instead, use
4880/// [`#[derive(IntoBytes)]`][derive]; e.g.:
4881///
4882/// ```
4883/// # use zerocopy_derive::IntoBytes;
4884/// #[derive(IntoBytes)]
4885/// #[repr(C)]
4886/// struct MyStruct {
4887/// # /*
4888/// ...
4889/// # */
4890/// }
4891///
4892/// #[derive(IntoBytes)]
4893/// #[repr(u8)]
4894/// enum MyEnum {
4895/// # Variant0,
4896/// # /*
4897/// ...
4898/// # */
4899/// }
4900/// ```
4901///
4902/// This derive performs a sophisticated, compile-time safety analysis to
4903/// determine whether a type is `IntoBytes`. See the [derive
4904/// documentation][derive] for guidance on how to interpret error messages
4905/// produced by the derive's analysis.
4906///
4907/// # Safety
4908///
4909/// *This section describes what is required in order for `T: IntoBytes`, and
4910/// what unsafe code may assume of such types. If you don't plan on implementing
4911/// `IntoBytes` manually, and you don't plan on writing unsafe code that
4912/// operates on `IntoBytes` types, then you don't need to read this section.*
4913///
4914/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
4915/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
4916/// marked as `IntoBytes` which violates this contract, it may cause undefined
4917/// behavior.
4918///
4919/// `#[derive(IntoBytes)]` only permits [types which satisfy these
4920/// requirements][derive-analysis].
4921///
4922#[cfg_attr(
4923 feature = "derive",
4924 doc = "[derive]: zerocopy_derive::IntoBytes",
4925 doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
4926)]
4927#[cfg_attr(
4928 not(feature = "derive"),
4929 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
4930 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
4931)]
4932#[cfg_attr(
4933 zerocopy_diagnostic_on_unimplemented_1_78_0,
4934 diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
4935)]
4936pub unsafe trait IntoBytes {
4937 // The `Self: Sized` bound makes it so that this function doesn't prevent
4938 // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
4939 // prevent object safety, but those provide a benefit in exchange for object
4940 // safety. If at some point we remove those methods, change their type
4941 // signatures, or move them out of this trait so that `IntoBytes` is object
4942 // safe again, it's important that this function not prevent object safety.
4943 #[doc(hidden)]
4944 fn only_derive_is_allowed_to_implement_this_trait()
4945 where
4946 Self: Sized;
4947
4948 /// Gets the bytes of this value.
4949 ///
4950 /// # Examples
4951 ///
4952 /// ```
4953 /// use zerocopy::IntoBytes;
4954 /// # use zerocopy_derive::*;
4955 ///
4956 /// #[derive(IntoBytes, Immutable)]
4957 /// #[repr(C)]
4958 /// struct PacketHeader {
4959 /// src_port: [u8; 2],
4960 /// dst_port: [u8; 2],
4961 /// length: [u8; 2],
4962 /// checksum: [u8; 2],
4963 /// }
4964 ///
4965 /// let header = PacketHeader {
4966 /// src_port: [0, 1],
4967 /// dst_port: [2, 3],
4968 /// length: [4, 5],
4969 /// checksum: [6, 7],
4970 /// };
4971 ///
4972 /// let bytes = header.as_bytes();
4973 ///
4974 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
4975 /// ```
4976 #[must_use = "has no side effects"]
4977 #[inline(always)]
4978 fn as_bytes(&self) -> &[u8]
4979 where
4980 Self: Immutable,
4981 {
4982 // Note that this method does not have a `Self: Sized` bound;
4983 // `size_of_val` works for unsized values too.
4984 let len = mem::size_of_val(self);
4985 let slf: *const Self = self;
4986
4987 // SAFETY:
4988 // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
4989 // many bytes because...
4990 // - `slf` is the same pointer as `self`, and `self` is a reference
4991 // which points to an object whose size is `len`. Thus...
4992 // - The entire region of `len` bytes starting at `slf` is contained
4993 // within a single allocation.
4994 // - `slf` is non-null.
4995 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
4996 // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
4997 // initialized.
4998 // - Since `slf` is derived from `self`, and `self` is an immutable
4999 // reference, the only other references to this memory region that
5000 // could exist are other immutable references, and those don't allow
5001 // mutation. `Self: Immutable` prohibits types which contain
5002 // `UnsafeCell`s, which are the only types for which this rule
5003 // wouldn't be sufficient.
5004 // - The total size of the resulting slice is no larger than
5005 // `isize::MAX` because no allocation produced by safe code can be
5006 // larger than `isize::MAX`.
5007 //
5008 // TODO(#429): Add references to docs and quotes.
5009 unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
5010 }
5011
5012 /// Gets the bytes of this value mutably.
5013 ///
5014 /// # Examples
5015 ///
5016 /// ```
5017 /// use zerocopy::IntoBytes;
5018 /// # use zerocopy_derive::*;
5019 ///
5020 /// # #[derive(Eq, PartialEq, Debug)]
5021 /// #[derive(FromBytes, IntoBytes, Immutable)]
5022 /// #[repr(C)]
5023 /// struct PacketHeader {
5024 /// src_port: [u8; 2],
5025 /// dst_port: [u8; 2],
5026 /// length: [u8; 2],
5027 /// checksum: [u8; 2],
5028 /// }
5029 ///
5030 /// let mut header = PacketHeader {
5031 /// src_port: [0, 1],
5032 /// dst_port: [2, 3],
5033 /// length: [4, 5],
5034 /// checksum: [6, 7],
5035 /// };
5036 ///
5037 /// let bytes = header.as_mut_bytes();
5038 ///
5039 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5040 ///
5041 /// bytes.reverse();
5042 ///
5043 /// assert_eq!(header, PacketHeader {
5044 /// src_port: [7, 6],
5045 /// dst_port: [5, 4],
5046 /// length: [3, 2],
5047 /// checksum: [1, 0],
5048 /// });
5049 /// ```
5050 #[must_use = "has no side effects"]
5051 #[inline(always)]
5052 fn as_mut_bytes(&mut self) -> &mut [u8]
5053 where
5054 Self: FromBytes,
5055 {
5056 // Note that this method does not have a `Self: Sized` bound;
5057 // `size_of_val` works for unsized values too.
5058 let len = mem::size_of_val(self);
5059 let slf: *mut Self = self;
5060
5061 // SAFETY:
5062 // - `slf.cast::<u8>()` is valid for reads and writes for `len *
5063 // size_of::<u8>()` many bytes because...
5064 // - `slf` is the same pointer as `self`, and `self` is a reference
5065 // which points to an object whose size is `len`. Thus...
5066 // - The entire region of `len` bytes starting at `slf` is contained
5067 // within a single allocation.
5068 // - `slf` is non-null.
5069 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5070 // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5071 // initialized.
5072 // - `Self: FromBytes` ensures that no write to this memory region
5073 // could result in it containing an invalid `Self`.
5074 // - Since `slf` is derived from `self`, and `self` is a mutable
5075 // reference, no other references to this memory region can exist.
5076 // - The total size of the resulting slice is no larger than
5077 // `isize::MAX` because no allocation produced by safe code can be
5078 // larger than `isize::MAX`.
5079 //
5080 // TODO(#429): Add references to docs and quotes.
5081 unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
5082 }
5083
5084 /// Writes a copy of `self` to `dst`.
5085 ///
5086 /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5087 ///
5088 /// # Examples
5089 ///
5090 /// ```
5091 /// use zerocopy::IntoBytes;
5092 /// # use zerocopy_derive::*;
5093 ///
5094 /// #[derive(IntoBytes, Immutable)]
5095 /// #[repr(C)]
5096 /// struct PacketHeader {
5097 /// src_port: [u8; 2],
5098 /// dst_port: [u8; 2],
5099 /// length: [u8; 2],
5100 /// checksum: [u8; 2],
5101 /// }
5102 ///
5103 /// let header = PacketHeader {
5104 /// src_port: [0, 1],
5105 /// dst_port: [2, 3],
5106 /// length: [4, 5],
5107 /// checksum: [6, 7],
5108 /// };
5109 ///
5110 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5111 ///
5112 /// header.write_to(&mut bytes[..]);
5113 ///
5114 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5115 /// ```
5116 ///
5117 /// If too many or too few target bytes are provided, `write_to` returns
5118 /// `Err` and leaves the target bytes unmodified:
5119 ///
5120 /// ```
5121 /// # use zerocopy::IntoBytes;
5122 /// # let header = u128::MAX;
5123 /// let mut excessive_bytes = &mut [0u8; 128][..];
5124 ///
5125 /// let write_result = header.write_to(excessive_bytes);
5126 ///
5127 /// assert!(write_result.is_err());
5128 /// assert_eq!(excessive_bytes, [0u8; 128]);
5129 /// ```
5130 #[must_use = "callers should check the return value to see if the operation succeeded"]
5131 #[inline]
5132 fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5133 where
5134 Self: Immutable,
5135 {
5136 let src = self.as_bytes();
5137 if dst.len() == src.len() {
5138 // SAFETY: Within this branch of the conditional, we have ensured
5139 // that `dst.len()` is equal to `src.len()`. Neither the size of the
5140 // source nor the size of the destination change between the above
5141 // size check and the invocation of `copy_unchecked`.
5142 unsafe { util::copy_unchecked(src, dst) }
5143 Ok(())
5144 } else {
5145 Err(SizeError::new(self))
5146 }
5147 }
5148
5149 /// Writes a copy of `self` to the prefix of `dst`.
5150 ///
5151 /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5152 /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5153 ///
5154 /// # Examples
5155 ///
5156 /// ```
5157 /// use zerocopy::IntoBytes;
5158 /// # use zerocopy_derive::*;
5159 ///
5160 /// #[derive(IntoBytes, Immutable)]
5161 /// #[repr(C)]
5162 /// struct PacketHeader {
5163 /// src_port: [u8; 2],
5164 /// dst_port: [u8; 2],
5165 /// length: [u8; 2],
5166 /// checksum: [u8; 2],
5167 /// }
5168 ///
5169 /// let header = PacketHeader {
5170 /// src_port: [0, 1],
5171 /// dst_port: [2, 3],
5172 /// length: [4, 5],
5173 /// checksum: [6, 7],
5174 /// };
5175 ///
5176 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5177 ///
5178 /// header.write_to_prefix(&mut bytes[..]);
5179 ///
5180 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5181 /// ```
5182 ///
5183 /// If insufficient target bytes are provided, `write_to_prefix` returns
5184 /// `Err` and leaves the target bytes unmodified:
5185 ///
5186 /// ```
5187 /// # use zerocopy::IntoBytes;
5188 /// # let header = u128::MAX;
5189     /// let mut insufficient_bytes = &mut [0, 0][..];
5190     ///
5191     /// let write_result = header.write_to_prefix(insufficient_bytes);
5192     ///
5193     /// assert!(write_result.is_err());
5194     /// assert_eq!(insufficient_bytes, [0, 0]);
5195 /// ```
5196 #[must_use = "callers should check the return value to see if the operation succeeded"]
5197 #[inline]
5198 fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5199 where
5200 Self: Immutable,
5201 {
5202 let src = self.as_bytes();
5203 match dst.get_mut(..src.len()) {
5204 Some(dst) => {
5205 // SAFETY: Within this branch of the `match`, we have ensured
5206 // through fallible subslicing that `dst.len()` is equal to
5207 // `src.len()`. Neither the size of the source nor the size of
5208 // the destination change between the above subslicing operation
5209 // and the invocation of `copy_unchecked`.
5210 unsafe { util::copy_unchecked(src, dst) }
5211 Ok(())
5212 }
5213 None => Err(SizeError::new(self)),
5214 }
5215 }
5216
5217 /// Writes a copy of `self` to the suffix of `dst`.
5218 ///
5219 /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5220 /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5221 ///
5222 /// # Examples
5223 ///
5224 /// ```
5225 /// use zerocopy::IntoBytes;
5226 /// # use zerocopy_derive::*;
5227 ///
5228 /// #[derive(IntoBytes, Immutable)]
5229 /// #[repr(C)]
5230 /// struct PacketHeader {
5231 /// src_port: [u8; 2],
5232 /// dst_port: [u8; 2],
5233 /// length: [u8; 2],
5234 /// checksum: [u8; 2],
5235 /// }
5236 ///
5237 /// let header = PacketHeader {
5238 /// src_port: [0, 1],
5239 /// dst_port: [2, 3],
5240 /// length: [4, 5],
5241 /// checksum: [6, 7],
5242 /// };
5243 ///
5244 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5245 ///
5246 /// header.write_to_suffix(&mut bytes[..]);
5247 ///
5248 /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5249 ///
5250     /// let mut insufficient_bytes = &mut [0, 0][..];
5251     ///
5252     /// let write_result = header.write_to_suffix(insufficient_bytes);
5253     ///
5254     /// assert!(write_result.is_err());
5255     /// assert_eq!(insufficient_bytes, [0, 0]);
5256 /// ```
5257 ///
5258 /// If insufficient target bytes are provided, `write_to_suffix` returns
5259 /// `Err` and leaves the target bytes unmodified:
5260 ///
5261 /// ```
5262 /// # use zerocopy::IntoBytes;
5263 /// # let header = u128::MAX;
5264     /// let mut insufficient_bytes = &mut [0, 0][..];
5265     ///
5266     /// let write_result = header.write_to_suffix(insufficient_bytes);
5267     ///
5268     /// assert!(write_result.is_err());
5269     /// assert_eq!(insufficient_bytes, [0, 0]);
5270 /// ```
5271 #[must_use = "callers should check the return value to see if the operation succeeded"]
5272 #[inline]
5273 fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5274 where
5275 Self: Immutable,
5276 {
5277 let src = self.as_bytes();
5278 let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5279 start
5280 } else {
5281 return Err(SizeError::new(self));
5282 };
5283 let dst = if let Some(dst) = dst.get_mut(start..) {
5284 dst
5285 } else {
5286 // get_mut() should never return None here. We return a `SizeError`
5287 // rather than .unwrap() because in the event the branch is not
5288 // optimized away, returning a value is generally lighter-weight
5289 // than panicking.
5290 return Err(SizeError::new(self));
5291 };
5292 // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5293 // `dst.len()` is equal to `src.len()`. Neither the size of the source
5294 // nor the size of the destination change between the above subslicing
5295 // operation and the invocation of `copy_unchecked`.
5296 unsafe {
5297 util::copy_unchecked(src, dst);
5298 }
5299 Ok(())
5300 }
5301
5302 /// Writes a copy of `self` to an `io::Write`.
5303 ///
5304 /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5305 /// for interfacing with operating system byte sinks (files, sockets, etc.).
5306 ///
5307 /// # Examples
5308 ///
5309 /// ```no_run
5310 /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5311 /// use std::fs::File;
5312 /// # use zerocopy_derive::*;
5313 ///
5314 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5315 /// #[repr(C, packed)]
5316 /// struct GrayscaleImage {
5317 /// height: U16,
5318 /// width: U16,
5319 /// pixels: [U16],
5320 /// }
5321 ///
5322 /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5323 /// let mut file = File::create("image.bin").unwrap();
5324 /// image.write_to_io(&mut file).unwrap();
5325 /// ```
5326 ///
5327 /// If the write fails, `write_to_io` returns `Err` and a partial write may
5328     /// have occurred; e.g.:
5329 ///
5330 /// ```
5331 /// # use zerocopy::IntoBytes;
5332 ///
5333 /// let src = u128::MAX;
5334 /// let mut dst = [0u8; 2];
5335 ///
5336 /// let write_result = src.write_to_io(&mut dst[..]);
5337 ///
5338 /// assert!(write_result.is_err());
5339 /// assert_eq!(dst, [255, 255]);
5340 /// ```
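    ///
    /// Because `Vec<u8>` implements `io::Write`, in-memory sinks work as well;
    /// here is a minimal runnable sketch:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// let mut dst = Vec::new();
    /// 42u32.write_to_io(&mut dst).unwrap();
    /// assert_eq!(dst, 42u32.to_ne_bytes());
    /// ```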
5341 #[cfg(feature = "std")]
5342 #[inline(always)]
5343 fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
5344 where
5345 Self: Immutable,
5346 W: io::Write,
5347 {
5348 dst.write_all(self.as_bytes())
5349 }
5350
5351 #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
5352 #[doc(hidden)]
5353 #[inline]
5354 fn as_bytes_mut(&mut self) -> &mut [u8]
5355 where
5356 Self: FromBytes,
5357 {
5358 self.as_mut_bytes()
5359 }
5360}
5361
5362/// Analyzes whether a type is [`Unaligned`].
5363///
5364/// This derive analyzes, at compile time, whether the annotated type satisfies
5365/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5366/// sound to do so. This derive can be applied to structs, enums, and unions;
5367/// e.g.:
5368///
5369/// ```
5370/// # use zerocopy_derive::Unaligned;
5371/// #[derive(Unaligned)]
5372/// #[repr(C)]
5373/// struct MyStruct {
5374/// # /*
5375/// ...
5376/// # */
5377/// }
5378///
5379/// #[derive(Unaligned)]
5380/// #[repr(u8)]
5381/// enum MyEnum {
5382/// # Variant0,
5383/// # /*
5384/// ...
5385/// # */
5386/// }
5387///
5388/// #[derive(Unaligned)]
5389/// #[repr(packed)]
5390/// union MyUnion {
5391/// # variant: u8,
5392/// # /*
5393/// ...
5394/// # */
5395/// }
5396/// ```
5397///
5398/// # Analysis
5399///
5400/// *This section describes, roughly, the analysis performed by this derive to
5401/// determine whether it is sound to implement `Unaligned` for a given type.
5402/// Unless you are modifying the implementation of this derive, or attempting to
5403/// manually implement `Unaligned` for a type yourself, you don't need to read
5404/// this section.*
5405///
5406/// If a type has the following properties, then this derive can implement
5407/// `Unaligned` for that type:
5408///
5409/// - If the type is a struct or union:
5410/// - If `repr(align(N))` is provided, `N` must equal 1.
5411/// - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5412/// [`Unaligned`].
5413/// - If the type is not `repr(C)` or `repr(transparent)`, it must be
5414/// `repr(packed)` or `repr(packed(1))`.
5415/// - If the type is an enum:
5416/// - If `repr(align(N))` is provided, `N` must equal 1.
5417/// - It must be a field-less enum (meaning that all variants have no fields).
5418/// - It must be `repr(i8)` or `repr(u8)`.
5419///
5420/// [safety conditions]: trait@Unaligned#safety
5421#[cfg(any(feature = "derive", test))]
5422#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5423pub use zerocopy_derive::Unaligned;
5424
5425/// Types with no alignment requirement.
5426///
5427/// If `T: Unaligned`, then `align_of::<T>() == 1`.
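///
/// For example, the `U16` type from the `byteorder` module has an alignment of
/// 1, so a reference to it may be produced from bytes at any offset (a minimal
/// sketch):
///
/// ```
/// use zerocopy::byteorder::little_endian::U16;
/// use zerocopy::FromBytes;
///
/// let bytes = [0u8, 1, 2, 3];
/// // An odd offset is fine, because `U16: Unaligned`.
/// let v = U16::ref_from_bytes(&bytes[1..3]).unwrap();
/// assert_eq!(v.get(), u16::from_le_bytes([1, 2]));
/// ```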
5428///
5429/// # Implementation
5430///
5431/// **Do not implement this trait yourself!** Instead, use
5432/// [`#[derive(Unaligned)]`][derive]; e.g.:
5433///
5434/// ```
5435/// # use zerocopy_derive::Unaligned;
5436/// #[derive(Unaligned)]
5437/// #[repr(C)]
5438/// struct MyStruct {
5439/// # /*
5440/// ...
5441/// # */
5442/// }
5443///
5444/// #[derive(Unaligned)]
5445/// #[repr(u8)]
5446/// enum MyEnum {
5447/// # Variant0,
5448/// # /*
5449/// ...
5450/// # */
5451/// }
5452///
5453/// #[derive(Unaligned)]
5454/// #[repr(packed)]
5455/// union MyUnion {
5456/// # variant: u8,
5457/// # /*
5458/// ...
5459/// # */
5460/// }
5461/// ```
5462///
5463/// This derive performs a sophisticated, compile-time safety analysis to
5464/// determine whether a type is `Unaligned`.
5465///
5466/// # Safety
5467///
5468/// *This section describes what is required in order for `T: Unaligned`, and
5469/// what unsafe code may assume of such types. If you don't plan on implementing
5470/// `Unaligned` manually, and you don't plan on writing unsafe code that
5471/// operates on `Unaligned` types, then you don't need to read this section.*
5472///
5473/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
5474/// reference to `T` at any memory location regardless of alignment. If a type
5475/// is marked as `Unaligned` which violates this contract, it may cause
5476/// undefined behavior.
5477///
5478/// `#[derive(Unaligned)]` only permits [types which satisfy these
5479/// requirements][derive-analysis].
5480///
5481#[cfg_attr(
5482 feature = "derive",
5483 doc = "[derive]: zerocopy_derive::Unaligned",
5484 doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
5485)]
5486#[cfg_attr(
5487 not(feature = "derive"),
5488 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
5489 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
5490)]
5491#[cfg_attr(
5492 zerocopy_diagnostic_on_unimplemented_1_78_0,
5493 diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
5494)]
5495pub unsafe trait Unaligned {
5496 // The `Self: Sized` bound makes it so that `Unaligned` is still object
5497 // safe.
5498 #[doc(hidden)]
5499 fn only_derive_is_allowed_to_implement_this_trait()
5500 where
5501 Self: Sized;
5502}
5503
5504/// Derives an optimized implementation of [`Hash`] for types that implement
5505/// [`IntoBytes`] and [`Immutable`].
5506///
5507/// The standard library's derive for `Hash` generates a recursive descent
5508/// into the fields of the type it is applied to. Instead, the implementation
5509/// derived by this macro makes a single call to [`Hasher::write()`] for both
5510/// [`Hash::hash()`] and [`Hash::hash_slice()`], feeding the hasher the bytes
5511/// of the type or slice all at once.
5512///
5513/// [`Hash`]: core::hash::Hash
5514/// [`Hash::hash()`]: core::hash::Hash::hash()
5515/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
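///
/// A minimal usage sketch (the `Point` type below is hypothetical):
///
/// ```
/// # use zerocopy_derive::*;
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
///
/// #[derive(ByteHash, IntoBytes, Immutable)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// // The derived impl feeds the raw bytes of `Point` to the hasher in a
/// // single `Hasher::write` call.
/// let mut hasher = DefaultHasher::new();
/// Point { x: 1, y: 2 }.hash(&mut hasher);
/// let _digest = hasher.finish();
/// ```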
5516#[cfg(any(feature = "derive", test))]
5517#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5518pub use zerocopy_derive::ByteHash;
5519
5520/// Derives an optimized implementation of [`PartialEq`] and [`Eq`] for types
5521/// that implement [`IntoBytes`] and [`Immutable`].
5522///
5523/// The standard library's derive for [`PartialEq`] generates a recursive
5524/// descent into the fields of the type it is applied to. Instead, the
5525/// implementation derived by this macro performs a single slice comparison of
5526/// the bytes of the two values being compared.
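///
/// A minimal usage sketch (the `Point` type below is hypothetical):
///
/// ```
/// # use zerocopy_derive::*;
/// #[derive(ByteEq, IntoBytes, Immutable)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// // The derived impl compares the raw bytes of the two values in a single
/// // slice comparison.
/// assert!(Point { x: 1, y: 2 } == Point { x: 1, y: 2 });
/// assert!(Point { x: 1, y: 2 } != Point { x: 2, y: 1 });
/// ```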
5527#[cfg(any(feature = "derive", test))]
5528#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5529pub use zerocopy_derive::ByteEq;
5530
5531#[cfg(feature = "alloc")]
5532#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5533#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5534mod alloc_support {
5535 use super::*;
5536
5537 /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5538 /// vector. The new items are initialized with zeros.
5539 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5540 #[doc(hidden)]
5541 #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5542 #[inline(always)]
5543 pub fn extend_vec_zeroed<T: FromZeros>(
5544 v: &mut Vec<T>,
5545 additional: usize,
5546 ) -> Result<(), AllocError> {
5547 <T as FromZeros>::extend_vec_zeroed(v, additional)
5548 }
5549
5550 /// Inserts `additional` new items into `Vec<T>` at `position`. The new
5551 /// items are initialized with zeros.
5552 ///
5553 /// # Panics
5554 ///
5555 /// Panics if `position > v.len()`.
5556 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5557 #[doc(hidden)]
5558 #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5559 #[inline(always)]
5560 pub fn insert_vec_zeroed<T: FromZeros>(
5561 v: &mut Vec<T>,
5562 position: usize,
5563 additional: usize,
5564 ) -> Result<(), AllocError> {
5565 <T as FromZeros>::insert_vec_zeroed(v, position, additional)
5566 }
5567}
5568
5569#[cfg(feature = "alloc")]
5570#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5571#[doc(hidden)]
5572pub use alloc_support::*;
5573
5574#[cfg(test)]
5575#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5576mod tests {
5577 use static_assertions::assert_impl_all;
5578
5579 use super::*;
5580 use crate::util::testutil::*;
5581
5582 // An unsized type.
5583 //
5584 // This is used to test the custom derives of our traits. The `[u8]` type
5585 // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5586 #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
5587 #[repr(transparent)]
5588 struct Unsized([u8]);
5589
5590 impl Unsized {
5591 fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
5592         // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
5593 // `Unsized` are the same, so are the layouts of `&mut [u8]` and
5594 // `&mut Unsized`. [1] Even if it turns out that this isn't actually
5595 // guaranteed by the language spec, we can just change this since
5596 // it's in test code.
5597 //
5598 // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5599 unsafe { mem::transmute(slc) }
5600 }
5601 }
5602
5603 #[test]
5604 fn test_known_layout() {
5605 // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
5606 // Test that `PhantomData<$ty>` has the same layout as `()` regardless
5607 // of `$ty`.
5608 macro_rules! test {
5609 ($ty:ty, $expect:expr) => {
5610 let expect = $expect;
5611 assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
5612 assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
5613 assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
5614 };
5615 }
5616
5617 let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
5618 align: NonZeroUsize::new(align).unwrap(),
5619 size_info: match _trailing_slice_elem_size {
5620 None => SizeInfo::Sized { size: offset },
5621 Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5622 },
5623 };
5624
5625 test!((), layout(0, 1, None));
5626 test!(u8, layout(1, 1, None));
5627 // Use `align_of` because `u64` alignment may be smaller than 8 on some
5628 // platforms.
5629 test!(u64, layout(8, mem::align_of::<u64>(), None));
5630 test!(AU64, layout(8, 8, None));
5631
5632 test!(Option<&'static ()>, usize::LAYOUT);
5633
5634 test!([()], layout(0, 1, Some(0)));
5635 test!([u8], layout(0, 1, Some(1)));
5636 test!(str, layout(0, 1, Some(1)));
5637 }
5638
5639 #[cfg(feature = "derive")]
5640 #[test]
5641 fn test_known_layout_derive() {
5642 // In this and other files (`late_compile_pass.rs`,
5643 // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5644 // modes of `derive(KnownLayout)` for the following combination of
5645 // properties:
5646 //
5647 // +------------+--------------------------------------+-----------+
5648 // | | trailing field properties | |
5649 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5650 // |------------+----------+----------------+----------+-----------|
5651 // | N | N | N | N | KL00 |
5652 // | N | N | N | Y | KL01 |
5653 // | N | N | Y | N | KL02 |
5654 // | N | N | Y | Y | KL03 |
5655 // | N | Y | N | N | KL04 |
5656 // | N | Y | N | Y | KL05 |
5657 // | N | Y | Y | N | KL06 |
5658 // | N | Y | Y | Y | KL07 |
5659 // | Y | N | N | N | KL08 |
5660 // | Y | N | N | Y | KL09 |
5661 // | Y | N | Y | N | KL10 |
5662 // | Y | N | Y | Y | KL11 |
5663 // | Y | Y | N | N | KL12 |
5664 // | Y | Y | N | Y | KL13 |
5665 // | Y | Y | Y | N | KL14 |
5666 // | Y | Y | Y | Y | KL15 |
5667 // +------------+----------+----------------+----------+-----------+
5668
5669 struct NotKnownLayout<T = ()> {
5670 _t: T,
5671 }
5672
5673 #[derive(KnownLayout)]
5674 #[repr(C)]
5675 struct AlignSize<const ALIGN: usize, const SIZE: usize>
5676 where
5677 elain::Align<ALIGN>: elain::Alignment,
5678 {
5679 _align: elain::Align<ALIGN>,
5680 size: [u8; SIZE],
5681 }
5682
5683 type AU16 = AlignSize<2, 2>;
5684 type AU32 = AlignSize<4, 4>;
5685
5686 fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
5687
5688 let sized_layout = |align, size| DstLayout {
5689 align: NonZeroUsize::new(align).unwrap(),
5690 size_info: SizeInfo::Sized { size },
5691 };
5692
5693 let unsized_layout = |align, elem_size, offset| DstLayout {
5694 align: NonZeroUsize::new(align).unwrap(),
5695 size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5696 };
5697
5698 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5699 // | N | N | N | Y | KL01 |
5700 #[allow(dead_code)]
5701 #[derive(KnownLayout)]
5702 struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5703
5704 let expected = DstLayout::for_type::<KL01>();
5705
5706 assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
5707 assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
5708
5709 // ...with `align(N)`:
5710 #[allow(dead_code)]
5711 #[derive(KnownLayout)]
5712 #[repr(align(64))]
5713 struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5714
5715 let expected = DstLayout::for_type::<KL01Align>();
5716
5717 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
5718 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5719
5720 // ...with `packed`:
5721 #[allow(dead_code)]
5722 #[derive(KnownLayout)]
5723 #[repr(packed)]
5724 struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5725
5726 let expected = DstLayout::for_type::<KL01Packed>();
5727
5728 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
5729 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
5730
5731 // ...with `packed(N)`:
5732 #[allow(dead_code)]
5733 #[derive(KnownLayout)]
5734 #[repr(packed(2))]
5735 struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5736
5737 assert_impl_all!(KL01PackedN: KnownLayout);
5738
5739 let expected = DstLayout::for_type::<KL01PackedN>();
5740
5741 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
5742 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5743
5744 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5745 // | N | N | Y | Y | KL03 |
5746 #[allow(dead_code)]
5747 #[derive(KnownLayout)]
5748 struct KL03(NotKnownLayout, u8);
5749
5750 let expected = DstLayout::for_type::<KL03>();
5751
5752 assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
5753 assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
5754
5755 // ... with `align(N)`
5756 #[allow(dead_code)]
5757 #[derive(KnownLayout)]
5758 #[repr(align(64))]
5759 struct KL03Align(NotKnownLayout<AU32>, u8);
5760
5761 let expected = DstLayout::for_type::<KL03Align>();
5762
5763 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
5764 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5765
5766 // ... with `packed`:
5767 #[allow(dead_code)]
5768 #[derive(KnownLayout)]
5769 #[repr(packed)]
5770 struct KL03Packed(NotKnownLayout<AU32>, u8);
5771
5772 let expected = DstLayout::for_type::<KL03Packed>();
5773
5774 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
5775 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
5776
5777 // ... with `packed(N)`
5778 #[allow(dead_code)]
5779 #[derive(KnownLayout)]
5780 #[repr(packed(2))]
5781 struct KL03PackedN(NotKnownLayout<AU32>, u8);
5782
5783 assert_impl_all!(KL03PackedN: KnownLayout);
5784
5785 let expected = DstLayout::for_type::<KL03PackedN>();
5786
5787 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
5788 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5789
5790 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5791 // | N | Y | N | Y | KL05 |
5792 #[allow(dead_code)]
5793 #[derive(KnownLayout)]
5794 struct KL05<T>(u8, T);
5795
5796 fn _test_kl05<T>(t: T) -> impl KnownLayout {
5797 KL05(0u8, t)
5798 }
5799
5800 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5801 // | N | Y | Y | Y | KL07 |
5802 #[allow(dead_code)]
5803 #[derive(KnownLayout)]
5804 struct KL07<T: KnownLayout>(u8, T);
5805
5806 fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
5807 let _ = KL07(0u8, t);
5808 }
5809
5810 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5811 // | Y | N | Y | N | KL10 |
5812 #[allow(dead_code)]
5813 #[derive(KnownLayout)]
5814 #[repr(C)]
5815 struct KL10(NotKnownLayout<AU32>, [u8]);
5816
5817 let expected = DstLayout::new_zst(None)
5818 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5819 .extend(<[u8] as KnownLayout>::LAYOUT, None)
5820 .pad_to_align();
5821
5822 assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
5823 assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
5824
5825 // ...with `align(N)`:
5826 #[allow(dead_code)]
5827 #[derive(KnownLayout)]
5828 #[repr(C, align(64))]
5829 struct KL10Align(NotKnownLayout<AU32>, [u8]);
5830
5831 let repr_align = NonZeroUsize::new(64);
5832
5833 let expected = DstLayout::new_zst(repr_align)
5834 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5835 .extend(<[u8] as KnownLayout>::LAYOUT, None)
5836 .pad_to_align();
5837
5838 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
5839 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
5840
5841 // ...with `packed`:
5842 #[allow(dead_code)]
5843 #[derive(KnownLayout)]
5844 #[repr(C, packed)]
5845 struct KL10Packed(NotKnownLayout<AU32>, [u8]);
5846
5847 let repr_packed = NonZeroUsize::new(1);
5848
5849 let expected = DstLayout::new_zst(None)
5850 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5851 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5852 .pad_to_align();
5853
5854 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
5855 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
5856
5857 // ...with `packed(N)`:
5858 #[allow(dead_code)]
5859 #[derive(KnownLayout)]
5860 #[repr(C, packed(2))]
5861 struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
5862
5863 let repr_packed = NonZeroUsize::new(2);
5864
5865 let expected = DstLayout::new_zst(None)
5866 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5867 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5868 .pad_to_align();
5869
5870 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
5871 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
5872
5873 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5874 // | Y | N | Y | Y | KL11 |
5875 #[allow(dead_code)]
5876 #[derive(KnownLayout)]
5877 #[repr(C)]
5878 struct KL11(NotKnownLayout<AU64>, u8);
5879
5880 let expected = DstLayout::new_zst(None)
5881 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5882 .extend(<u8 as KnownLayout>::LAYOUT, None)
5883 .pad_to_align();
5884
5885 assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
5886 assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
5887
5888 // ...with `align(N)`:
5889 #[allow(dead_code)]
5890 #[derive(KnownLayout)]
5891 #[repr(C, align(64))]
5892 struct KL11Align(NotKnownLayout<AU64>, u8);
5893
5894 let repr_align = NonZeroUsize::new(64);
5895
5896 let expected = DstLayout::new_zst(repr_align)
5897 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5898 .extend(<u8 as KnownLayout>::LAYOUT, None)
5899 .pad_to_align();
5900
5901 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
5902 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5903
5904 // ...with `packed`:
5905 #[allow(dead_code)]
5906 #[derive(KnownLayout)]
5907 #[repr(C, packed)]
5908 struct KL11Packed(NotKnownLayout<AU64>, u8);
5909
5910 let repr_packed = NonZeroUsize::new(1);
5911
5912 let expected = DstLayout::new_zst(None)
5913 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
5914 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
5915 .pad_to_align();
5916
5917 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
5918 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
5919
5920 // ...with `packed(N)`:
5921 #[allow(dead_code)]
5922 #[derive(KnownLayout)]
5923 #[repr(C, packed(2))]
5924 struct KL11PackedN(NotKnownLayout<AU64>, u8);
5925
5926 let repr_packed = NonZeroUsize::new(2);
5927
5928 let expected = DstLayout::new_zst(None)
5929 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
5930 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
5931 .pad_to_align();
5932
5933 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
5934 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
5935
5936 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5937 // | Y | Y | Y | N | KL14 |
5938 #[allow(dead_code)]
5939 #[derive(KnownLayout)]
5940 #[repr(C)]
5941 struct KL14<T: ?Sized + KnownLayout>(u8, T);
5942
5943 fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
5944 _assert_kl(kl)
5945 }
5946
5947 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5948 // | Y | Y | Y | Y | KL15 |
5949 #[allow(dead_code)]
5950 #[derive(KnownLayout)]
5951 #[repr(C)]
5952 struct KL15<T: KnownLayout>(u8, T);
5953
5954 fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
5955 let _ = KL15(0u8, t);
5956 }
5957
5958 // Test a variety of combinations of field types:
5959 // - ()
5960 // - u8
5961 // - AU16
5962 // - [()]
5963 // - [u8]
5964 // - [AU16]
5965
5966 #[allow(clippy::upper_case_acronyms, dead_code)]
5967 #[derive(KnownLayout)]
5968 #[repr(C)]
5969 struct KLTU<T, U: ?Sized>(T, U);
5970
5971 assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
5972
5973 assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
5974
5975 assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
5976
5977 assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
5978
5979 assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
5980
5981 assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
5982
5983 assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
5984
5985 assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
5986
5987 assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
5988
5989 assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
5990
5991 assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
5992
5993 assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
5994
5995 assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
5996
5997 assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
5998
5999 assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6000
6001 assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
6002
6003 assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
6004
6005 assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6006
6007 // Test a variety of field counts.
6008
6009 #[derive(KnownLayout)]
6010 #[repr(C)]
6011 struct KLF0;
6012
6013 assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6014
6015 #[derive(KnownLayout)]
6016 #[repr(C)]
6017 struct KLF1([u8]);
6018
6019 assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6020
6021 #[derive(KnownLayout)]
6022 #[repr(C)]
6023 struct KLF2(NotKnownLayout<u8>, [u8]);
6024
6025 assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6026
6027 #[derive(KnownLayout)]
6028 #[repr(C)]
6029 struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6030
6031 assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6032
6033 #[derive(KnownLayout)]
6034 #[repr(C)]
6035 struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6036
6037 assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
6038 }
6039
6040 #[test]
6041 fn test_object_safety() {
6042 fn _takes_no_cell(_: &dyn Immutable) {}
6043 fn _takes_unaligned(_: &dyn Unaligned) {}
6044 }
6045
6046 #[test]
6047 fn test_from_zeros_only() {
6048 // Test types that implement `FromZeros` but not `FromBytes`.
6049
6050 assert!(!bool::new_zeroed());
6051 assert_eq!(char::new_zeroed(), '\0');
6052
6053 #[cfg(feature = "alloc")]
6054 {
6055 assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6056 assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6057
6058 assert_eq!(
6059 <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6060 [false, false, false]
6061 );
6062 assert_eq!(
6063 <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6064 ['\0', '\0', '\0']
6065 );
6066
6067 assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6068 assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6069 }
6070
6071 let mut string = "hello".to_string();
6072 let s: &mut str = string.as_mut();
6073 assert_eq!(s, "hello");
6074 s.zero();
6075 assert_eq!(s, "\0\0\0\0\0");
6076 }
6077
6078 #[test]
6079 fn test_zst_count_preserved() {
6080         // Test that, when an explicit count is provided for a type with a
6081 // ZST trailing slice element, that count is preserved. This is
6082 // important since, for such types, all element counts result in objects
6083 // of the same size, and so the correct behavior is ambiguous. However,
6084 // preserving the count as requested by the user is the behavior that we
6085 // document publicly.
6086
6087 // FromZeros methods
6088 #[cfg(feature = "alloc")]
6089 assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
6090 #[cfg(feature = "alloc")]
6091 assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);
6092
6093 // FromBytes methods
6094 assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
6095 assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
6096 assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
6097 assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
6098 assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
6099 assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
6100 }
6101
6102 #[test]
6103 fn test_read_write() {
6104 const VAL: u64 = 0x12345678;
6105 #[cfg(target_endian = "big")]
6106 const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6107 #[cfg(target_endian = "little")]
6108 const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6109 const ZEROS: [u8; 8] = [0u8; 8];
6110
6111 // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6112
6113 assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6114 // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6115 // zeros.
6116 let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6117 assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6118 assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6119 // The first 8 bytes are all zeros and the second 8 bytes are from
6120        // `VAL_BYTES`.
6121 let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6122 assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6123 assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6124
6125 // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6126
6127 let mut bytes = [0u8; 8];
6128 assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6129 assert_eq!(bytes, VAL_BYTES);
6130 let mut bytes = [0u8; 16];
6131 assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6132 let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6133 assert_eq!(bytes, want);
6134 let mut bytes = [0u8; 16];
6135 assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6136 let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6137 assert_eq!(bytes, want);
6138 }
6139
6140 #[test]
6141 #[cfg(feature = "std")]
6142 fn test_read_io_with_padding_soundness() {
6143        // This test is designed to exhibit potential UB in
6144        // `FromBytes::read_from_io` (see #2319, #2320).
6145
6146 // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
6147 // will have inter-field padding between `x` and `y`.
6148 #[derive(FromBytes)]
6149 #[repr(C)]
6150 struct WithPadding {
6151 x: u8,
6152 y: u16,
6153 }
6154 struct ReadsInRead;
6155 impl std::io::Read for ReadsInRead {
6156 fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
6157 // This body branches on every byte of `buf`, ensuring that it
6158 // exhibits UB if any byte of `buf` is uninitialized.
6159 if buf.iter().all(|&x| x == 0) {
6160 Ok(buf.len())
6161 } else {
6162 buf.iter_mut().for_each(|x| *x = 0);
6163 Ok(buf.len())
6164 }
6165 }
6166 }
6167 assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
6168 }
6169
6170 #[test]
6171 #[cfg(feature = "std")]
6172 fn test_read_write_io() {
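        // Round-trip a value through `write_to_io` and `read_from_io`, and
        // check that a too-short writer or reader produces an error. Note that
        // the failed `u32` write still fills the two available bytes, as
        // asserted below.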
6173 let mut long_buffer = [0, 0, 0, 0];
6174 assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6175 assert_eq!(long_buffer, [255, 255, 0, 0]);
6176 assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6177
6178 let mut short_buffer = [0, 0];
6179 assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6180 assert_eq!(short_buffer, [255, 255]);
6181 assert!(u32::read_from_io(&short_buffer[..]).is_err());
6182 }
6183
6184 #[test]
6185 fn test_try_from_bytes_try_read_from() {
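        // `bool`'s only valid byte values are 0 and 1; both should be readable
        // from exact-sized buffers as well as from prefixes and suffixes.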
6186 assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
6187 assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
6188
6189 assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
6190 assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
6191
6192 assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
6193 assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
6194
6195 // If we don't pass enough bytes, it fails.
6196 assert!(matches!(
6197 <u8 as TryFromBytes>::try_read_from_bytes(&[]),
6198 Err(TryReadError::Size(_))
6199 ));
6200 assert!(matches!(
6201 <u8 as TryFromBytes>::try_read_from_prefix(&[]),
6202 Err(TryReadError::Size(_))
6203 ));
6204 assert!(matches!(
6205 <u8 as TryFromBytes>::try_read_from_suffix(&[]),
6206 Err(TryReadError::Size(_))
6207 ));
6208
6209 // If we pass too many bytes, it fails.
6210 assert!(matches!(
6211 <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
6212 Err(TryReadError::Size(_))
6213 ));
6214
6215 // If we pass an invalid value, it fails.
6216 assert!(matches!(
6217 <bool as TryFromBytes>::try_read_from_bytes(&[2]),
6218 Err(TryReadError::Validity(_))
6219 ));
6220 assert!(matches!(
6221 <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
6222 Err(TryReadError::Validity(_))
6223 ));
6224 assert!(matches!(
6225 <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
6226 Err(TryReadError::Validity(_))
6227 ));
6228
6229 // Reading from a misaligned buffer should still succeed. Since `AU64`'s
6230 // alignment is 8, and since we read from two adjacent addresses one
6231 // byte apart, it is guaranteed that at least one of them (though
6232 // possibly both) will be misaligned.
6233 let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
6234 assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
6235 assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
6236
6237 assert_eq!(
6238 <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
6239 Ok((AU64(0), &[][..]))
6240 );
6241 assert_eq!(
6242 <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
6243 Ok((AU64(0), &[][..]))
6244 );
6245
6246 assert_eq!(
6247 <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
6248 Ok((&[][..], AU64(0)))
6249 );
6250 assert_eq!(
6251 <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
6252 Ok((&[][..], AU64(0)))
6253 );
6254 }
6255
6256 #[test]
6257 fn test_ref_from_mut_from() {
6258        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases.
6259        // Exhaustive coverage for these methods is provided by the `Ref` tests
6260        // above, which these helper methods defer to.
6261
6262 let mut buf =
6263 Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
6264
6265 assert_eq!(
6266 AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
6267 [8, 9, 10, 11, 12, 13, 14, 15]
6268 );
6269 let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
6270 suffix.0 = 0x0101010101010101;
6271        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
6272        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
6273 assert_eq!(
6274 <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
6275 (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
6276 );
6277 let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
6278 assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
6279 suffix.0 = 0x0202020202020202;
6280 let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
6281 assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
6282 suffix[0] = 42;
6283 assert_eq!(
6284 <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
6285 (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
6286 );
6287 <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
6288 assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
6289 }
6290
6291 #[test]
6292 fn test_ref_from_mut_from_error() {
6293        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
6294
6295 // Fail because the buffer is too large.
6296 let mut buf = Align::<[u8; 16], AU64>::default();
6297 // `buf.t` should be aligned to 8, so only the length check should fail.
6298 assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6299 assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6300 assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6301 assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6302
6303 // Fail because the buffer is too small.
6304 let mut buf = Align::<[u8; 4], AU64>::default();
6305 assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6306 assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6307 assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6308 assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6309 assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6310 assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6311 assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6312 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6313 assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6314 assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6315 assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6316 assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6317
6318 // Fail because the alignment is insufficient.
6319 let mut buf = Align::<[u8; 13], AU64>::default();
6320 assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6321 assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6322 assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6323 assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6324 assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6325 assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6326 assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6327 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6328 }
6329
6330 #[test]
6331 fn test_to_methods() {
6332 /// Run a series of tests by calling `IntoBytes` methods on `t`.
6333 ///
6334 /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
6335 /// before `t` has been modified. `post_mutation` is the expected
6336 /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
6337 /// has had its bits flipped (by applying `^= 0xFF`).
6338 ///
6339 /// `N` is the size of `t` in bytes.
6340 fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
6341 t: &mut T,
6342 bytes: &[u8],
6343 post_mutation: &T,
6344 ) {
6345 // Test that we can access the underlying bytes, and that we get the
6346 // right bytes and the right number of bytes.
6347 assert_eq!(t.as_bytes(), bytes);
6348
6349 // Test that changes to the underlying byte slices are reflected in
6350 // the original object.
6351 t.as_mut_bytes()[0] ^= 0xFF;
6352 assert_eq!(t, post_mutation);
6353 t.as_mut_bytes()[0] ^= 0xFF;
6354
6355 // `write_to` rejects slices that are too small or too large.
6356 assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
6357 assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());
6358
6359 // `write_to` works as expected.
6360 let mut bytes = [0; N];
6361 assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
6362 assert_eq!(bytes, t.as_bytes());
6363
6364 // `write_to_prefix` rejects slices that are too small.
6365 assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());
6366
6367 // `write_to_prefix` works with exact-sized slices.
6368 let mut bytes = [0; N];
6369 assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
6370 assert_eq!(bytes, t.as_bytes());
6371
6372 // `write_to_prefix` works with too-large slices, and any bytes past
6373 // the prefix aren't modified.
6374 let mut too_many_bytes = vec![0; N + 1];
6375 too_many_bytes[N] = 123;
6376 assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
6377 assert_eq!(&too_many_bytes[..N], t.as_bytes());
6378 assert_eq!(too_many_bytes[N], 123);
6379
6380 // `write_to_suffix` rejects slices that are too small.
6381 assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());
6382
6383 // `write_to_suffix` works with exact-sized slices.
6384 let mut bytes = [0; N];
6385 assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
6386 assert_eq!(bytes, t.as_bytes());
6387
6388 // `write_to_suffix` works with too-large slices, and any bytes
6389 // before the suffix aren't modified.
6390 let mut too_many_bytes = vec![0; N + 1];
6391 too_many_bytes[0] = 123;
6392 assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
6393 assert_eq!(&too_many_bytes[1..], t.as_bytes());
6394 assert_eq!(too_many_bytes[0], 123);
6395 }
6396
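        // Exercise both the sized path (a `#[repr(C)]` struct with
        // endian-dependent expected bytes) and the unsized path (an `Unsized`
        // wrapper around a `[u8]`).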
6397 #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
6398 #[repr(C)]
6399 struct Foo {
6400 a: u32,
6401 b: Wrapping<u32>,
6402 c: Option<NonZeroU32>,
6403 }
6404
6405 let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
6406 vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
6407 } else {
6408 vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
6409 };
6410 let post_mutation_expected_a =
6411 if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
6412 test::<_, 12>(
6413 &mut Foo { a: 1, b: Wrapping(2), c: None },
6414 expected_bytes.as_bytes(),
6415 &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
6416 );
6417 test::<_, 3>(
6418 Unsized::from_mut_slice(&mut [1, 2, 3]),
6419 &[1, 2, 3],
6420 Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
6421 );
6422 }
6423
6424 #[test]
6425 fn test_array() {
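        // An array field's bytes are its elements' bytes laid out back to
        // back: 33 `u16`s of `0xFFFF` should serialize to 66 `0xFF` bytes.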
6426 #[derive(FromBytes, IntoBytes, Immutable)]
6427 #[repr(C)]
6428 struct Foo {
6429 a: [u16; 33],
6430 }
6431
6432 let foo = Foo { a: [0xFFFF; 33] };
6433 let expected = [0xFFu8; 66];
6434 assert_eq!(foo.as_bytes(), &expected[..]);
6435 }
6436
6437 #[test]
6438 fn test_new_zeroed() {
6439 assert!(!bool::new_zeroed());
6440 assert_eq!(u64::new_zeroed(), 0);
6441 // This test exists in order to exercise unsafe code, especially when
6442 // running under Miri.
6443 #[allow(clippy::unit_cmp)]
6444 {
6445 assert_eq!(<()>::new_zeroed(), ());
6446 }
6447 }
6448
6449 #[test]
6450 fn test_transparent_packed_generic_struct() {
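        // Derives on `#[repr(transparent)]` and `#[repr(C, packed)]` generic
        // structs should produce impls that apply whenever the field types
        // satisfy the corresponding bounds; `assert_impl_all!` spot-checks a
        // few instantiations.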
6451 #[derive(IntoBytes, FromBytes, Unaligned)]
6452 #[repr(transparent)]
6453 #[allow(dead_code)] // We never construct this type
6454 struct Foo<T> {
6455 _t: T,
6456 _phantom: PhantomData<()>,
6457 }
6458
6459 assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
6460 assert_impl_all!(Foo<u8>: Unaligned);
6461
6462 #[derive(IntoBytes, FromBytes, Unaligned)]
6463 #[repr(C, packed)]
6464 #[allow(dead_code)] // We never construct this type
6465 struct Bar<T, U> {
6466 _t: T,
6467 _u: U,
6468 }
6469
6470 assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
6471 }
6472
6473 #[cfg(feature = "alloc")]
6474 mod alloc {
6475 use super::*;
6476
6477 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6478 #[test]
6479 fn test_extend_vec_zeroed() {
6480 // Test extending when there is an existing allocation.
6481 let mut v = vec![100u16, 200, 300];
6482 FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6483 assert_eq!(v.len(), 6);
6484 assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
6485 drop(v);
6486
6487 // Test extending when there is no existing allocation.
6488 let mut v: Vec<u64> = Vec::new();
6489 FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6490 assert_eq!(v.len(), 3);
6491 assert_eq!(&*v, &[0, 0, 0]);
6492 drop(v);
6493 }
6494
6495 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6496 #[test]
6497 fn test_extend_vec_zeroed_zst() {
6498 // Test extending when there is an existing (fake) allocation.
6499 let mut v = vec![(), (), ()];
6500 <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6501 assert_eq!(v.len(), 6);
6502 assert_eq!(&*v, &[(), (), (), (), (), ()]);
6503 drop(v);
6504
6505 // Test extending when there is no existing (fake) allocation.
6506 let mut v: Vec<()> = Vec::new();
6507 <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6508 assert_eq!(&*v, &[(), (), ()]);
6509 drop(v);
6510 }
6511
6512 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6513 #[test]
6514 fn test_insert_vec_zeroed() {
6515 // Insert at start (no existing allocation).
6516 let mut v: Vec<u64> = Vec::new();
6517 u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6518 assert_eq!(v.len(), 2);
6519 assert_eq!(&*v, &[0, 0]);
6520 drop(v);
6521
6522 // Insert at start.
6523 let mut v = vec![100u64, 200, 300];
6524 u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6525 assert_eq!(v.len(), 5);
6526 assert_eq!(&*v, &[0, 0, 100, 200, 300]);
6527 drop(v);
6528
6529 // Insert at middle.
6530 let mut v = vec![100u64, 200, 300];
6531 u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6532 assert_eq!(v.len(), 4);
6533 assert_eq!(&*v, &[100, 0, 200, 300]);
6534 drop(v);
6535
6536 // Insert at end.
6537 let mut v = vec![100u64, 200, 300];
6538 u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6539 assert_eq!(v.len(), 4);
6540 assert_eq!(&*v, &[100, 200, 300, 0]);
6541 drop(v);
6542 }
6543
6544 #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6545 #[test]
6546 fn test_insert_vec_zeroed_zst() {
6547 // Insert at start (no existing fake allocation).
6548 let mut v: Vec<()> = Vec::new();
6549 <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6550 assert_eq!(v.len(), 2);
6551 assert_eq!(&*v, &[(), ()]);
6552 drop(v);
6553
6554 // Insert at start.
6555 let mut v = vec![(), (), ()];
6556 <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6557 assert_eq!(v.len(), 5);
6558 assert_eq!(&*v, &[(), (), (), (), ()]);
6559 drop(v);
6560
6561 // Insert at middle.
6562 let mut v = vec![(), (), ()];
6563 <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6564 assert_eq!(v.len(), 4);
6565 assert_eq!(&*v, &[(), (), (), ()]);
6566 drop(v);
6567
6568 // Insert at end.
6569 let mut v = vec![(), (), ()];
6570 <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6571 assert_eq!(v.len(), 4);
6572 assert_eq!(&*v, &[(), (), (), ()]);
6573 drop(v);
6574 }
6575
6576 #[test]
6577 fn test_new_box_zeroed() {
6578 assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
6579 }
6580
6581 #[test]
6582 fn test_new_box_zeroed_array() {
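            // Allocate and immediately drop a large zeroed boxed array; this
            // mainly exercises the allocation code path (especially under
            // Miri).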
6583 drop(<[u32; 0x1000]>::new_box_zeroed());
6584 }
6585
6586 #[test]
6587 fn test_new_box_zeroed_zst() {
6588 // This test exists in order to exercise unsafe code, especially
6589 // when running under Miri.
6590 #[allow(clippy::unit_cmp)]
6591 {
6592 assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
6593 }
6594 }
6595
6596 #[test]
6597 fn test_new_box_zeroed_with_elems() {
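            // The boxed slice should have the requested length, start out
            // zero-filled, and be mutable like any other boxed slice.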
6598 let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
6599 assert_eq!(s.len(), 3);
6600 assert_eq!(&*s, &[0, 0, 0]);
6601 s[1] = 3;
6602 assert_eq!(&*s, &[0, 3, 0]);
6603 }
6604
6605 #[test]
6606 fn test_new_box_zeroed_with_elems_empty() {
6607 let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
6608 assert_eq!(s.len(), 0);
6609 }
6610
6611 #[test]
6612 fn test_new_box_zeroed_with_elems_zst() {
6613 let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
6614 assert_eq!(s.len(), 3);
6615 assert!(s.get(10).is_none());
6616 // This test exists in order to exercise unsafe code, especially
6617 // when running under Miri.
6618 #[allow(clippy::unit_cmp)]
6619 {
6620 assert_eq!(s[1], ());
6621 }
6622 s[2] = ();
6623 }
6624
6625 #[test]
6626 fn test_new_box_zeroed_with_elems_zst_empty() {
6627 let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
6628 assert_eq!(s.len(), 0);
6629 }
6630
6631 #[test]
6632 fn new_box_zeroed_with_elems_errors() {
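            // Element counts whose total size in bytes overflows `usize` or
            // exceeds `isize::MAX` should be reported as `AllocError` rather
            // than panicking.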
6633 assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
6634
6635 let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
6636 assert_eq!(
6637 <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
6638 Err(AllocError)
6639 );
6640 }
6641 }
6642}